repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) |
---|---|---|---|---|---|---|---|---|---|---|
alok1974/nbCodeLines | modules/codeLines.py | 1 | 7366 | ###########################################################################################
###########################################################################################
## ##
## Nb Code Lines v 1.0 (c) 2015 Alok Gandhi ([email protected]) ##
## ##
## ##
## This file is part of Nb Code Lines. ##
## ##
## Nb Code lines is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License, Version 3, 29 June 2007 ##
## as published by the Free Software Foundation, ##
## ##
## Nb Code Lines is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with Nb Code lines. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################################
###########################################################################################
import os
import sys
from datetime import date
from PyQt4 import QtCore, QtGui
import time
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if ROOT_DIR not in sys.path:
sys.path.append(ROOT_DIR)
from gui.logger import Logger
class CodeLines(object):
def __init__(self, qThread=None, folder="", ext=[], startDate=0, startMonth=0, startYear=0, *args, **kwargs):
super(CodeLines, self).__init__(*args, **kwargs)
# Supplied Arguments
self._qThread = qThread
self._folder = folder
self._ext = ext
self._startDate = startDate
self._startMonth = startMonth
self._startYear = startYear
# Data to Calculate
self._data = []
self._prjStartDate = None
self._nbPrjDays = 0
self._nbTotalLines = 0
self._nbActualLines = 0
self._codeDensity = 0.0
self._avgLinesPerDay = 0
self._avgLinesPerHour = 0
self._hasError = False
self._errStr = ''
self._findAll = False
if '*.*' in self._ext:
self._findAll = True
# Initialization Methods
if not self._qThread:
self._assert()
self._generateData()
def runThread(self):
self._assert()
self._generateData()
return self.getData()
def _assert(self):
if self._folder == '':
self._hasError = True
self._errStr = 'No script folder provided!'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
if not os.path.exists(self._folder):
self._hasError = True
self._errStr = 'The folder <%s> does not exist!' % self._folder
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
if len(self._ext) == 0:
self._hasError = True
self._errStr = 'No script file extensions provided!'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
try:
self._prjStartDate = date(self._startYear, self._startMonth, self._startDate)
self._nbPrjDays = (date.today() - self._prjStartDate).days
if self._nbPrjDays <= 0:
self._hasError = True
self._errStr = 'Project start date must be earlier than the current date!'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
except Exception:
self._hasError = True
self._errStr = 'Supplied Date parameters are not valid!'
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
def _generateData(self):
if self._hasError:
return
for root, dirs, files in os.walk(self._folder):
for f in files:
fName, ext = os.path.splitext(f)
openPath = os.path.abspath(os.path.join(root, f))
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("update(PyQt_PyObject)"), str(f))
if not self._findAll:
if ext not in self._ext:
continue
with open(openPath) as file:
lines = file.readlines()
nbLines = len(lines)
n = 0
for line in lines:
if not str(line).strip():
continue
n += 1
self._data.append(((n, nbLines), str(f), str(os.path.join(root, f))))
self._nbTotalLines += nbLines
self._nbActualLines += n
self._data.sort(reverse=True)
if len(self._data) == 0:
self._hasError = True
self._errStr = self._wrap(self._folder, 'No Script files found in the root folder:')
if self._qThread:
self._qThread.emit(QtCore.SIGNAL("errorOccured(PyQt_PyObject)"), self._errStr)
return
self._codeDensity = (round((self._nbActualLines / float(self._nbTotalLines)) * 100, 2))
self._avgLinesPerDay = int(self._nbActualLines / float(self._nbPrjDays))
self._avgLinesPerHour = int(self._avgLinesPerDay / 8.0)
@staticmethod
def _wrap(folderPath, defaultStr):
result = ''
if len(folderPath) > len(defaultStr):
result = folderPath[:len(defaultStr) - 2]
result += '... '
return '%s\n\n%s' % (defaultStr, result)
def getData(self):
return self._data, self._nbPrjDays, self._avgLinesPerDay, self._avgLinesPerHour, self._codeDensity | gpl-3.0 | 5,580,130,533,473,561,000 | 37.570681 | 113 | 0.435379 | false |
jleclanche/fireplace | fireplace/cards/classic/neutral_common.py | 1 | 5375 | from ..utils import *
##
# Free basic minions
class CS2_122:
"""Raid Leader"""
update = Refresh(FRIENDLY_MINIONS - SELF, buff="CS2_122e")
CS2_122e = buff(atk=1)
class CS2_222:
"""Stormwind Champion"""
update = Refresh(FRIENDLY_MINIONS - SELF, buff="CS2_222o")
CS2_222o = buff(+1, +1)
class CS2_226:
"""Frostwolf Warlord"""
play = Buff(SELF, "CS2_226e") * Count(FRIENDLY_MINIONS - SELF)
CS2_226e = buff(+1, +1)
class EX1_011:
"""Voodoo Doctor"""
requirements = {PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Heal(TARGET, 2)
class EX1_015:
"""Novice Engineer"""
play = Draw(CONTROLLER)
class EX1_082:
"""Mad Bomber"""
play = Hit(RANDOM_OTHER_CHARACTER, 1) * 3
class EX1_102:
"""Demolisher"""
events = OWN_TURN_BEGIN.on(Hit(RANDOM_ENEMY_CHARACTER, 2))
class EX1_162:
"""Dire Wolf Alpha"""
update = Refresh(SELF_ADJACENT, buff="EX1_162o")
EX1_162o = buff(atk=1)
class EX1_399:
"""Gurubashi Berserker"""
events = SELF_DAMAGE.on(Buff(SELF, "EX1_399e"))
EX1_399e = buff(atk=3)
class EX1_508:
"""Grimscale Oracle"""
update = Refresh(FRIENDLY_MINIONS + MURLOC - SELF, buff="EX1_508o")
EX1_508o = buff(atk=1)
class EX1_593:
"""Nightblade"""
play = Hit(ENEMY_HERO, 3)
class EX1_595:
"""Cult Master"""
events = Death(FRIENDLY + MINION).on(Draw(CONTROLLER))
##
# Common basic minions
class CS2_117:
"""Earthen Ring Farseer"""
requirements = {PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Heal(TARGET, 3)
class CS2_141:
"""Ironforge Rifleman"""
requirements = {PlayReq.REQ_NONSELF_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Hit(TARGET, 1)
class CS2_146:
"""Southsea Deckhand"""
update = Find(FRIENDLY_WEAPON) & Refresh(SELF, {GameTag.CHARGE: True})
class CS2_147:
"""Gnomish Inventor"""
play = Draw(CONTROLLER)
class CS2_150:
"""Stormpike Commando"""
requirements = {PlayReq.REQ_NONSELF_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Hit(TARGET, 2)
class CS2_151:
"""Silver Hand Knight"""
play = Summon(CONTROLLER, "CS2_152")
class CS2_189:
"""Elven Archer"""
requirements = {PlayReq.REQ_NONSELF_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Hit(TARGET, 1)
class CS2_188:
"""Abusive Sergeant"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Buff(TARGET, "CS2_188o")
CS2_188o = buff(atk=2)
class CS2_196:
"""Razorfen Hunter"""
play = Summon(CONTROLLER, "CS2_boar")
class CS2_203:
"""Ironbeak Owl"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Silence(TARGET)
class CS2_221:
"""Spiteful Smith"""
enrage = Refresh(FRIENDLY_WEAPON, buff="CS2_221e")
CS2_221e = buff(atk=2)
class CS2_227:
"""Venture Co. Mercenary"""
update = Refresh(FRIENDLY_HAND + MINION, {GameTag.COST: +3})
class DS1_055:
"""Darkscale Healer"""
play = Heal(FRIENDLY_CHARACTERS, 2)
class EX1_007:
"""Acolyte of Pain"""
events = SELF_DAMAGE.on(Draw(CONTROLLER))
class EX1_019:
"""Shattered Sun Cleric"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Buff(TARGET, "EX1_019e")
EX1_019e = buff(+1, +1)
class EX1_025:
"""Dragonling Mechanic"""
play = Summon(CONTROLLER, "EX1_025t")
class EX1_029:
"""Leper Gnome"""
deathrattle = Hit(ENEMY_HERO, 2)
class EX1_046:
"""Dark Iron Dwarf"""
requirements = {PlayReq.REQ_MINION_TARGET: 0, PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Buff(TARGET, "EX1_046e")
EX1_046e = buff(atk=2)
class EX1_048:
"""Spellbreaker"""
requirements = {
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_NONSELF_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Silence(TARGET)
class EX1_049:
"""Youthful Brewmaster"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_NONSELF_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Bounce(TARGET)
class EX1_057:
"""Ancient Brewmaster"""
requirements = {
PlayReq.REQ_FRIENDLY_TARGET: 0,
PlayReq.REQ_MINION_TARGET: 0,
PlayReq.REQ_NONSELF_TARGET: 0,
PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Bounce(TARGET)
class EX1_066:
"""Acidic Swamp Ooze"""
play = Destroy(ENEMY_WEAPON)
class EX1_096:
"""Loot Hoarder"""
deathrattle = Draw(CONTROLLER)
class EX1_283:
"""Frost Elemental"""
requirements = {PlayReq.REQ_TARGET_IF_AVAILABLE: 0}
play = Freeze(TARGET)
class EX1_390:
"""Tauren Warrior"""
enrage = Refresh(SELF, buff="EX1_390e")
EX1_390e = buff(atk=3)
class EX1_393:
"""Amani Berserker"""
enrage = Refresh(SELF, buff="EX1_393e")
EX1_393e = buff(atk=3)
class EX1_412:
"""Raging Worgen"""
enrage = Refresh(SELF, buff="EX1_412e")
class EX1_412e:
tags = {GameTag.ATK: +1}
windfury = SET(1)
class EX1_506:
"""Murloc Tidehunter"""
play = Summon(CONTROLLER, "EX1_506a")
class EX1_556:
"""Harvest Golem"""
deathrattle = Summon(CONTROLLER, "skele21")
class EX1_583:
"""Priestess of Elune"""
play = Heal(FRIENDLY_HERO, 4)
class NEW1_018:
"""Bloodsail Raider"""
play = Find(FRIENDLY_WEAPON) & Buff(SELF, "NEW1_018e", atk=ATK(FRIENDLY_WEAPON))
class NEW1_022:
"""Dread Corsair"""
cost_mod = -ATK(FRIENDLY_WEAPON)
class tt_004:
"""Flesheating Ghoul"""
events = Death(MINION).on(Buff(SELF, "tt_004o"))
tt_004o = buff(atk=1)
##
# Unused buffs
# Full Strength (Injured Blademaster)
CS2_181e = buff(atk=2)
| agpl-3.0 | 2,019,998,216,743,704,000 | 16.33871 | 83 | 0.673116 | false |
matteoredaelli/scrapy_web | scrapy_web/spiders/nomi_maschili_femminili_nomix_it.py | 1 | 1908 | # -*- coding: utf-8 -*-
# scrapy_web
# Copyright (C) 2016-2017 <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# scrapy crawl dizionario_italiano_corriere -t jsonlines -o diz.json
import scrapy
class NomiMaschiliFemminiliNomixItSpider(scrapy.Spider):
name = "nomi-nomix.it"
allowed_domains = ["nomix.it"]
start_urls = (
'http://www.nomix.it/nomi-italiani-maschili-e-femminili.php',
)
def parse(self, response):
for nome in response.xpath('//div[@class="pure-g"]/div[1]/table//td/text()').extract():
yield {"word": nome,
"class": "nome proprio",
"sex": "male",
"source": "nomix.com"}
for nome in response.xpath('//div[@class="pure-g"]/div[2]/table//td/text()').extract():
yield {"word": nome,
"class": "nome proprio",
"sex": "female",
"source": "nomix.com"}
# extracting next pages
for next_page in response.xpath('//h2/a/@href').extract():
if next_page is not None and next_page != "#":
next_page = response.urljoin(next_page)
yield scrapy.Request(next_page, callback=self.parse)
| gpl-3.0 | 1,822,298,830,182,337,500 | 37.16 | 95 | 0.604822 | false |
hh-italian-group/HHbbTauTau | TreeProduction/python/TriggerObjectBlock_cfi.py | 1 | 1874 | ## @package TriggerObjectBlock_cfi
# Configuration file that defines the producer of ROOT-tuple for trigger objects.
#
# \author Subir Sarkar
# \author Rosamaria Venditti (INFN Bari, Bari University)
# \author Konstantin Androsov (University of Siena, INFN Pisa)
# \author Maria Teresa Grippo (University of Siena, INFN Pisa)
#
# Copyright 2011-2013 Subir Sarkar, Rosamaria Venditti (INFN Bari, Bari University)
# Copyright 2014 Konstantin Androsov <[email protected]>,
# Maria Teresa Grippo <[email protected]>
#
# This file is part of X->HH->bbTauTau.
#
# X->HH->bbTauTau is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# X->HH->bbTauTau is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with X->HH->bbTauTau. If not, see <http://www.gnu.org/licenses/>.
import FWCore.ParameterSet.Config as cms
triggerObjectBlock = cms.EDAnalyzer("TriggerObjectBlock",
verbosity = cms.int32(0),
hltInputTag = cms.InputTag('TriggerResults','','HLT'),
triggerEventTag = cms.InputTag('patTriggerEvent'),
hltPathsOfInterest = cms.vstring ("HLT_DoubleMu",
"HLT_Mu",
"HLT_IsoMu",
"HLT_TripleMu",
"IsoPFTau",
"TrkIsoT",
"HLT_Ele"),
May10ReRecoData = cms.bool(False)
)
| gpl-2.0 | -7,589,809,344,042,750,000 | 43.619048 | 84 | 0.654216 | false |
stephanehenry27/Sickbeard-anime | sickbeard/providers/newznab.py | 1 | 7885 | # Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import urllib
import datetime
import re
import os
from xml.dom.minidom import parseString
import sickbeard
import generic
from sickbeard import classes
from sickbeard.helpers import sanitizeSceneName
from sickbeard import scene_exceptions
from sickbeard import encodingKludge as ek
from sickbeard import exceptions
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.exceptions import ex
class NewznabProvider(generic.NZBProvider):
def __init__(self, name, url, key=''):
generic.NZBProvider.__init__(self, name)
self.cache = NewznabCache(self)
self.url = url
self.key = key
# if a provider doesn't need an api key then this can be false
self.needs_auth = True
self.enabled = True
self.supportsBacklog = True
self.default = False
def configStr(self):
return self.name + '|' + self.url + '|' + self.key + '|' + str(int(self.enabled))
def imageName(self):
if ek.ek(os.path.isfile, ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', 'providers', self.getID()+'.gif')):
return self.getID()+'.gif'
return 'newznab.gif'
def isEnabled(self):
return self.enabled
def _get_season_search_strings(self, show, season=None, scene=False):
if not show:
return [{}]
to_return = []
# add new query strings for exceptions
name_exceptions = scene_exceptions.get_scene_exceptions(show.tvdbid, season) + [show.name]
name_exceptions = set(name_exceptions)
for cur_exception in name_exceptions:
cur_params = {}
# search directly by tvrage id
if show.tvrid:
cur_params['rid'] = show.tvrid
# if we can't then fall back on a very basic name search
else:
cur_params['q'] = sanitizeSceneName(cur_exception).replace('.', '_')
if season != None:
# air-by-date means &season=2010&q=2010.03, no other way to do it atm
if show.air_by_date:
cur_params['season'] = season.split('-')[0]
if 'q' in cur_params:
cur_params['q'] += '.' + season.replace('-', '.')
else:
cur_params['q'] = season.replace('-', '.')
else:
cur_params['season'] = season
# hack to only add a single result if it's a rageid search
if not ('rid' in cur_params and to_return):
to_return.append(cur_params)
return to_return
def _get_episode_search_strings(self, ep_obj):
params = {}
if not ep_obj:
return [params]
# search directly by tvrage id
if ep_obj.show.tvrid:
params['rid'] = ep_obj.show.tvrid
# if we can't then fall back on a very basic name search
else:
params['q'] = sanitizeSceneName(ep_obj.show.name).replace('.', '_')
if ep_obj.show.air_by_date:
date_str = str(ep_obj.airdate)
params['season'] = date_str.partition('-')[0]
params['ep'] = date_str.partition('-')[2].replace('-','/')
else:
params['season'] = ep_obj.scene_season
params['ep'] = ep_obj.scene_episode
to_return = [params]
# only do exceptions if we are searching by name
if 'q' in params:
# add new query strings for exceptions
name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.show.tvdbid)
for cur_exception in name_exceptions:
# don't add duplicates
if cur_exception == ep_obj.show.name:
continue
cur_return = params.copy()
cur_return['q'] = sanitizeSceneName(cur_exception).replace('.', '_')
to_return.append(cur_return)
return to_return
def _doGeneralSearch(self, search_string):
return self._doSearch({'q': search_string})
def _checkAuthFromData(self, data):
try:
parsedXML = parseString(data)
except Exception:
return False
if parsedXML.documentElement.tagName == 'error':
code = parsedXML.documentElement.getAttribute('code')
if code == '100':
raise exceptions.AuthException("Your API key for "+self.name+" is incorrect, check your config.")
elif code == '101':
raise exceptions.AuthException("Your account on "+self.name+" has been suspended, contact the administrator.")
elif code == '102':
raise exceptions.AuthException("Your account isn't allowed to use the API on "+self.name+", contact the administrator")
else:
logger.log(u"Unknown error given from "+self.name+": "+parsedXML.documentElement.getAttribute('description'), logger.ERROR)
return False
return True
def _doSearch(self, search_params, show=None):
params = {"t": "tvsearch",
"maxage": sickbeard.USENET_RETENTION,
"limit": 100,
"cat": '5030,5040'}
# hack this in for now
if self.getID() == 'nzbs_org':
params['cat'] += ',5070,5090'
if search_params:
params.update(search_params)
if self.key:
params['apikey'] = self.key
searchURL = self.url + 'api?' + urllib.urlencode(params)
logger.log(u"Search url: " + searchURL, logger.DEBUG)
data = self.getURL(searchURL)
if not data:
return []
# hack this in until it's fixed server side
if not data.startswith('<?xml'):
data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
try:
parsedXML = parseString(data)
items = parsedXML.getElementsByTagName('item')
except Exception, e:
logger.log(u"Error trying to load "+self.name+" RSS feed: "+ex(e), logger.ERROR)
logger.log(u"RSS data: "+data, logger.DEBUG)
return []
if not self._checkAuthFromData(data):
return []
if parsedXML.documentElement.tagName != 'rss':
logger.log(u"Resulting XML from "+self.name+" isn't RSS, not parsing it", logger.ERROR)
return []
results = []
for curItem in items:
(title, url) = self._get_title_and_url(curItem)
if not title or not url:
logger.log(u"The XML returned from the "+self.name+" RSS feed is incomplete, this result is unusable: "+data, logger.ERROR)
continue
results.append(curItem)
return results
def findPropers(self, date=None):
return []
results = []
for curResult in self._doGeneralSearch("proper repack"):
match = re.search('(\w{3}, \d{1,2} \w{3} \d{4} \d\d:\d\d:\d\d) [\+\-]\d{4}', curResult.findtext('pubDate'))
if not match:
continue
resultDate = datetime.datetime.strptime(match.group(1), "%a, %d %b %Y %H:%M:%S")
if date == None or resultDate > date:
results.append(classes.Proper(curResult.findtext('title'), curResult.findtext('link'), resultDate))
return results
class NewznabCache(tvcache.TVCache):
def __init__(self, provider):
tvcache.TVCache.__init__(self, provider)
# only poll newznab providers every 15 minutes max
self.minTime = 15
def _getRSSData(self):
params = {"t": "tvsearch",
"age": sickbeard.USENET_RETENTION,
"cat": '5040,5030'}
# hack this in for now
if self.provider.getID() == 'nzbs_org':
params['cat'] += ',5070,5090'
if self.provider.key:
params['apikey'] = self.provider.key
url = self.provider.url + 'api?' + urllib.urlencode(params)
logger.log(self.provider.name + " cache update URL: "+ url, logger.DEBUG)
data = self.provider.getURL(url)
# hack this in until it's fixed server side
if data and not data.startswith('<?xml'):
data = '<?xml version="1.0" encoding="ISO-8859-1" ?>' + data
return data
def _checkAuth(self, data):
return self.provider._checkAuthFromData(data)
| gpl-3.0 | 2,714,241,382,934,127,000 | 26.378472 | 127 | 0.674445 | false |
vericred/vericred-python | vericred_client/models/plan_deleted.py | 1 | 11507 | # coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
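As a rough illustration, the same paginated request could be issued from Python.
This is only a sketch: it assumes the third-party `requests` library and that
`Total` and `Per-Page` come back as HTTP response headers; it is not taken from
the official SDKs.

```
import requests

# Hypothetical values; substitute your own API key.
headers = {'Vericred-Api-Key': 'YOUR_KEY'}
params = {'page': 2, 'per_page': 5}

response = requests.get('https://api.vericred.com/networks',
                        headers=headers, params=params)

networks = response.json()                    # page 2 of the results
total = response.headers.get('Total')         # total number of records
per_page = response.headers.get('Per-Page')   # records returned per page
```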
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s),
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`.
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
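For example, matching each provider to its sideloaded state is a simple lookup.
The snippet below is illustrative only and assumes a payload shaped exactly like
the example above; it is not part of the SDKs.

```
# Hypothetical sideloaded payload, shaped like the example above.
payload = {
    'providers': [{'id': 1, 'state_id': 1}, {'id': 2, 'state_id': 1}],
    'states': [{'id': 1, 'code': 'NY'}],
}

# Index the sideloaded states by id, then resolve each provider's state_id.
states_by_id = {state['id']: state for state in payload['states']}
for provider in payload['providers']:
    state = states_by_id[provider['state_id']]
    print(provider['id'], state['code'])
```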
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
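As a minimal illustration of the grammar above, a tiered benefit string can be
split into its tiers with plain string operations. This naive sketch only
handles tiered strings like the examples above (it is not a parser for the full
grammar, and the helper name is ours, not part of any SDK):

```
def split_benefit(benefit):
    # Split off an optional "| limit/condition: ..." modifier, then the tiers.
    coverage, _, modifier = benefit.partition(' | ')
    tiers = {}
    for tier in coverage.split(' / '):
        name, _, cost_share = tier.partition(': ')
        tiers[name] = cost_share
    return tiers, (modifier or None)

print(split_benefit('In-Network: $400 before deductible then 20% after deductible'
                    ' / Out-of-Network: $1,500 before deductible then 50% after deductible'))
```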
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class PlanDeleted(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
PlanDeleted - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| apache-2.0 | 2,301,925,499,552,926,000 | 37.744108 | 228 | 0.62223 | false |
flaxatives/leetcode | minimum_window_substring.py | 1 | 1883 | #!/usr/bin/env python
"""
Given strings S and T, finds the minimum substring of S which contains all the
characters in T. Done in O(n) time.
"""
def min_window(S, T):
freq = {}
for letter in T:
freq[letter] = 0
# search S until we find a substring with all chars
start = 0
while start < len(S) and S[start] not in T:
start += 1
if start >= len(S):
return ""
end = start
allfound = False
while not allfound and end < len(S):
char = S[end]
if char in T:
freq[char] += 1
allfound = allfound or all((freq[c] > 0 for c in T))
end += 1
end -= 1
if not allfound:  # no window containing every character of T was found
return ""
# search the rest of the string for smaller windows
min_start = start
min_end = end
end += 1
while end < len(S):
# expand on the right side until we match the front char
while end < len(S) and S[start] != S[end]:
if S[end] in freq:
freq[S[end]] += 1
end += 1
if end >= len(S):
break
# remove excess characters from the front
start += 1
while start < end:
char = S[start]
if char in T and freq[char] > 1:
freq[S[start]] -= 1
elif char in T and freq[char] == 1:
break
start += 1
# check if new window is smaller
if end - start < min_end - min_start:
min_start, min_end = start, end
end += 1
return S[min_start:min_end+1]
if __name__ == "__main__":
import sys
if len(sys.argv) >= 3:
print(min_window(*sys.argv[1:3]))
| mit | 4,124,336,163,982,471,700 | 27.104478 | 78 | 0.446097 | false |
swisscom/cleanerversion | versions/descriptors.py | 1 | 25432 | from collections import namedtuple
from django import VERSION
from django.core.exceptions import SuspiciousOperation, FieldDoesNotExist
from django.db import router, transaction
from django.db.models.base import Model
from django.db.models.fields.related import (ForwardManyToOneDescriptor,
ReverseManyToOneDescriptor,
ManyToManyDescriptor)
from django.db.models.fields.related_descriptors import \
create_forward_many_to_many_manager
from django.db.models.query_utils import Q
from django.utils.functional import cached_property
from versions.util import get_utc_now
def matches_querytime(instance, querytime):
"""
Checks whether the given instance satisfies the given QueryTime object.
:param instance: an instance of Versionable
:param querytime: QueryTime value to check against
"""
if not querytime.active:
return True
if not querytime.time:
return instance.version_end_date is None
return (instance.version_start_date <= querytime.time and (
instance.version_end_date is None or
instance.version_end_date > querytime.time))
class VersionedForwardManyToOneDescriptor(ForwardManyToOneDescriptor):
"""
The VersionedForwardManyToOneDescriptor is used when pointing another
Model using a VersionedForeignKey;
For example:
class Team(Versionable):
name = CharField(max_length=200)
city = VersionedForeignKey(City, null=True)
``team.city`` is a VersionedForwardManyToOneDescriptor
"""
def get_prefetch_queryset(self, instances, queryset=None):
"""
Overrides the parent method to:
- force queryset to use the querytime of the parent objects
- ensure that the join is done on identity, not id
- make the cache key identity, not id.
"""
if queryset is None:
queryset = self.get_queryset()
queryset._add_hints(instance=instances[0])
# CleanerVersion change 1: force the querytime to be the same as the
# prefetched-for instance.
# This is necessary to have reliable results and avoid extra queries
# for cache misses when accessing the child objects from their
# parents (e.g. choice.poll).
instance_querytime = instances[0]._querytime
if instance_querytime.active:
if queryset.querytime.active and \
queryset.querytime.time != instance_querytime.time:
raise ValueError(
"A Prefetch queryset that specifies an as_of time must "
"match the as_of of the base queryset.")
else:
queryset.querytime = instance_querytime
# CleanerVersion change 2: make rel_obj_attr return a tuple with
# the object's identity.
# rel_obj_attr = self.field.get_foreign_related_value
def versioned_fk_rel_obj_attr(versioned_rel_obj):
return versioned_rel_obj.identity,
rel_obj_attr = versioned_fk_rel_obj_attr
instance_attr = self.field.get_local_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
# CleanerVersion change 3: fake the related field so that it provides
# a name of 'identity'.
# related_field = self.field.foreign_related_fields[0]
related_field = namedtuple('VersionedRelatedFieldTuple', 'name')(
'identity')
# FIXME: This will need to be revisited when we introduce support for
# composite fields. In the meantime we take this practical approach to
# solve a regression on 1.6 when the reverse manager in hidden
# (related_name ends with a '+'). Refs #21410.
# The check for len(...) == 1 is a special case that allows the query
# to be join-less and smaller. Refs #21760.
if self.field.remote_field.is_hidden() or len(
self.field.foreign_related_fields) == 1:
query = {'%s__in' % related_field.name: set(
instance_attr(inst)[0] for inst in instances)}
else:
query = {'%s__in' % self.field.related_query_name(): instances}
queryset = queryset.filter(**query)
# Since we're going to assign directly in the cache,
# we must manage the reverse relation cache manually.
if not self.field.remote_field.multiple:
rel_obj_cache_name = self.field.remote_field.get_cache_name()
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_obj_cache_name, instance)
if VERSION[:1] < (2,):
return (queryset, rel_obj_attr, instance_attr, True,
self.field.get_cache_name())
else:
return (queryset, rel_obj_attr, instance_attr, True,
self.field.get_cache_name(), False)
def get_queryset(self, **hints):
queryset = self.field.remote_field.model.objects\
.db_manager(hints=hints).all()
if hasattr(queryset, 'querytime'):
if 'instance' in hints:
instance = hints['instance']
if hasattr(instance, '_querytime'):
if instance._querytime.active and \
instance._querytime != queryset.querytime:
queryset = queryset.as_of(instance._querytime.time)
else:
queryset = queryset.as_of(None)
return queryset
def __get__(self, instance, cls=None):
"""
The getter method returns the object that ``instance`` points to,
e.g. ``choice.poll`` returns a Poll instance, where the ForeignKey is
defined on the Choice class.
:param instance: The object on which the property was accessed
:param cls: The class of the instance object
:return: Returns a Versionable
"""
from versions.models import Versionable
if instance is None:
return self
current_elt = super(self.__class__, self).__get__(instance,
cls)
if not current_elt:
return None
if not isinstance(current_elt, Versionable):
raise TypeError("VersionedForeignKey target is of type " +
str(type(current_elt)) +
", which is not a subclass of Versionable")
if hasattr(instance, '_querytime'):
# If current_elt matches the instance's querytime, there's no
# need to make a database query.
if matches_querytime(current_elt, instance._querytime):
current_elt._querytime = instance._querytime
return current_elt
return current_elt.__class__.objects.as_of(
instance._querytime.time).get(identity=current_elt.identity)
else:
return current_elt.__class__.objects.current.get(
identity=current_elt.identity)
vforward_many_to_one_descriptor_class = VersionedForwardManyToOneDescriptor
class VersionedReverseManyToOneDescriptor(ReverseManyToOneDescriptor):
@cached_property
def related_manager_cls(self):
manager_cls = super(VersionedReverseManyToOneDescriptor,
self).related_manager_cls
rel_field = self.field
class VersionedRelatedManager(manager_cls):
def __init__(self, instance):
super(VersionedRelatedManager, self).__init__(instance)
# This is a hack, in order to get the versioned related objects
for key in self.core_filters.keys():
if '__exact' in key or '__' not in key:
self.core_filters[key] = instance.identity
def get_queryset(self):
from versions.models import VersionedQuerySet
queryset = super(VersionedRelatedManager, self).get_queryset()
# Do not set the query time if it is already correctly set.
# queryset.as_of() returns a clone of the queryset, and this
# will destroy the prefetched objects cache if it exists.
if isinstance(queryset, VersionedQuerySet) \
and self.instance._querytime.active \
and queryset.querytime != self.instance._querytime:
queryset = queryset.as_of(self.instance._querytime.time)
return queryset
def get_prefetch_queryset(self, instances, queryset=None):
"""
Overrides RelatedManager's implementation of
get_prefetch_queryset so that it works nicely with
VersionedQuerySets. It ensures that identities and time-limited
where clauses are used when selecting related reverse foreign
key objects.
"""
if queryset is None:
# Note that this intentionally call's VersionManager's
# get_queryset, instead of simply calling the superclasses'
# get_queryset (as the non-versioned RelatedManager does),
# because what is needed is a simple Versioned queryset
# without any restrictions (e.g. do not apply
# self.core_filters).
from versions.models import VersionManager
queryset = VersionManager.get_queryset(self)
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
instance_querytime = instances[0]._querytime
if instance_querytime.active:
if queryset.querytime.active and \
queryset.querytime.time != \
instance_querytime.time:
raise ValueError(
"A Prefetch queryset that specifies an as_of time "
"must match the as_of of the base queryset.")
else:
queryset.querytime = instance_querytime
rel_obj_attr = rel_field.get_local_related_value
instance_attr = rel_field.get_foreign_related_value
# Use identities instead of ids so that this will work with
# versioned objects.
instances_dict = {(inst.identity,): inst for inst in instances}
identities = [inst.identity for inst in instances]
query = {'%s__identity__in' % rel_field.name: identities}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must
# manage the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, rel_field.name, instance)
cache_name = rel_field.related_query_name()
if VERSION[:1] < (2,):
return (queryset, rel_obj_attr, instance_attr, False,
cache_name)
else:
return (queryset, rel_obj_attr, instance_attr, False,
cache_name, False)
def add(self, *objs, **kwargs):
from versions.models import Versionable
cloned_objs = ()
for obj in objs:
if not isinstance(obj, Versionable):
raise TypeError(
"Trying to add a non-Versionable to a "
"VersionedForeignKey relationship")
cloned_objs += (obj.clone(),)
super(VersionedRelatedManager, self).add(*cloned_objs,
**kwargs)
# clear() and remove() are present if the FK is nullable
if 'clear' in dir(manager_cls):
def clear(self, **kwargs):
"""
Overridden to ensure that the current queryset is used,
and to clone objects before they are removed, so that
history is not lost.
"""
bulk = kwargs.pop('bulk', True)
db = router.db_for_write(self.model,
instance=self.instance)
queryset = self.current.using(db)
with transaction.atomic(using=db, savepoint=False):
cloned_pks = [obj.clone().pk for obj in queryset]
update_qs = self.current.filter(pk__in=cloned_pks)
self._clear(update_qs, bulk)
if 'remove' in dir(manager_cls):
def remove(self, *objs, **kwargs):
from versions.models import Versionable
val = rel_field.get_foreign_related_value(self.instance)
cloned_objs = ()
for obj in objs:
# Is obj actually part of this descriptor set?
# Otherwise, silently go over it, since Django
# handles that case
if rel_field.get_local_related_value(obj) == val:
# Raise for non-versionable items
if not isinstance(obj, Versionable):
raise TypeError(
"Trying to remove a non-Versionable from "
"a VersionedForeignKey realtionship")
cloned_objs += (obj.clone(),)
super(VersionedRelatedManager, self).remove(*cloned_objs,
**kwargs)
return VersionedRelatedManager
class VersionedManyToManyDescriptor(ManyToManyDescriptor):
@cached_property
def related_manager_cls(self):
model = self.rel.related_model if self.reverse else self.rel.model
return create_versioned_forward_many_to_many_manager(
model._default_manager.__class__,
self.rel,
reverse=self.reverse,
)
def __set__(self, instance, value):
"""
Completely overridden to avoid bulk deletion that happens when the
parent method calls clear().
The parent method's logic is basically: clear all in bulk, then add
the given objects in bulk.
Instead, we figure out which ones are being added and removed, and
call add and remove for these values.
This lets us retain the versioning information.
Since this is a many-to-many relationship, it is assumed here that
the django.db.models.deletion.Collector logic, that is used in
clear(), is not necessary here. Collector collects related models,
e.g. ones that should also be deleted because they have
a ON CASCADE DELETE relationship to the object, or, in the case of
"Multi-table inheritance", are parent objects.
:param instance: The instance on which the getter was called
:param value: iterable of items to set
"""
if not instance.is_current:
raise SuspiciousOperation(
"Related values can only be directly set on the current "
"version of an object")
if not self.field.remote_field.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError((
"Cannot set values on a ManyToManyField "
"which specifies an intermediary model. "
"Use %s.%s's Manager instead.") % (
opts.app_label, opts.object_name))
manager = self.__get__(instance)
# Below comment is from parent __set__ method. We'll force
# evaluation, too:
# clear() can change expected output of 'value' queryset, we force
# evaluation of queryset before clear; ticket #19816
value = tuple(value)
being_removed, being_added = self.get_current_m2m_diff(instance, value)
timestamp = get_utc_now()
manager.remove_at(timestamp, *being_removed)
manager.add_at(timestamp, *being_added)
def get_current_m2m_diff(self, instance, new_objects):
"""
:param instance: Versionable object
:param new_objects: objects which are about to be associated with
instance
:return: (being_removed id list, being_added id list)
:rtype : tuple
"""
new_ids = self.pks_from_objects(new_objects)
relation_manager = self.__get__(instance)
filter = Q(**{relation_manager.source_field.attname: instance.pk})
qs = self.through.objects.current.filter(filter)
try:
# Django 1.7
target_name = relation_manager.target_field.attname
except AttributeError:
# Django 1.6
target_name = relation_manager.through._meta.get_field_by_name(
relation_manager.target_field_name)[0].attname
current_ids = set(qs.values_list(target_name, flat=True))
being_removed = current_ids - new_ids
being_added = new_ids - current_ids
return list(being_removed), list(being_added)
def pks_from_objects(self, objects):
"""
Extract all the primary key strings from the given objects.
Objects may be Versionables, or bare primary keys.
:rtype : set
"""
return {o.pk if isinstance(o, Model) else o for o in objects}
def create_versioned_forward_many_to_many_manager(superclass, rel,
reverse=None):
many_related_manager_klass = create_forward_many_to_many_manager(
superclass, rel, reverse)
class VersionedManyRelatedManager(many_related_manager_klass):
def __init__(self, *args, **kwargs):
super(VersionedManyRelatedManager, self).__init__(*args, **kwargs)
# Additional core filters are:
# version_start_date <= t &
# (version_end_date > t | version_end_date IS NULL)
# but we cannot work with the Django core filters, since they
# don't support ORing filters, which is a thing we need to
# consider the "version_end_date IS NULL" case;
# So, we define our own set of core filters being applied when
# versioning
try:
_ = self.through._meta.get_field('version_start_date')
_ = self.through._meta.get_field('version_end_date')
except FieldDoesNotExist as e:
fields = [f.name for f in self.through._meta.get_fields()]
print(str(e) + "; available fields are " + ", ".join(fields))
raise e
# FIXME: this probably does not work when auto-referencing
def get_queryset(self):
"""
Add a filter to the queryset, limiting the results to be pointed
by relationship that are valid for the given timestamp (which is
taken at the current instance, or set to now, if not available).
Long story short, apply the temporal validity filter also to the
intermediary model.
"""
queryset = super(VersionedManyRelatedManager, self).get_queryset()
if hasattr(queryset, 'querytime'):
if self.instance._querytime.active and \
self.instance._querytime != queryset.querytime:
queryset = queryset.as_of(self.instance._querytime.time)
return queryset
def _remove_items(self, source_field_name, target_field_name, *objs):
"""
Instead of removing items, we simply set the version_end_date of
the current item to the current timestamp --> t[now].
As a result, there is no longer a current entry with that identity,
which is equivalent to it not existing for timestamps greater than t[now].
"""
return self._remove_items_at(None, source_field_name,
target_field_name, *objs)
def _remove_items_at(self, timestamp, source_field_name,
target_field_name, *objs):
if objs:
if timestamp is None:
timestamp = get_utc_now()
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
# The Django 1.7-way is preferred
if hasattr(self, 'target_field'):
fk_val = \
self.target_field \
.get_foreign_related_value(obj)[0]
else:
raise TypeError(
"We couldn't find the value of the foreign "
"key, this might be due to the use of an "
"unsupported version of Django")
old_ids.add(fk_val)
else:
old_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
qs = self.through._default_manager.using(db).filter(**{
source_field_name: self.instance.id,
'%s__in' % target_field_name: old_ids
}).as_of(timestamp)
for relation in qs:
relation._delete_at(timestamp)
if 'add' in dir(many_related_manager_klass):
def add(self, *objs):
if not self.instance.is_current:
raise SuspiciousOperation(
"Adding many-to-many related objects is only possible "
"on the current version")
# The ManyRelatedManager.add() method uses the through model's
# default manager to get a queryset when looking at which
# objects already exist in the database.
# In order to restrict the query to the current versions when
# that is done, we temporarily replace the queryset's using
# method so that the version validity condition can be
# specified.
klass = self.through._default_manager.get_queryset().__class__
__using_backup = klass.using
def using_replacement(self, *args, **kwargs):
qs = __using_backup(self, *args, **kwargs)
return qs.as_of(None)
klass.using = using_replacement
super(VersionedManyRelatedManager, self).add(*objs)
klass.using = __using_backup
def add_at(self, timestamp, *objs):
"""
This function adds an object at a certain point in time
(timestamp)
"""
# First off, define the new constructor
def _through_init(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.version_birth_date = timestamp
self.version_start_date = timestamp
# Through-classes have an empty constructor, so it can easily
# be overwritten when needed;
# This is not the default case, so the overwrite only takes
# place when we "modify the past"
self.through.__init_backup__ = self.through.__init__
self.through.__init__ = _through_init
# Do the add operation
self.add(*objs)
# Remove the constructor again (by replacing it with the
# original empty constructor)
self.through.__init__ = self.through.__init_backup__
del self.through.__init_backup__
add_at.alters_data = True
if 'remove' in dir(many_related_manager_klass):
def remove_at(self, timestamp, *objs):
"""
Performs the act of removing specified relationships at a
specified time (timestamp);
Note that it is the relationships that are ended at the given time,
not the related objects themselves.
"""
self._remove_items_at(timestamp, self.source_field_name,
self.target_field_name, *objs)
# For consistency, also handle the symmetrical case
if self.symmetrical:
self._remove_items_at(timestamp, self.target_field_name,
self.source_field_name, *objs)
remove_at.alters_data = True
return VersionedManyRelatedManager
| apache-2.0 | 2,862,264,786,888,047,600 | 44.741007 | 79 | 0.557093 | false |
SlashRoot/WHAT | settings/logging_settings.py | 1 | 2154 | import sys
if not 'test' in sys.argv or 'test_coverage' in sys.argv:
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'standard': {
'format': '%(levelname)s %(name)s %(asctime)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(name)s: %(message)s'
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'django.utils.log.NullHandler',
},
'file':{
'level':'WARNING',
'class':'logging.handlers.TimedRotatingFileHandler',
'when': 'midnight',
'filename': '/var/log/what/general.log',
'formatter': 'standard'
},
'comm_file':{
'level':'INFO',
'class':'logging.handlers.TimedRotatingFileHandler',
'when': 'midnight',
'filename': '/var/log/what/comm.log',
'formatter': 'standard'
},
'email_file':{
'level':'INFO',
'class':'logging.handlers.TimedRotatingFileHandler',
'when': 'midnight',
'filename': '/var/log/what/email.log',
'formatter': 'standard'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django': {
'handlers':['null'],
'propagate': True,
'level':'INFO',
},
'django.request': {
'handlers': ['file', 'mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'': {
'handlers': ['file', 'mail_admins'],
'level': 'ERROR',
},
'comm': {
'handlers': ['comm_file'],
'level': 'INFO',
},
'email': {
'handlers': ['email_file'],
'level': 'INFO',
},
}
} | mit | -5,500,976,408,281,466,000 | 26.628205 | 95 | 0.423863 | false |
jrbl/invenio | modules/bibauthorid/lib/bibauthorid_cluster_set.py | 1 | 8369 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from itertools import chain, groupby
from operator import itemgetter
from bibauthorid_matrix_optimization import maximized_mapping
from bibauthorid_backinterface import save_cluster
from bibauthorid_backinterface import get_all_papers_of_pids
from bibauthorid_backinterface import get_bib10x, get_bib70x
from bibauthorid_backinterface import get_all_valid_bibrecs
from bibauthorid_backinterface import get_bibrefrec_subset
from bibauthorid_backinterface import remove_result_cluster
from bibauthorid_name_utils import generate_last_name_cluster_str
class Blob:
def __init__(self, personid_records):
'''
@param personid_records:
A list of tuples: (personid, bibrefrec, flag).
Notice that all bibrefrecs should be the same
since the Blob represents only one bibrefrec.
'''
self.bib = personid_records[0][1]
assert all(p[1] == self.bib for p in personid_records)
self.claimed = set()
self.assigned = set()
self.rejected = set()
for pid, unused, flag in personid_records:
if flag > 1:
self.claimed.add(pid)
elif flag >= -1:
self.assigned.add(pid)
else:
self.rejected.add(pid)
def create_blobs_by_pids(pids):
'''
Returns a list of blobs for a given set of personids.
Blob is an object which describes all information
for a bibrefrec in the personid table.
@type pids: iterable of integers
'''
all_bibs = get_all_papers_of_pids(pids)
all_bibs = ((x[0], (int(x[1]), x[2], x[3]), x[4]) for x in all_bibs)
bibs_dict = groupby(sorted(all_bibs, key=itemgetter(1)), key=itemgetter(1))
blobs = [Blob(list(bibs)) for unused, bibs in bibs_dict]
return blobs
def group_blobs(blobs):
'''
Separates the blobs into two groups
of objects - those with claims and
those without.
'''
# created from blobs, which are claimed
# [(bibrefrec, personid)]
union = []
# created from blobs, which are not claimed
# [(bibrefrec, personid/None, [personid])]
independent = []
for blob in blobs:
assert len(blob.claimed) + len(blob.assigned) == 1
if len(blob.claimed) > 0:
union.append((blob.bib, list(blob.claimed)[0]))
else:
independent.append((blob.bib, list(blob.assigned)[0], list(blob.rejected)))
return (union, independent)
class Cluster_set:
class Cluster:
def __init__(self, bibs, hate = []):
# hate is a symmetrical relation
self.bibs = set(bibs)
self.hate = set(hate)
def hates(self, other):
return other in self.hate
def quarrel(self, cl2):
self.hate.add(cl2)
cl2.hate.add(self)
def _debug_test_hate_relation(self):
for cl2 in self.hate:
if not self.hates(cl2) or not cl2.hates(self):
return False
return True
def __init__(self):
self.clusters = []
def create_skeleton(self, personids, last_name):
blobs = create_blobs_by_pids(personids)
self.last_name = last_name
union, independent = group_blobs(blobs)
union_clusters = {}
for uni in union:
union_clusters[uni[1]] = union_clusters.get(uni[1], []) + [uni[0]]
cluster_dict = dict((personid, self.Cluster(bibs)) for personid, bibs in union_clusters.items())
self.clusters = cluster_dict.values()
for i, cl in enumerate(self.clusters):
cl.hate = set(chain(self.clusters[:i], self.clusters[i+1:]))
for ind in independent:
bad_clusters = [cluster_dict[i] for i in ind[2] if i in cluster_dict]
cl = self.Cluster([ind[0]], bad_clusters)
for bcl in bad_clusters:
bcl.hate.add(cl)
self.clusters.append(cl)
# Creates a cluster set, ignoring the claims and the
# rejected papers.
def create_pure(self, personids, last_name):
blobs = create_blobs_by_pids(personids)
self.last_name = last_name
self.clusters = [self.Cluster((blob.bib,)) for blob in blobs]
# no longer used
def create_body(self, blobs):
union, independent = group_blobs(blobs)
arranged_clusters = {}
for cls in chain(union, independent):
arranged_clusters[cls[1]] = arranged_clusters.get(cls[1], []) + [cls[0]]
for pid, bibs in arranged_clusters.items():
cl = self.Cluster(bibs)
cl.personid = pid
self.clusters.append(cl)
    # a *very* slow function that checks whether the hate relation is still symmetrical
def _debug_test_hate_relation(self):
for cl1 in self.clusters:
if not cl1._debug_test_hate_relation():
return False
return True
# similar to the function above
def _debug_duplicated_recs(self, mapping=None):
for cl in self.clusters:
if mapping:
setty = set(mapping[x][2] for x in cl.bibs)
else:
setty = set(x[2] for x in cl.bibs)
if len(cl.bibs) != len(setty):
return False
return True
# No longer used but it might be handy.
@staticmethod
def match_cluster_sets(cs1, cs2):
"""
        This function tries to generate the best matching
        between cs1 and cs2 according to the shared bibrefrecs.
        It returns a dictionary whose keys are clusters in cs1
        and whose values are clusters in cs2.
        @param cs1, cs2: cluster_set
@return: dictionary with the matching clusters.
@return type: { cluster : cluster }
"""
matr = [[len(cl1.bibs & cl2.bibs) for cl2 in cs2.clusters] for cl1 in cs1.clusters]
mapping = maximized_mapping(matr)
return dict((cs1.clusters[mappy[0]], cs2.clusters[mappy[1]]) for mappy in mapping)
def store(self):
'''
Stores the cluster set in a special table.
This is used to store the results of
tortoise/wedge in a table and later merge them
with personid.
'''
remove_result_cluster("%s." % self.last_name)
named_clusters = (("%s.%d" % (self.last_name, idx), cl) for idx, cl in enumerate(self.clusters))
map(save_cluster, named_clusters)
def cluster_sets_from_marktables():
# { (100, 123) -> name }
ref100 = get_bib10x()
ref700 = get_bib70x()
bibref_2_name = dict([((100, ref), generate_last_name_cluster_str(name)) for ref, name in ref100] +
[((700, ref), generate_last_name_cluster_str(name)) for ref, name in ref700])
all_recs = get_all_valid_bibrecs()
all_bibrefrecs = chain(set((100, ref, rec) for rec, ref in get_bibrefrec_subset(100, all_recs, map(itemgetter(0), ref100))),
set((700, ref, rec) for rec, ref in get_bibrefrec_subset(700, all_recs, map(itemgetter(0), ref700))))
last_name_2_bibs = {}
for bibrefrec in all_bibrefrecs:
table, ref, unused = bibrefrec
name = bibref_2_name[(table, ref)]
last_name_2_bibs[name] = last_name_2_bibs.get(name, []) + [bibrefrec]
cluster_sets = []
for name, bibrecrefs in last_name_2_bibs.items():
new_cluster_set = Cluster_set()
new_cluster_set.clusters = [Cluster_set.Cluster([bib]) for bib in bibrecrefs]
new_cluster_set.last_name = name
cluster_sets.append(new_cluster_set)
return cluster_sets
| gpl-2.0 | -7,200,099,607,998,172,000 | 33.870833 | 128 | 0.6168 | false |
chundongwang/Guess2014 | minimizeJs.py | 1 | 3325 | import os
import time
from httplib import HTTPConnection
from urllib import urlencode
files = [
"js/main.js",
"js/service/guesser.js",
"js/service/miner.js",
"js/directive/navbar.js",
"js/directive/notice.js",
"js/directive/footer.js",
"js/directive/matchdiv.js",
"js/directive/betmodal.js",
"js/directive/betmodalextra.js",
"js/directive/eulamodal.js",
"js/directive/chartwin.js",
"js/directive/chartfav.js",
"js/directive/chartleast.js",
"js/directive/spinner.js",
"js/directive/chartallbets.js",
"js/directive/chartpop.js",
"js/directive/chartbetscoredist.js",
"js/directive/chartbetmatchdist.js",
"js/directive/charttopbet.js",
"js/view/topview.js",
"js/view/home.js",
"js/view/date.js",
"js/view/my.js",
"js/view/betanalysis.js",
"js/view/bestbet.js",
"js/view/carlnan.js"
]
raw_files = [
"js/third-party/Chart.min.js",
"js/third-party/moment.min.js"
]
copyright = '/*! GuessWorldCup2014 (c) 2014 */'
index_template = """{%% extends "base.html" %%}
{%% block script %%}
<script src="//ajax.aspnetcdn.com/ajax/jQuery/jquery-1.11.0.min.js"></script>
<script src="//ajax.aspnetcdn.com/ajax/bootstrap/3.1.1/bootstrap.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular-route.min.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular-animate.js"></script>
<script src="//ajax.googleapis.com/ajax/libs/angularjs/1.2.16/angular-cookies.min.js"></script>
<script src="%s"></script>
{%% endblock %%}"""
def replaceIndexHtml(root,filename):
with open(os.path.join(root, 'templates/index.html'), 'w+') as f:
f.write(index_template % filename)
def minimizeAllJs(root):
minimized_content = minimizeJsHelper(combineFiles(root, files))
raw_content = combineFiles(root, raw_files)
filename = 'js/%s.js'%str(int(time.time()))
with open(os.path.join(root, filename), 'w+') as f:
f.write(raw_content)
f.write('\n'+copyright+'\n')
f.write(minimized_content)
return filename
def combineFiles(root, file_list):
combined_content = ''
for file in file_list:
with open(os.path.join(root,file),'r+') as f:
combined_content += f.read()
combined_content += '\n'
return combined_content
def minimizeJs(path):
js_content = None
with open(path,'r+') as f:
js_content = f.read()
return minimizeJsHelper(js_content)
def minimizeJsHelper(js_content):
headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
params = urlencode({
'js_code': js_content,
'compilation_level': 'SIMPLE_OPTIMIZATIONS',
'output_format': 'text',
'output_info': 'compiled_code'
})
conn = HTTPConnection('closure-compiler.appspot.com');
conn.request('POST', '/compile', params, headers)
r = conn.getresponse()
if r.status == 200:
data = r.read()
if not data.startswith('Error'):
return data
return None
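# Usage sketch (added for illustration; the file paths below are hypothetical):
#
#   minified = minimizeJs('js/main.js')
#   if minified is not None:
#       with open('js/main.min.js', 'w+') as f:
#           f.write(minified)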
if __name__ == '__main__':
root = os.path.dirname(os.path.abspath(__file__))
replaceIndexHtml(root, minimizeAllJs(root))
| apache-2.0 | -6,989,141,133,847,485,000 | 31.920792 | 97 | 0.640602 | false |
bulax41/Commands | scripts/cme_decode_pcap.py | 1 | 1258 | #!/usr/bin/env python
import dpkt
import argparse
import struct
import sys
import datetime
def main():
parser = argparse.ArgumentParser(description='Read PCAP file, decode CME data and output message sequence gaps')
parser.add_argument('-f','--file',help="PCAP File to read")
args = parser.parse_args()
with open(args.file, 'rb') as f:
pcap = dpkt.pcap.Reader(f)
Packets = 0
Gaps = 0
MsgSeqNum = 0
for timestamp, buf in pcap:
eth = dpkt.ethernet.Ethernet(buf)
ip = eth.data
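            # Assumption (added comment): the CME feed is carried over UDP and the
            # payload starts with a 4-byte big-endian sequence number followed by
            # an 8-byte sending-time timestamp, hence the ">IQ" format (12 bytes).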
            (seqnum, pcktime) = struct.unpack_from(">IQ", ip.data.data)
diff = int(seqnum) - MsgSeqNum
if MsgSeqNum == 0:
print "Initial sequence number: %s" % int(seqnum)
elif diff!=1:
Gaps = Gaps + diff - 1
                now = datetime.utcfromtimestamp(timestamp).strftime("%b %d %Y %X.%f")
                print "Gap detected, %s Packets, Sequence Numbers %s-%s at %s" % (diff-1, MsgSeqNum+1, int(seqnum)-1, now)
MsgSeqNum = int(seqnum)
Packets = Packets + 1
        print "Ending Sequence number: %s, total packets %s, total gapped packets %s" % (MsgSeqNum, Packets, Gaps)
if __name__ == '__main__':
main()
| gpl-3.0 | 7,447,482,395,164,316,000 | 29.682927 | 120 | 0.573132 | false |
ging/horizon | openstack_dashboard/dashboards/idm/utils.py | 1 | 4929 | # Copyright (C) 2014 Universidad Politecnica de Madrid
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import math
from horizon import exceptions
from django.conf import settings
from django.core import urlresolvers
from openstack_dashboard import api
from openstack_dashboard import fiware_api
from openstack_dashboard.local import local_settings
from django_gravatar.helpers import get_gravatar_url, has_gravatar
LOG = logging.getLogger('idm_logger')
DEFAULT_ORG_MEDIUM_AVATAR = 'dashboard/img/logos/medium/group.png'
DEFAULT_APP_MEDIUM_AVATAR = 'dashboard/img/logos/medium/app.png'
DEFAULT_USER_MEDIUM_AVATAR = 'dashboard/img/logos/medium/user.png'
DEFAULT_ORG_SMALL_AVATAR = 'dashboard/img/logos/small/group.png'
DEFAULT_APP_SMALL_AVATAR = 'dashboard/img/logos/small/app.png'
DEFAULT_USER_SMALL_AVATAR = 'dashboard/img/logos/small/user.png'
AVATAR_SIZE = {'img_small': 25,
'img_medium': 36,
'img_original': 100}
def filter_default(items):
"""Remove from a list the automated created project for a user. This project
is created during the user registration step and is needed for the user to be
able to perform operations in the cloud, as a work around the Keystone-OpenStack
project behaviour. We don't want the user to be able to do any operations to this
project nor even notice it exists.
Also filters other default items we dont want to show, like internal
applications.
"""
filtered = [i for i in items if not getattr(i, 'is_default', False)]
return filtered
def check_elements(elements, valid_elements):
"""Checks a list of elements are present in an allowed elements list"""
invalid_elements = [k for k in elements if k not in valid_elements]
if invalid_elements:
raise TypeError('The elements {0} are not defined \
in {1}'.format(invalid_elements, valid_elements))
def swap_dict(old_dict):
"""Returns a new dictionary in wich the keys are all the values of the old
dictionary and the values are lists of keys that had that value.
Example:
d = { 'a':['c','v','b'], 's':['c','v','d']}
swap_dict(d) -> {'c': ['a', 's'], 'b': ['a'], 'd': ['s'], 'v': ['a', 's']}
"""
new_dict = {}
for key in old_dict:
for value in old_dict[key]:
new_dict[value] = new_dict.get(value, [])
new_dict[value].append(key)
return new_dict
def get_avatar(obj, avatar_type, default_avatar):
"""Gets the object avatar or a default one."""
if type(obj) == dict:
use_gravatar = obj.get('use_gravatar', None)
email = obj.get('name', None)
avatar = obj.get(avatar_type, None)
else:
use_gravatar = getattr(obj, 'use_gravatar', None)
email = getattr(obj, 'name', None)
avatar = getattr(obj, avatar_type, None)
if use_gravatar and has_gravatar(email):
return get_gravatar_url(email, size=AVATAR_SIZE[avatar_type])
if avatar and avatar != '':
return settings.MEDIA_URL + avatar
else:
return settings.STATIC_URL + default_avatar
def get_switch_url(organization, check_switchable=True):
if check_switchable and not getattr(organization, 'switchable', False):
return False
if type(organization) == dict:
organization_id = organization['id']
else:
organization_id = organization.id
return urlresolvers.reverse('switch_tenants',
kwargs={'tenant_id': organization_id})
def page_numbers(elements, page_size):
return range(1, int(math.ceil(float(len(elements))/page_size)) + 1)
def total_pages(elements, page_size):
if not elements:
return 0
return page_numbers(elements, page_size)[-1]
def paginate_list(elements, page_number, page_size):
index = (page_number - 1) * page_size
return elements[index:index + page_size]
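# Illustrative use of the pagination helpers (values are examples only):
#
#   elements = range(23)
#   page_numbers(elements, 10)      # -> [1, 2, 3]
#   total_pages(elements, 10)       # -> 3
#   paginate_list(elements, 2, 10)  # -> elements 10..19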
class PickleObject():
"""Extremely simple class that holds the very little information we need
    to cache. Keystoneclient resource objects are not picklable.
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def obj_to_jsonable_dict(obj, attrs):
"""converts a object into a json-serializable dict, geting the
specified attributes.
"""
as_dict = {}
for attr in attrs:
if hasattr(obj, attr):
as_dict[attr] = getattr(obj, attr)
return as_dict
| apache-2.0 | 8,689,818,560,495,523,000 | 32.530612 | 86 | 0.673159 | false |
lhupfeldt/multiconf | test/mixed_test.py | 1 | 2458 | # Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf import mc_config, ConfigItem, RepeatableConfigItem, ConfigBuilder, MC_REQUIRED
from multiconf.decorators import nested_repeatables, named_as, required
from multiconf.envs import EnvFactory
from .utils.tstclasses import ItemWithName
ef = EnvFactory()
pprd = ef.Env('pprd')
prod = ef.Env('prod')
def test_configbuilders_alternating_with_items_repeatable_multilevel_required():
class some_item(ConfigItem):
xx = 1
class another_item(ConfigItem):
xx = 2
@required('some_item')
@named_as('inners')
class InnerItem(RepeatableConfigItem):
def __init__(self, name, some_attribute=MC_REQUIRED):
super().__init__(mc_key=name)
self.name = name
self.some_attribute = some_attribute
class InnerBuilder(ConfigBuilder):
def __init__(self):
super().__init__()
self.some_attribute = MC_REQUIRED
def mc_build(self):
InnerItem('innermost', self.some_attribute)
@nested_repeatables('inners')
@required('another_item')
class MiddleItem(RepeatableConfigItem):
def __init__(self, mc_key):
super().__init__(mc_key=mc_key)
self.id = mc_key
self.another_attribute = MC_REQUIRED
class MiddleBuilder(ConfigBuilder):
def __init__(self, name):
super().__init__()
self.name = name
self.builder_attribute = MC_REQUIRED
def mc_build(self):
with MiddleItem(self.name) as mi:
mi.setattr('another_attribute', default=9)
another_item()
class OuterBuilder(ConfigBuilder):
def __init__(self):
super().__init__()
def mc_build(self):
with MiddleBuilder('base') as mb:
mb.builder_attribute = 1
with InnerBuilder() as ib:
ib.some_attribute = 1
some_item()
class another_item(ConfigItem):
xx = 2
@nested_repeatables('MiddleItems')
class OuterItem(ConfigItem):
pass
@mc_config(ef, load_now=True)
def config(_):
with ItemWithName('myp'):
with OuterItem():
OuterBuilder()
cr = config(prod).ItemWithName
cr.json(builders=True)
# TODO, verify values
| bsd-3-clause | 8,689,661,886,113,094,000 | 27.917647 | 93 | 0.594793 | false |
FLYKingdom/MyCode | PycharmProjects/PythonTest/FunctionalProgramming.py | 1 | 3606 | # Functional programming
# Higher-order functions
# The map function
def square(x):
    return x * x
r = map(square, [1, 2, 3])
print(list(r))
# The reduce function
from functools import reduce
def fn(x, y):
return x * 10 + y
print(reduce(fn, [1, 2, 3]))
is_str = isinstance(reduce(fn, [1, 2, 3]), int)
print(is_str)
DIGITS = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9}
def str2int(s):
def fn(x, y):
return x * 10 + y
def char2num(s):
return DIGITS[s]
return reduce(fn, map(char2num, s))
def char2num1(s):
return DIGITS[s]
def str2int1(s):
return reduce(lambda x, y: x * 10 + y, map(char2num1, s))
print(str2int('6450131'))
print(str2int1('6450131'))
# Compute the product of a list
def prod(l):
return reduce(lambda x, y: x * y, l)
print(prod([1, 2, 3]))
# filter
def not_empty(s):
return s and s.strip()
l = list(filter(not_empty, ['junjun', None, '', 'A', ' ']))
print(l)
# Prime numbers
def _odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x: x % n > 0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n), it)
s = ''
for n in primes():
if n < 30:
s = s + str(n) + ','
else:
break
print('s:', s)
# Palindromic numbers
import math
def is_palindrome(n):
strs = str(n)
count = len(strs)
center = math.ceil(count // 2)
i = 0
j = count - 1
while True:
if j <= i:
return True
if strs[i] == strs[j]:
i = i + 1
j = j - 1
else:
return False
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101,
111, 121, 131, 141, 151, 161, 171, 181, 191]:
    print('Test passed!')
else:
    print('Test failed!')
# Sorting functions
L = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
def by_name(t):
return t[0]
def by_score(t):
return t[1]
print('sorted t1:', sorted(L, key=by_name))
print('sorted t2:', sorted(L, key=by_score, reverse=True))
# Returning functions and closures (not fully grasped yet; had trouble writing the incrementing-counter function)
# Incrementing integers
def count():
fs = []
for i in range(1, 4):
def f():
return i * i
fs.append(f)
return fs
def createCounter():
a = 0
def counter():
nonlocal a
a += 1
return a
return counter
counterA = createCounter()
print(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5
counterB = createCounter()
if [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:
    print('Test passed!')
else:
    print('Test failed!')
# Anonymous functions: lambda x: x + 1
counter = lambda x: x + 1
print(counter(1))
# Decorators
# __name__
print('count: name', count.__name__, 'annotations ', count.__annotations__, 'class ', count.__class__, 'code',
count.__code__)
import functools
def log(text):
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
print('call %s() text:' % func.__name__, text)
return func(*args, **kw)
return wrapper
return decorator
@log('new text')
def now():
print('2018-8-27')
f = now
f()
print(f.__name__)
# Partial functions
import functools
int2 = functools.partial(int, base=2)
print(int2('100'))
max10 = functools.partial(max, 10)
print(max10(1, 2, 3))
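# One more partial-function example (added for illustration)
int16 = functools.partial(int, base=16)
print(int16('ff'))  # 255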
| mit | 1,903,475,979,759,218,700 | 14.594595 | 117 | 0.519931 | false |
roatienza/Deep-Learning-Experiments | keras/regularization/mlp-mnist-l2.py | 1 | 2077 | '''
MLP network for MNIST digits classification with L2 reg
Test accuracy: 95.2%
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# numpy package
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.datasets import mnist
from keras.regularizers import l2
from keras.utils import to_categorical
# load mnist dataset
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# compute the number of labels
num_labels = len(np.unique(y_train))
# convert to one-hot vector
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# image dimensions (assumed square)
image_size = x_train.shape[1]
input_size = image_size * image_size
# for mlp, the input dim is a vector, so we reshape
x_train = np.reshape(x_train, [-1, input_size])
# we train our network using float data
x_train = x_train.astype('float32') / 255
x_test = np.reshape(x_test, [-1, input_size])
x_test = x_test.astype('float32') / 255
# network parameters
batch_size = 128
hidden_units = 256
kernel_regularizer = l2(0.0001)
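# Note (added comment): l2(0.0001) adds 0.0001 * sum(weight ** 2) of every layer
# it is attached to as a penalty term on the training loss, which discourages
# large weights and helps reduce overfitting.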
# this is 3-layer MLP with ReLU and l2 kernel regularizer
model = Sequential()
model.add(Dense(hidden_units,
kernel_regularizer=kernel_regularizer,
input_dim=input_size))
model.add(Activation('relu'))
model.add(Dense(hidden_units,
kernel_regularizer=kernel_regularizer))
model.add(Activation('relu'))
model.add(Dense(num_labels))
# this is the output for one-hot vector
model.add(Activation('softmax'))
model.summary()
# loss function for one-hot vector
# use of sgd optimizer
# accuracy is good metric for classification tasks
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# train the network
model.fit(x_train, y_train, epochs=20, batch_size=batch_size)
# validate the model on test dataset to determine generalization
score = model.evaluate(x_test, y_test, batch_size=batch_size)
print("\nTest accuracy: %.1f%%" % (100.0 * score[1]))
| mit | 8,363,157,154,739,262,000 | 29.544118 | 64 | 0.718825 | false |
drtuxwang/system-config | bin/plink.py | 1 | 4176 | #!/usr/bin/env python3
"""
Create links to picture/video files.
"""
import argparse
import glob
import os
import signal
import sys
from typing import List
import config_mod
import file_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_depth(self) -> int:
"""
Return directory depth
"""
return self._args.depth[0]
def get_directories(self) -> List[str]:
"""
Return list of directories.
"""
return self._args.directories
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(
description='Create links to picture/video files.',
)
parser.add_argument(
'-depth',
nargs=1,
type=int,
default=[1],
            help='Number of directories to add to the link name.'
)
parser.add_argument(
'directories',
nargs='+',
metavar='directory',
            help='Directory containing picture/video files to link.'
)
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
for directory in self._args.directories:
if not os.path.isdir(directory):
raise SystemExit(
sys.argv[0] + ': Source directory "' + directory +
'" does not exist.'
)
if os.path.samefile(directory, os.getcwd()):
raise SystemExit(
sys.argv[0] + ': Source directory "' + directory +
'" cannot be current directory.'
)
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
if os.name == 'nt':
argv = []
for arg in sys.argv:
files = glob.glob(arg) # Fixes Windows globbing bug
if files:
argv.extend(files)
else:
argv.append(arg)
sys.argv = argv
@staticmethod
def run() -> int:
"""
Start program
"""
options = Options()
depth = options.get_depth()
config = config_mod.Config()
images_extensions = (
config.get('image_extensions') + config.get('video_extensions')
)
for directory in options.get_directories():
linkdir = '_'.join(directory.split(os.sep)[-depth:])
for file in sorted(glob.glob(os.path.join(directory, '*'))):
if os.path.splitext(file)[1].lower() in images_extensions:
link = linkdir + '_' + os.path.basename(file)
if link.endswith(('.mp4', '.webm')):
link += '.gif'
if not os.path.islink(link):
try:
os.symlink(file, link)
except OSError as exception:
raise SystemExit(
sys.argv[0] + ': Cannot create "' +
link + '" link.'
) from exception
file_stat = file_mod.FileStat(file)
file_time = file_stat.get_time()
os.utime(
link,
(file_time, file_time),
follow_symlinks=False,
)
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
| gpl-2.0 | -4,907,487,420,833,180,000 | 26.655629 | 75 | 0.462644 | false |
sunqm/pyscf | pyscf/tools/dump_mat.py | 1 | 8525 | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.lib.parameters import OUTPUT_DIGITS, OUTPUT_COLS
from pyscf import __config__
BASE = getattr(__config__, 'BASE', 0)
def dump_tri(stdout, c, label=None,
ncol=OUTPUT_COLS, digits=OUTPUT_DIGITS, start=BASE):
''' Format print for the lower triangular part of an array
Args:
stdout : file object
eg sys.stdout, or stdout = open('/path/to/file') or
mol.stdout if mol is an object initialized from :class:`gto.Mole`
c : numpy.ndarray
coefficients
Kwargs:
label : list of strings
Row labels (default is 1,2,3,4,...)
ncol : int
Number of columns in the format output (default 5)
digits : int
Number of digits of precision for floating point output (default 5)
start : int
            The index to start counting from (default 0)
Examples:
>>> import sys, numpy
>>> dm = numpy.eye(3)
>>> dump_tri(sys.stdout, dm)
#0 #1 #2
0 1.00000
1 0.00000 1.00000
2 0.00000 0.00000 1.00000
>>> from pyscf import gto
>>> mol = gto.M(atom='C 0 0 0')
>>> dm = numpy.eye(mol.nao_nr())
>>> dump_tri(sys.stdout, dm, label=mol.ao_labels(), ncol=9, digits=2)
#0 #1 #2 #3 #4 #5 #6 #7 #8
0 C 1s 1.00
0 C 2s 0.00 1.00
0 C 3s 0.00 0.00 1.00
0 C 2px 0.00 0.00 0.00 1.00
0 C 2py 0.00 0.00 0.00 0.00 1.00
0 C 2pz 0.00 0.00 0.00 0.00 0.00 1.00
0 C 3px 0.00 0.00 0.00 0.00 0.00 0.00 1.00
0 C 3py 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
0 C 3pz 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
'''
nc = c.shape[1]
for ic in range(0, nc, ncol):
dc = c[:,ic:ic+ncol]
m = dc.shape[1]
fmt = (' %%%d.%df'%(digits+4,digits))*m + '\n'
if label is None:
stdout.write(((' '*(digits+3))+'%s\n') %
(' '*(digits)).join(['#%-4d'%i for i in range(start+ic,start+ic+m)]))
for k, v in enumerate(dc[ic:ic+m]):
fmt = (' %%%d.%df'%(digits+4,digits))*(k+1) + '\n'
stdout.write(('%-5d' % (ic+k+start)) + (fmt % tuple(v[:k+1])))
for k, v in enumerate(dc[ic+m:]):
stdout.write(('%-5d' % (ic+m+k+start)) + (fmt % tuple(v)))
else:
stdout.write(((' '*(digits+10))+'%s\n') %
(' '*(digits)).join(['#%-4d'%i for i in range(start+ic,start+ic+m)]))
#stdout.write(' ')
#stdout.write(((' '*(digits)+'#%-5d')*m) % tuple(range(ic+start,ic+m+start)) + '\n')
for k, v in enumerate(dc[ic:ic+m]):
fmt = (' %%%d.%df'%(digits+4,digits))*(k+1) + '\n'
stdout.write(('%12s' % label[ic+k]) + (fmt % tuple(v[:k+1])))
for k, v in enumerate(dc[ic+m:]):
stdout.write(('%12s' % label[ic+m+k]) + (fmt % tuple(v)))
def dump_rec(stdout, c, label=None, label2=None,
ncol=OUTPUT_COLS, digits=OUTPUT_DIGITS, start=BASE):
''' Print an array in rectangular format
Args:
stdout : file object
eg sys.stdout, or stdout = open('/path/to/file') or
mol.stdout if mol is an object initialized from :class:`gto.Mole`
c : numpy.ndarray
coefficients
Kwargs:
label : list of strings
Row labels (default is 1,2,3,4,...)
label2 : list of strings
Col labels (default is 1,2,3,4,...)
ncol : int
Number of columns in the format output (default 5)
digits : int
Number of digits of precision for floating point output (default 5)
start : int
            The index to start counting from (default 0)
Examples:
>>> import sys, numpy
>>> dm = numpy.eye(3)
>>> dump_rec(sys.stdout, dm)
#0 #1 #2
0 1.00000 0.00000 0.00000
1 0.00000 1.00000 0.00000
2 0.00000 0.00000 1.00000
>>> from pyscf import gto
>>> mol = gto.M(atom='C 0 0 0')
>>> dm = numpy.eye(mol.nao_nr())
>>> dump_rec(sys.stdout, dm, label=mol.ao_labels(), ncol=9, digits=2)
#0 #1 #2 #3 #4 #5 #6 #7 #8
0 C 1s 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2s 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 3s 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2px 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00
0 C 2py 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00
0 C 2pz 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00
0 C 3px 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00
0 C 3py 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00
0 C 3pz 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
'''
nc = c.shape[1]
if label2 is None:
fmt = '#%%-%dd' % (digits+3)
label2 = [fmt%i for i in range(start,nc+start)]
else:
fmt = '%%-%ds' % (digits+4)
label2 = [fmt%i for i in label2]
for ic in range(0, nc, ncol):
dc = c[:,ic:ic+ncol]
m = dc.shape[1]
fmt = (' %%%d.%df'%(digits+4,digits))*m + '\n'
if label is None:
stdout.write(((' '*(digits+3))+'%s\n') % ' '.join(label2[ic:ic+m]))
for k, v in enumerate(dc):
stdout.write(('%-5d' % (k+start)) + (fmt % tuple(v)))
else:
stdout.write(((' '*(digits+10))+'%s\n') % ' '.join(label2[ic:ic+m]))
for k, v in enumerate(dc):
stdout.write(('%12s' % label[k]) + (fmt % tuple(v)))
def dump_mo(mol, c, label=None,
ncol=OUTPUT_COLS, digits=OUTPUT_DIGITS, start=BASE):
''' Format print for orbitals
Args:
stdout : file object
eg sys.stdout, or stdout = open('/path/to/file') or
mol.stdout if mol is an object initialized from :class:`gto.Mole`
c : numpy.ndarray
Orbitals, each column is an orbital
Kwargs:
label : list of strings
Row labels (default is AO labels)
Examples:
>>> from pyscf import gto
>>> mol = gto.M(atom='C 0 0 0')
>>> mo = numpy.eye(mol.nao_nr())
>>> dump_mo(mol, mo)
#0 #1 #2 #3 #4 #5 #6 #7 #8
0 C 1s 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2s 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 3s 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00 0.00
0 C 2px 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00 0.00
0 C 2py 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00 0.00
0 C 2pz 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00 0.00
0 C 3px 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00 0.00
0 C 3py 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00 0.00
0 C 3pz 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 1.00
'''
if label is None:
label = mol.ao_labels()
dump_rec(mol.stdout, c, label, None, ncol, digits, start)
del(BASE)
if __name__ == '__main__':
import sys
import numpy
c = numpy.random.random((16,16))
label = ['A%5d' % i for i in range(16)]
dump_tri(sys.stdout, c, label, 10, 2, 1)
dump_rec(sys.stdout, c, None, label, start=1)
| apache-2.0 | -8,020,765,779,473,733,000 | 41.20297 | 96 | 0.482111 | false |
bayesimpact/bob-emploi | frontend/server/asynchronous/test/update_email_sent_status_test.py | 1 | 9477 | """Tests for the update_email_sent_status module."""
import datetime
import typing
import unittest
from unittest import mock
import mailjet_rest
import mongomock
from bob_emploi.frontend.server.asynchronous import update_email_sent_status
from bob_emploi.frontend.server.test import mailjetmock
@mailjetmock.patch()
class MainTestCase(unittest.TestCase):
"""Unit tests for the update_email_sent_status module."""
def setUp(self) -> None:
super().setUp()
self.database = mongomock.MongoClient().test
db_patcher = mock.patch(update_email_sent_status.__name__ + '._DB', self.database)
db_patcher.start()
self.addCleanup(db_patcher.stop)
def _send_email(self, email_address: str = '[email protected]') -> int:
return typing.cast(int, mailjet_rest.Client(version='v3.1').send.create({'Messages': [{
'To': [{'Email': email_address}],
'TemplateID': 123456,
}]}).json()['Messages'][0]['To'][0]['MessageID'])
def test_with_message_id(self) -> None:
"""Test retrieving info when message ID is present."""
message_id = self._send_email('[email protected]')
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': message_id,
}],
})
# Mark the message as opened.
mailjetmock.get_message(message_id).open()
update_email_sent_status.main(['--disable-sentry'])
updated_data = self.database.user.find_one()
assert updated_data
self.assertEqual('field', updated_data.get('other'))
self.assertEqual(
message_id, int(updated_data.get('emailsSent')[0].get('mailjetMessageId')))
self.assertEqual(
'EMAIL_SENT_OPENED',
updated_data.get('emailsSent')[0].get('status'))
@mock.patch(update_email_sent_status.__name__ + '.now')
def test_refresh_old_status(self, mock_now: mock.MagicMock) -> None:
"""Test refreshing old status."""
# On Nov. the 5th, the email had been opened.
message_id = self._send_email('[email protected]')
mailjetmock.get_message(message_id).open()
mock_now.get.return_value = datetime.datetime(2017, 11, 5, 15, 13)
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-11-01T09:25:46.145001Z',
'mailjetMessageId': message_id,
}],
})
update_email_sent_status.main(['--disable-sentry'])
# A week later the email link had been clicked.
mock_now.get.return_value = datetime.datetime(2017, 11, 13, 15, 13)
mailjetmock.get_message(message_id).click()
update_email_sent_status.main(['--disable-sentry'])
updated_data = self.database.user.find_one()
assert updated_data
self.assertEqual(
'EMAIL_SENT_CLICKED',
updated_data.get('emailsSent')[0].get('status'))
@mock.patch(update_email_sent_status.mail_blast.__name__ + '.campaign')
def test_campaign_specific(self, mock_campaigns: mock.MagicMock) -> None:
"""Test retrieving info for a specific campaign."""
message_id = self._send_email('[email protected]')
mailjetmock.get_message(message_id).open()
mock_campaigns.list_all_campaigns.return_value = ['this-campaign', 'other-campaign']
self.database.user.insert_many([
{
'profile': {'email': '[email protected]'},
'emailsSent': [
{
'campaignId': 'this-campaign',
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': message_id,
},
{
'campaignId': 'other-campaign',
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': self._send_email('[email protected]'),
},
],
},
{
'profile': {'email': '[email protected]'},
'emailsSent': [{
'campaignId': 'other-campaign',
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': self._send_email('[email protected]'),
}],
},
])
update_email_sent_status.main(['--campaigns', 'this-campaign', '--disable-sentry'])
updated_user = self.database.user.find_one({'profile.email': '[email protected]'})
assert updated_user
self.assertEqual(
'EMAIL_SENT_OPENED',
updated_user.get('emailsSent')[0].get('status'))
self.assertIsNone(updated_user.get('emailsSent')[1].get('status'))
not_updated_user = self.database.user.find_one({'profile.email': '[email protected]'})
assert not_updated_user
self.assertIsNone(not_updated_user.get('emailsSent')[0].get('status'))
@mock.patch(update_email_sent_status.__name__ + '.now')
@mock.patch(update_email_sent_status.__name__ + '.mail_send')
def test_multiple_checks(self, mock_mail: mock.MagicMock, mock_now: mock.MagicMock) -> None:
"""Test checking the status of an email several times."""
# Note that in this test we do not use mailjetmock because what's
# important is to check when calls to Mailjet are made (i.e. not too often).
mock_now.get.return_value = datetime.datetime(2017, 9, 8, 15, 13)
mock_mail.get_message.return_value = {
'ArrivedAt': '2017-09-08T09:25:48Z',
'ID': 6789,
'Comment': 'Right message, arrived 2 seconds after being sent',
'Status': 'opened',
}
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': 6789,
}],
})
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.reset_mock()
# Check again, an hour later.
mock_now.get.return_value = datetime.datetime(2017, 9, 8, 16, 13)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_called_once()
mock_mail.get_message.reset_mock()
# Check again the next day.
mock_now.get.return_value = datetime.datetime(2017, 9, 9, 17, 13)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_called_once()
mock_mail.get_message.reset_mock()
# Check again an hour later the next day.
mock_now.get.return_value = datetime.datetime(2017, 9, 9, 18, 13)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_not_called()
# Check again 15 days later.
mock_now.get.return_value = datetime.datetime(2017, 9, 24, 18, 14)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_called_once()
mock_mail.get_message.reset_mock()
# Check again the next day.
mock_now.get.return_value = datetime.datetime(2017, 9, 25, 18, 14)
update_email_sent_status.main(['--disable-sentry'])
mock_mail.get_message.assert_not_called()
def test_update_helper(self) -> None:
"""Test updating the sent emails for another collection."""
message_id = self._send_email('[email protected]')
mailjetmock.get_message(message_id).open()
self.database.other_users.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': message_id,
}],
})
update_email_sent_status.main(['--mongo-collection', 'other_users', '--disable-sentry'])
updated_data = self.database.other_users.find_one()
assert updated_data
self.assertEqual('field', updated_data.get('other'))
self.assertEqual(
message_id, int(updated_data.get('emailsSent')[0].get('mailjetMessageId')))
self.assertEqual(
'EMAIL_SENT_OPENED',
updated_data.get('emailsSent')[0].get('status'))
def test_mailjet_unknown(self) -> None:
"""Test retrieving info but MailJet never heard of the message."""
self.database.user.insert_one({
'other': 'field',
'profile': {'email': '[email protected]'},
'emailsSent': [{
'sentAt': '2017-09-08T09:25:46.145001Z',
'mailjetMessageId': 9876554,
}],
})
update_email_sent_status.main(['--disable-sentry'])
updated_data = self.database.user.find_one()
assert updated_data
self.assertEqual('field', updated_data.get('other'))
self.assertEqual(
9876554, int(updated_data.get('emailsSent')[0].get('mailjetMessageId')))
self.assertNotIn('status', updated_data.get('emailsSent')[0])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 1,764,484,004,379,997,400 | 38.65272 | 96 | 0.574549 | false |
WhatWorksWhenForWhom/nlppln | nlppln/commands/save_ner_data.py | 1 | 1125 | #!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
from nlppln.utils import create_dirs, get_files
@click.command()
@click.argument('in_dir', type=click.Path(exists=True))
@click.option('--out_dir', '-o', default=os.getcwd(), type=click.Path())
@click.option('--name', '-n', default='ner_stats.csv')
def nerstats(in_dir, out_dir, name):
create_dirs(out_dir)
frames = []
in_files = get_files(in_dir)
for fi in in_files:
with codecs.open(fi, encoding='utf-8') as f:
saf = json.load(f)
data = {}
data['word'] = [t['word'] for t in saf['tokens'] if 'ne' in t.keys()]
data['ner'] = [t['ne'] for t in saf['tokens'] if 'ne' in t.keys()]
data['w_id'] = [t['id'] for t in saf['tokens'] if 'ne' in t.keys()]
data['text'] = [os.path.basename(fi)
for t in saf['tokens'] if 'ne' in t.keys()]
frames.append(pd.DataFrame(data=data))
df = pd.concat(frames, ignore_index=True)
df.to_csv(os.path.join(out_dir, name), encoding='utf-8')
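# Expected input (inferred from the fields read above): each input file is a
# SAF JSON document with a 'tokens' list whose entries have 'word' and 'id',
# and named-entity tokens additionally carry an 'ne' label. Illustrative only:
#   {"tokens": [{"id": "w1", "word": "Amsterdam", "ne": "LOC"},
#               {"id": "w2", "word": "is"}]}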
if __name__ == '__main__':
nerstats()
| apache-2.0 | -5,148,481,464,431,719,000 | 27.846154 | 77 | 0.580444 | false |
nictuku/nwu | nwu/common/scheduler.py | 1 | 3753 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2008 Stephan Peijnik ([email protected])
#
# This file is part of NWU.
#
# NWU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NWU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NWU. If not, see <http://www.gnu.org/licenses/>.
from threading import Thread, Lock, Event
from time import time
class Task:
""" Task base class. """
TYPE_ONESHOT = 0
TYPE_RECURRING = 1
def __init__(self, name, type, exec_time):
self.name = name
self.type = type
self.exec_time = exec_time
def execute(self):
""" Method that is executed by the scheduler.
Override to add your own code.
"""
pass
class RecurringTask(Task):
""" A recurring task.
    Is executed every <interval> seconds."""
def __init__(self, name, interval):
self.interval = interval
Task.__init__(self, name, Task.TYPE_RECURRING, int(time())+interval)
class OneshotTask(Task):
""" A one shot task.
Is executed at <exec_time>.
"""
def __init__(self, name, exec_time):
Task.__init__(self, name, Task.TYPE_ONESHOT, exec_time)
class Scheduler(Thread):
""" Manages scheduled tasks """
def __init__(self, app, name='Scheduler'):
Thread.__init__(self)
self.setName(name)
self.app = app
self.tasks = []
self.taskLock = Lock()
self.exitEvent = Event()
def init_thread(self):
""" Custom thread initialization code.
This method can be overridden to, for example, establish
a database connection.
"""
pass
def stop(self):
""" Stop the Scheduler. """
self.exitEvent.set()
def add_task(self, task):
""" Add a task to the scheduler """
if self.exitEvent.isSet():
return False
self.taskLock.acquire()
self.tasks.append(task)
self.taskLock.release()
return True
def remove_task(self, task):
""" Remove a task from the scheduler """
if self.exitEvent.isSet():
return False
self.taskLock.acquire()
self.tasks.remove(task)
self.taskLock.release()
return True
def run(self):
""" Thread main loop. """
self.init_thread()
while not self.exitEvent.isSet():
exec_tasks = []
# Keep lock time as short as possible!
self.taskLock.acquire()
for ac in self.tasks:
if ac.exec_time <= int(time()):
exec_tasks.append(ac)
self.taskLock.release()
for ac in exec_tasks:
try:
ac.execute()
except Exception, e:
# TODO: Log this rather than printing it
print 'Task %s raised exception: %s' % (ac.name, e)
if ac.type == Task.TYPE_RECURRING:
ac.exec_time = int(time()) + ac.interval
self.taskLock.acquire()
for ac in exec_tasks:
if ac.type == Task.TYPE_ONESHOT:
self.tasks.remove(ac)
self.taskLock.release()
self.exitEvent.wait(0.1)
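# Minimal usage sketch (added for illustration; the task class below is an example):
#
#   class HeartbeatTask(RecurringTask):
#       def __init__(self):
#           RecurringTask.__init__(self, 'heartbeat', interval=5)
#       def execute(self):
#           print 'heartbeat'
#
#   scheduler = Scheduler(app=None)
#   scheduler.start()
#   scheduler.add_task(HeartbeatTask())
#   ...
#   scheduler.stop()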
| gpl-3.0 | 219,441,531,832,932,300 | 28.320313 | 76 | 0.563016 | false |
Shu-Ji/multi-supervisord-web-admin | src/models.py | 1 | 2943 | # coding: u8
from tornado.util import ObjectDict
from sqlalchemy import create_engine
from sqlalchemy import (Column, Integer, Text, String, Boolean)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.attributes import InstrumentedAttribute
import settings
import utils
params = dict(
encoding='utf8',
echo=False,
pool_recycle=7200,
)
conn_str = 'sqlite:///%s' % settings.DB_PATH
engine = create_engine(conn_str, **params)
db_factory = lambda: sessionmaker(bind=engine)()
_Base = declarative_base()
class Base(_Base):
__abstract__ = True
id = Column(Integer, primary_key=True, autoincrement=True)
def as_dict(self):
r = {c: getattr(self, c) for c in self.columns()}
return ObjectDict(r)
@classmethod
def get_columns(cls):
c = {}
for k, v in vars(cls).iteritems():
if type(v) is InstrumentedAttribute:
c[k] = v
return ObjectDict(c)
@classmethod
def columns(cls):
return cls.get_columns().keys()
class User(Base):
__tablename__ = 'user'
name = Column(Text, index=True)
pwd = Column(String(32))
@staticmethod
def reset_password(handler, old, new):
db = handler.db
user = db.query(User).filter_by(name=handler.username).first()
if user.pwd != utils.md5(old):
return False
user.pwd = utils.md5(new)
return True
class Host(Base):
__tablename__ = 'host'
user = Column(Text)
pwd = Column(Text)
host = Column(Text)
port = Column(Integer)
is_active = Column(Boolean, server_default='1')
@staticmethod
def delete(db, id):
return bool(db.query(Host).filter_by(id=id).delete())
@staticmethod
def update(db, id, user, pwd, host, port):
return bool(db.query(Host).filter_by(id=id).update(
{'user': user, 'pwd': pwd, 'host': host, 'port': port}
))
@staticmethod
def add(handler, user, pwd, host, port):
db = handler.db
if db.query(Host).filter_by(host=host, port=port).first() is not None:
return False
db.add(Host(user=user, pwd=pwd, host=host, port=port))
return True
@staticmethod
def get_all_active_hosts(handler):
return handler.db.query(Host).filter_by(is_active=True)
@staticmethod
def get_one_host_info_by_id(db, id):
return db.query(Host).filter_by(id=id).first()
@staticmethod
def get_one_host_info(handler, host, port):
return handler.db.query(Host).filter_by(host=host, port=port).first()
@staticmethod
def get_all_hosts(handler):
return handler.db.query(Host)
if __name__ == '__main__':
metadata = Base.metadata
metadata.create_all(engine)
db = db_factory()
db.merge(User(id=1, name='admin', pwd=utils.md5('AdminDemo')))
db.commit()
db.close()
| unlicense | -1,953,575,648,705,451,000 | 23.525 | 78 | 0.622834 | false |
huiyiqun/check_mk | cmk/log.py | 1 | 7362 | #!/usr/bin/env python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2016 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import sys
import logging as _logging
# Just for reference, the predefined logging levels:
#
# syslog/CMC Python added to Python
# --------------------------------------------
# emerg 0
# alert 1
# crit 2 CRITICAL 50
# err 3 ERROR 40
# warn 4 WARNING 30 <= default level in Python
# notice 5 <= default level in CMC
# info 6 INFO 20
# VERBOSE 15
# debug 7 DEBUG 10
#
# NOTE: VERBOSE is a bit confusing and suffers from the not-invented-here
# syndrome. If we really insist on 3 verbosity levels (normal, verbose, very
# verbose), we should probably do the following:
#
# * Nuke VERBOSE.
# * Introduce NOTICE (25).
# * Make NOTICE the default level.
# * Optionally introduce EMERGENCY (70) and ALERT (60) for consistency.
#
# This would make our whole logging story much more consistent internally
# (code) and externally (GUI always offers the same levels). Nevertheless, we
# should keep in mind that the Python documentation strongly discourages
# introducing new log levels, at least for libraries. OTOH, with 3 verbosity
# levels, this would force us to log normal stuff with a WARNING level, which
# looks wrong.
# Users should be able to set log levels without importing "logging"
CRITICAL = _logging.CRITICAL
ERROR = _logging.ERROR
WARNING = _logging.WARNING
INFO = _logging.INFO
DEBUG = _logging.DEBUG
# We need an additional log level between INFO and DEBUG to reflect the
# verbose() and vverbose() mechanisms of Check_MK.
VERBOSE = 15
class CMKLogger(_logging.getLoggerClass()):
def __init__(self, name, level=_logging.NOTSET):
super(CMKLogger, self).__init__(name, level)
_logging.addLevelName(VERBOSE, "VERBOSE")
def verbose(self, msg, *args, **kwargs):
if self.is_verbose():
self._log(VERBOSE, msg, args, **kwargs)
def is_verbose(self):
return self.isEnabledFor(VERBOSE)
def is_very_verbose(self):
return self.isEnabledFor(DEBUG)
def set_format(self, fmt):
handler = _logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(get_formatter(fmt))
del self.handlers[:] # Remove all previously existing handlers
self.addHandler(handler)
_logging.setLoggerClass(CMKLogger)
# Set default logging handler to avoid "No handler found" warnings.
# Python 2.7+
logger = _logging.getLogger("cmk")
logger.addHandler(_logging.NullHandler())
logger.setLevel(INFO)
def get_logger(name):
"""This function provides the logging object for client code.
It returns a child logger of the "cmk" main logger, identified
by the given name. The name of the child logger will be prefixed
with "cmk.", for example "cmk.mkeventd" in case of "mkeventd".
"""
return logger.getChild(name)
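# Example (added for illustration; the logger name is arbitrary):
#
#   log = get_logger("mkeventd")   # yields the child logger "cmk.mkeventd"
#   log.info("daemon started")
#   log.verbose("shown only when verbosity >= 1")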
def get_formatter(format="%(asctime)s [%(levelno)s] [%(name)s %(process)d] %(message)s"):
"""Returns a new message formater instance that uses the standard
Check_MK log format by default. You can also set another format
if you like."""
return _logging.Formatter(format)
def setup_console_logging():
"""This method enables all log messages to be written to the console
without any additional information like date/time, logger-name. Just
the log line is written.
This can be used for existing command line applications which were
using sys.stdout.write() or print() before.
"""
handler = _logging.StreamHandler(stream=sys.stdout)
formatter = _logging.Formatter("%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def open_log(log_file_path, fallback_to=None):
"""Open logfile and fall back to stderr if this is not successfull
The opened file() object is returned.
"""
if fallback_to is None:
fallback_to = sys.stderr
logfile = None
try:
logfile = file(log_file_path, "a")
logfile.flush()
except Exception, e:
logger.exception("Cannot open log file '%s': %s" % (log_file_path , e))
if fallback_to:
logfile = fallback_to
if logfile:
setup_logging_handler(logfile)
return logfile
def setup_logging_handler(stream):
"""This method enables all log messages to be written to the given
    stream file object. The messages are formatted in Check_MK standard
logging format.
"""
handler = _logging.StreamHandler(stream=stream)
handler.setFormatter(get_formatter("%(asctime)s [%(levelno)s] [%(name)s] %(message)s"))
del logger.handlers[:] # Remove all previously existing handlers
logger.addHandler(handler)
def set_verbosity(verbosity):
"""Values for "verbosity":
0: enables INFO and above
1: enables VERBOSE and above
2: enables DEBUG and above (ALL messages)
"""
if verbosity == 0:
logger.setLevel(INFO)
elif verbosity == 1:
logger.setLevel(VERBOSE)
elif verbosity == 2:
logger.setLevel(DEBUG)
else:
raise NotImplementedError()
# TODO: Experiment. Not yet used.
class LogMixin(object):
"""Inherit from this class to provide logging support.
    Makes a logger available via "self._logger" for instances and
    "cls._cls_logger()" for the class.
"""
__parent_logger = None
__logger = None
__cls_logger = None
@property
def _logger(self):
if not self.__logger:
parent = self.__parent_logger or logger
self.__logger = parent.getChild('.'.join([self.__class__.__name__]))
return self.__logger
@classmethod
def _cls_logger(cls):
if not cls.__cls_logger:
parent = cls.__parent_logger or logger
cls.__cls_logger = parent.getChild('.'.join([cls.__name__]))
return cls.__cls_logger
| gpl-2.0 | -2,892,559,792,175,105,500 | 31.72 | 91 | 0.609617 | false |
mgracer48/panda3d | direct/src/gui/DirectWaitBar.py | 1 | 4632 | """Undocumented Module"""
__all__ = ['DirectWaitBar']
from panda3d.core import *
import DirectGuiGlobals as DGG
from DirectFrame import *
import types
"""
import DirectWaitBar
d = DirectWaitBar(borderWidth=(0, 0))
"""
class DirectWaitBar(DirectFrame):
""" DirectWaitBar - A DirectWidget that shows progress completed
towards a task. """
def __init__(self, parent = None, **kw):
# Inherits from DirectFrame
# A Direct Frame can have:
# - A background texture (pass in path to image, or Texture Card)
# - A midground geometry item (pass in geometry)
# - A foreground text Node (pass in text string or Onscreen Text)
optiondefs = (
# Define type of DirectGuiWidget
('pgFunc', PGWaitBar, None),
('frameSize', (-1, 1, -0.08, 0.08), None),
('borderWidth', (0, 0), None),
('range', 100, self.setRange),
('value', 0, self.setValue),
('barBorderWidth', (0, 0), self.setBarBorderWidth),
('barColor', (1, 0, 0, 1), self.setBarColor),
('barTexture', None, self.setBarTexture),
('barRelief', DGG.FLAT, self.setBarRelief),
('sortOrder', NO_FADE_SORT_INDEX, None),
)
if 'text' in kw:
textoptiondefs = (
('text_pos', (0, -0.025), None),
('text_scale', 0.1, None)
)
else:
textoptiondefs = ()
# Merge keyword options with default options
self.defineoptions(kw, optiondefs + textoptiondefs)
# Initialize superclasses
DirectFrame.__init__(self, parent)
self.barStyle = PGFrameStyle()
# Call option initialization functions
self.initialiseoptions(DirectWaitBar)
self.updateBarStyle()
def destroy(self):
self.barStyle = None
DirectFrame.destroy(self)
def setRange(self):
"""Updates the bar range which you can set using bar['range'].
This is the value at which the WaitBar indicates 100%."""
self.guiItem.setRange(self['range'])
def setValue(self):
"""Updates the bar value which you can set using bar['value'].
The value should range between 0 and bar['range']."""
self.guiItem.setValue(self['value'])
def getPercent(self):
"""Returns the percentage complete."""
return self.guiItem.getPercent()
def updateBarStyle(self):
if not self.fInit:
self.guiItem.setBarStyle(self.barStyle)
def setBarRelief(self):
"""Updates the bar relief, which you can set using bar['barRelief']."""
self.barStyle.setType(self['barRelief'])
self.updateBarStyle()
def setBarBorderWidth(self):
"""Updates the bar's border width, which you can set using bar['barBorderWidth']."""
self.barStyle.setWidth(*self['barBorderWidth'])
self.updateBarStyle()
def setBarColor(self):
"""Updates the bar color, which you can set using bar['barColor']."""
color = self['barColor']
self.barStyle.setColor(color[0], color[1], color[2], color[3])
self.updateBarStyle()
def setBarTexture(self):
"""Updates the bar texture, which you can set using bar['barTexture']."""
# this must be a single texture (or a string).
texture = self['barTexture']
if isinstance(texture, types.StringTypes):
texture = loader.loadTexture(texture)
if texture:
self.barStyle.setTexture(texture)
else:
self.barStyle.clearTexture()
self.updateBarStyle()
def update(self, value):
"""Updates the bar with the given value and renders a frame."""
self['value'] = value
# Render a frame out-of-sync with the igLoop to update the
# window right now. This allows the wait bar to be updated
# even though we are not normally rendering frames.
base.graphicsEngine.renderFrame()
def finish(self, N = 10):
"""Fill the bar in N frames. This call is blocking."""
remaining = self['range'] - self['value']
if remaining:
step = max(1, int(remaining / N))
count = self['value']
while count != self['range']:
count += step
if count > self['range']:
count = self['range']
self.update(count)
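# Usage sketch (added for illustration):
#
#   bar = DirectWaitBar(text = "Loading...", value = 0, range = 100)
#   bar.update(42)   # show 42% progress and render a frame
#   bar.finish()     # fill the remaining range in ~10 frames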
| bsd-3-clause | 8,693,055,676,541,782,000 | 36.354839 | 92 | 0.564551 | false |
anthonyalmarza/trex | tests/test_watch.py | 1 | 4071 | # from trex import redis
#
# from .mixins import REDIS_HOST, REDIS_PORT
#
#
# class TestRedisConnections(unittest.TestCase):
# _KEYS = ['trex:testwatch1', 'trex:testwatch2']
#
# @defer.inlineCallbacks
# def setUp(self):
# self.connections = []
# self.db = yield self._getRedisConnection()
# yield self.db.delete(self._KEYS)
#
# @defer.inlineCallbacks
# def tearDown(self):
# for connection in self.connections:
# l = [connection.delete(k) for k in self._KEYS]
# yield defer.DeferredList(l)
# yield connection.disconnect()
#
# def _db_connected(self, connection):
# self.connections.append(connection)
# return connection
#
# def _getRedisConnection(self, host=REDIS_HOST, port=REDIS_PORT, db=0):
# return redis.Connection(
# host, port, dbid=db, reconnect=False).addCallback(
# self._db_connected)
#
# def _check_watcherror(self, response, shouldError=False):
# if shouldError:
# self.assertIsInstance(response, Failure)
# self.assertIsInstance(response.value, redis.WatchError)
# else:
# self.assertNotIsInstance(response, Failure)
#
# @defer.inlineCallbacks
# def testRedisWatchFail(self):
# db1 = yield self._getRedisConnection()
# yield self.db.set(self._KEYS[0], 'foo')
# t = yield self.db.multi(self._KEYS[0])
# self.assertIsInstance(t, redis.RedisProtocol)
# yield t.set(self._KEYS[1], 'bar')
# # This should trigger a failure
# yield db1.set(self._KEYS[0], 'bar1')
# yield t.commit().addBoth(self._check_watcherror, shouldError=True)
#
# @defer.inlineCallbacks
# def testRedisWatchSucceed(self):
# yield self.db.set(self._KEYS[0], 'foo')
# t = yield self.db.multi(self._KEYS[0])
# self.assertIsInstance(t, redis.RedisProtocol)
# yield t.set(self._KEYS[0], 'bar')
# yield t.commit().addBoth(self._check_watcherror, shouldError=False)
#
# @defer.inlineCallbacks
# def testRedisMultiNoArgs(self):
# yield self.db.set(self._KEYS[0], 'foo')
# t = yield self.db.multi()
# self.assertIsInstance(t, redis.RedisProtocol)
# yield t.set(self._KEYS[1], 'bar')
# yield t.commit().addBoth(self._check_watcherror, shouldError=False)
#
# @defer.inlineCallbacks
# def testRedisWithBulkCommands_transactions(self):
# t = yield self.db.watch(self._KEYS)
# yield t.mget(self._KEYS)
# t = yield t.multi()
# yield t.commit()
# self.assertEqual(0, t.transactions)
# self.assertFalse(t.inTransaction)
#
# @defer.inlineCallbacks
# def testRedisWithBulkCommands_inTransaction(self):
# t = yield self.db.watch(self._KEYS)
# yield t.mget(self._KEYS)
# self.assertTrue(t.inTransaction)
# yield t.unwatch()
#
# @defer.inlineCallbacks
# def testRedisWithBulkCommands_mget(self):
# yield self.db.set(self._KEYS[0], "foo")
# yield self.db.set(self._KEYS[1], "bar")
#
# m0 = yield self.db.mget(self._KEYS)
# t = yield self.db.watch(self._KEYS)
# m1 = yield t.mget(self._KEYS)
# t = yield t.multi()
# yield t.mget(self._KEYS)
# (m2,) = yield t.commit()
#
# self.assertEqual(["foo", "bar"], m0)
# self.assertEqual(m0, m1)
# self.assertEqual(m0, m2)
#
# @defer.inlineCallbacks
# def testRedisWithBulkCommands_hgetall(self):
# yield self.db.hset(self._KEYS[0], "foo", "bar")
# yield self.db.hset(self._KEYS[0], "bar", "foo")
#
# h0 = yield self.db.hgetall(self._KEYS[0])
# t = yield self.db.watch(self._KEYS[0])
# h1 = yield t.hgetall(self._KEYS[0])
# t = yield t.multi()
# yield t.hgetall(self._KEYS[0])
# (h2,) = yield t.commit()
#
# self.assertEqual({"foo": "bar", "bar": "foo"}, h0)
# self.assertEqual(h0, h1)
# self.assertEqual(h0, h2)
| mit | 3,752,007,576,016,985,600 | 35.675676 | 77 | 0.590764 | false |
ptitdoc/Archive-qubes-core | dom0/qvm-core/qubesutils.py | 1 | 79072 | #!/usr/bin/python
#
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2011 Marek Marczykowski <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
from qubes import QubesVm,QubesException,QubesVmCollection
from qubes import QubesVmClasses
from qubes import xs, xl_ctx, qubes_guid_path, qubes_clipd_path, qrexec_client_path
from qubes import qubes_store_filename, qubes_base_dir
from qubes import qubes_servicevms_dir, qubes_templates_dir, qubes_appvms_dir
import sys
import os
import subprocess
import re
import time
import grp,pwd
from datetime import datetime
from qmemman_client import QMemmanClient
import xen.lowlevel.xc
import xen.lowlevel.xl
import xen.lowlevel.xs
def mbytes_to_kmg(size):
if size > 1024:
return "%d GiB" % (size/1024)
else:
return "%d MiB" % size
def kbytes_to_kmg(size):
if size > 1024:
return mbytes_to_kmg(size/1024)
else:
return "%d KiB" % size
def bytes_to_kmg(size):
if size > 1024:
return kbytes_to_kmg(size/1024)
else:
return "%d B" % size
def size_to_human (size):
"""Humane readable size, with 1/10 precission"""
if size < 1024:
        return str(size)
elif size < 1024*1024:
return str(round(size/1024.0,1)) + ' KiB'
elif size < 1024*1024*1024:
return str(round(size/(1024.0*1024),1)) + ' MiB'
else:
return str(round(size/(1024.0*1024*1024),1)) + ' GiB'
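# Illustrative values (sketch):
#   size_to_human(500)          -> '500'
#   size_to_human(1536)         -> '1.5 KiB'
#   size_to_human(3*1024*1024)  -> '3.0 MiB'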
def parse_size(size):
units = [ ('K', 1024), ('KB', 1024),
('M', 1024*1024), ('MB', 1024*1024),
('G', 1024*1024*1024), ('GB', 1024*1024*1024),
]
size = size.strip().upper()
if size.isdigit():
return int(size)
for unit, multiplier in units:
if size.endswith(unit):
size = size[:-len(unit)].strip()
return int(size)*multiplier
raise QubesException("Invalid size: {0}.".format(size))
def print_stdout(text):
print (text)
def print_stderr(text):
print >> sys.stderr, (text)
###### Block devices ########
def block_devid_to_name(devid):
major = devid / 256
minor = devid % 256
dev_class = ""
if major == 202:
dev_class = "xvd"
elif major == 8:
dev_class = "sd"
else:
raise QubesException("Unknown device class %d" % major)
if minor % 16 == 0:
return "%s%c" % (dev_class, ord('a')+minor/16)
else:
return "%s%c%d" % (dev_class, ord('a')+minor/16, minor%16)
def block_name_to_majorminor(name):
# check if it is already devid
if isinstance(name, int):
return (name / 256, name % 256)
if name.isdigit():
return (int(name) / 256, int(name) % 256)
major = 0
minor = 0
dXpY_style = False
disk = True
if name.startswith("xvd"):
major = 202
elif name.startswith("sd"):
major = 8
elif name.startswith("mmcblk"):
dXpY_style = True
major = 179
elif name.startswith("scd"):
disk = False
major = 11
elif name.startswith("sr"):
disk = False
major = 11
elif name.startswith("loop"):
disk = False
major = 7
elif name.startswith("md"):
disk = False
major = 9
else:
# Unknown device
return (0, 0)
if not dXpY_style:
name_match = re.match(r"^([a-z]+)([a-z])([0-9]*)$", name)
else:
name_match = re.match(r"^([a-z]+)([0-9]*)(?:p([0-9]+))?$", name)
if not name_match:
raise QubesException("Invalid device name: %s" % name)
if disk:
if dXpY_style:
minor = int(name_match.group(2))*8
else:
minor = (ord(name_match.group(2))-ord('a')) * 16
else:
minor = 0
if name_match.group(3):
minor += int(name_match.group(3))
return (major, minor)
def block_name_to_devid(name):
# check if it is already devid
if isinstance(name, int):
return name
if name.isdigit():
return int(name)
(major, minor) = block_name_to_majorminor(name)
return major << 8 | minor
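# Illustrative values (sketch): block_name_to_devid("xvdb") -> 51728,
# block_name_to_devid("sda1") -> 2049; an int or numeric string is returned unchanged.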
def block_find_unused_frontend(vm = None):
assert vm is not None
assert vm.is_running()
vbd_list = xs.ls('', '/local/domain/%d/device/vbd' % vm.xid)
# xvd* devices
major = 202
# prefer xvdi
for minor in range(8*16,254,16)+range(0,8*16,16):
if vbd_list is None or str(major << 8 | minor) not in vbd_list:
return block_devid_to_name(major << 8 | minor)
return None
def block_list(vm = None, system_disks = False):
device_re = re.compile(r"^[a-z0-9]{1,12}$")
# FIXME: any better idea of desc_re?
desc_re = re.compile(r"^.{1,255}$")
mode_re = re.compile(r"^[rw]$")
xs_trans = xs.transaction_start()
vm_list = []
if vm is not None:
if not vm.is_running():
xs.transaction_end(xs_trans)
return []
else:
vm_list = [ str(vm.xid) ]
else:
vm_list = xs.ls(xs_trans, '/local/domain')
devices_list = {}
for xid in vm_list:
vm_name = xs.read(xs_trans, '/local/domain/%s/name' % xid)
vm_devices = xs.ls(xs_trans, '/local/domain/%s/qubes-block-devices' % xid)
if vm_devices is None:
continue
for device in vm_devices:
# Sanitize device name
if not device_re.match(device):
print >> sys.stderr, "Invalid device name in VM '%s'" % vm_name
continue
device_size = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/size' % (xid, device))
device_desc = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/desc' % (xid, device))
device_mode = xs.read(xs_trans, '/local/domain/%s/qubes-block-devices/%s/mode' % (xid, device))
if device_size is None or device_desc is None or device_mode is None:
print >> sys.stderr, "Missing field in %s device parameters" % device
continue
if not device_size.isdigit():
print >> sys.stderr, "Invalid %s device size in VM '%s'" % (device, vm_name)
continue
if not desc_re.match(device_desc):
print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (device, vm_name)
continue
if not mode_re.match(device_mode):
print >> sys.stderr, "Invalid %s device mode in VM '%s'" % (device, vm_name)
continue
# Check if we know major number for this device; attach will work without this, but detach and check_attached don't
if block_name_to_majorminor(device) == (0, 0):
print >> sys.stderr, "Unsupported device %s:%s" % (vm_name, device)
continue
if not system_disks:
if xid == '0' and device_desc.startswith(qubes_base_dir):
continue
visible_name = "%s:%s" % (vm_name, device)
devices_list[visible_name] = {"name": visible_name, "xid":int(xid),
"vm": vm_name, "device":device, "size":int(device_size),
"desc":device_desc, "mode":device_mode}
xs.transaction_end(xs_trans)
return devices_list
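# Illustrative block_list() entry (hypothetical VM and device names):
#   { "usbvm:sdb1": { "name": "usbvm:sdb1", "xid": 7, "vm": "usbvm", "device": "sdb1",
#                     "size": 4005527552, "desc": "Cruzer Blade", "mode": "w" } }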
def block_check_attached(backend_vm, device, backend_xid = None):
if backend_xid is None:
backend_xid = backend_vm.xid
xs_trans = xs.transaction_start()
vm_list = xs.ls(xs_trans, '/local/domain/%d/backend/vbd' % backend_xid)
if vm_list is None:
xs.transaction_end(xs_trans)
return None
device_majorminor = None
try:
device_majorminor = block_name_to_majorminor(device)
except:
# Unknown devices will be compared directly - perhaps it is a filename?
pass
for vm_xid in vm_list:
for devid in xs.ls(xs_trans, '/local/domain/%d/backend/vbd/%s' % (backend_xid, vm_xid)):
(tmp_major, tmp_minor) = (0, 0)
phys_device = xs.read(xs_trans, '/local/domain/%d/backend/vbd/%s/%s/physical-device' % (backend_xid, vm_xid, devid))
dev_params = xs.read(xs_trans, '/local/domain/%d/backend/vbd/%s/%s/params' % (backend_xid, vm_xid, devid))
if phys_device and phys_device.find(':'):
(tmp_major, tmp_minor) = phys_device.split(":")
tmp_major = int(tmp_major, 16)
tmp_minor = int(tmp_minor, 16)
else:
# perhaps not ready yet - check params
if not dev_params:
# Skip not-phy devices
continue
elif not dev_params.startswith('/dev/'):
# will compare params directly
pass
else:
                    # strip the '/dev/' prefix explicitly (lstrip strips a character set, not a prefix)
                    (tmp_major, tmp_minor) = block_name_to_majorminor(dev_params[len('/dev/'):])
if (device_majorminor and (tmp_major, tmp_minor) == device_majorminor) or \
(device_majorminor is None and dev_params == device):
vm_name = xl_ctx.domid_to_name(int(vm_xid))
frontend = block_devid_to_name(int(devid))
xs.transaction_end(xs_trans)
return {"xid":int(vm_xid), "frontend": frontend, "devid": int(devid), "vm": vm_name}
xs.transaction_end(xs_trans)
return None
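# Illustrative return value of block_check_attached() when the device is attached
# somewhere (hypothetical names): {"xid": 12, "frontend": "xvdi", "devid": 51840, "vm": "work"};
# None is returned when the device is not attached anywhere.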
def block_attach(vm, backend_vm, device, frontend=None, mode="w", auto_detach=False, wait=True):
device_attach_check(vm, backend_vm, device, frontend)
do_block_attach(vm, backend_vm, device, frontend, mode, auto_detach, wait)
def device_attach_check(vm, backend_vm, device, frontend):
""" Checks all the parameters, dies on errors """
if not vm.is_running():
raise QubesException("VM %s not running" % vm.name)
if not backend_vm.is_running():
raise QubesException("VM %s not running" % backend_vm.name)
def do_block_attach(vm, backend_vm, device, frontend, mode, auto_detach, wait):
if frontend is None:
frontend = block_find_unused_frontend(vm)
if frontend is None:
raise QubesException("No unused frontend found")
else:
# Check if any device attached at this frontend
if xs.read('', '/local/domain/%d/device/vbd/%d/state' % (vm.xid, block_name_to_devid(frontend))) == '4':
raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))
# Check if this device is attached to some domain
attached_vm = block_check_attached(backend_vm, device)
if attached_vm:
if auto_detach:
block_detach(None, attached_vm['devid'], vm_xid=attached_vm['xid'])
else:
raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))
if device.startswith('/'):
backend_dev = 'script:file:' + device
else:
backend_dev = 'phy:/dev/' + device
xl_cmd = [ '/usr/sbin/xl', 'block-attach', vm.name, backend_dev, frontend, mode, str(backend_vm.xid) ]
subprocess.check_call(xl_cmd)
if wait:
be_path = '/local/domain/%d/backend/vbd/%d/%d' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend))
        # There is no way to use a xenstore watch with a timeout, so we must check in a loop
interval = 0.100
# 5sec timeout
timeout = 5/interval
while timeout > 0:
be_state = xs.read('', be_path + '/state')
hotplug_state = xs.read('', be_path + '/hotplug-status')
if be_state is None:
raise QubesException("Backend device disappeared, something weird happened")
elif int(be_state) == 4:
# Ok
return
elif int(be_state) > 4:
# Error
error = xs.read('', '/local/domain/%d/error/backend/vbd/%d/%d/error' % (backend_vm.xid, vm.xid, block_name_to_devid(frontend)))
if error is not None:
raise QubesException("Error while connecting block device: " + error)
else:
raise QubesException("Unknown error while connecting block device")
elif hotplug_state == 'error':
hotplug_error = xs.read('', be_path + '/hotplug-error')
if hotplug_error:
raise QubesException("Error while connecting block device: " + hotplug_error)
else:
raise QubesException("Unknown hotplug error while connecting block device")
time.sleep(interval)
timeout -= interval
raise QubesException("Timeout while waiting for block defice connection")
def block_detach(vm, frontend = "xvdi", vm_xid = None):
# Get XID if not provided already
if vm_xid is None:
if not vm.is_running():
raise QubesException("VM %s not running" % vm.name)
# FIXME: potential race
vm_xid = vm.xid
# Check if this device is really connected
if not xs.read('', '/local/domain/%d/device/vbd/%d/state' % (vm_xid, block_name_to_devid(frontend))) == '4':
# Do nothing - device already detached
return
xl_cmd = [ '/usr/sbin/xl', 'block-detach', str(vm_xid), str(frontend)]
subprocess.check_call(xl_cmd)
def block_detach_all(vm, vm_xid = None):
""" Detach all non-system devices"""
# Get XID if not provided already
if vm_xid is None:
if not vm.is_running():
raise QubesException("VM %s not running" % vm.name)
# FIXME: potential race
vm_xid = vm.xid
xs_trans = xs.transaction_start()
devices = xs.ls(xs_trans, '/local/domain/%d/device/vbd' % vm_xid)
if devices is None:
return
devices_to_detach = []
for devid in devices:
# check if this is system disk
be_path = xs.read(xs_trans, '/local/domain/%d/device/vbd/%s/backend' % (vm_xid, devid))
assert be_path is not None
be_params = xs.read(xs_trans, be_path + '/params')
if be_path.startswith('/local/domain/0/') and be_params is not None and be_params.startswith(qubes_base_dir):
# system disk
continue
devices_to_detach.append(devid)
xs.transaction_end(xs_trans)
for devid in devices_to_detach:
xl_cmd = [ '/usr/sbin/xl', 'block-detach', str(vm_xid), devid]
subprocess.check_call(xl_cmd)
####### USB devices ######
usb_ver_re = re.compile(r"^(1|2)$")
usb_device_re = re.compile(r"^[0-9]+-[0-9]+(_[0-9]+)?$")
usb_port_re = re.compile(r"^$|^[0-9]+-[0-9]+(\.[0-9]+)?$")
def usb_setup(backend_vm_xid, vm_xid, devid, usb_ver):
"""
Attach frontend to the backend.
backend_vm_xid - id of the backend domain
vm_xid - id of the frontend domain
devid - id of the pvusb controller
"""
num_ports = 8
trans = xs.transaction_start()
be_path = "/local/domain/%d/backend/vusb/%d/%d" % (backend_vm_xid, vm_xid, devid)
fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, devid)
be_perm = [{'dom': backend_vm_xid}, {'dom': vm_xid, 'read': True} ]
fe_perm = [{'dom': vm_xid}, {'dom': backend_vm_xid, 'read': True} ]
# Create directories and set permissions
xs.write(trans, be_path, "")
xs.set_permissions(trans, be_path, be_perm)
xs.write(trans, fe_path, "")
xs.set_permissions(trans, fe_path, fe_perm)
# Write backend information into the location that frontend looks for
xs.write(trans, "%s/backend-id" % fe_path, str(backend_vm_xid))
xs.write(trans, "%s/backend" % fe_path, be_path)
# Write frontend information into the location that backend looks for
xs.write(trans, "%s/frontend-id" % be_path, str(vm_xid))
xs.write(trans, "%s/frontend" % be_path, fe_path)
# Write USB Spec version field.
xs.write(trans, "%s/usb-ver" % be_path, usb_ver)
# Write virtual root hub field.
xs.write(trans, "%s/num-ports" % be_path, str(num_ports))
for port in range(1, num_ports+1):
# Set all port to disconnected state
xs.write(trans, "%s/port/%d" % (be_path, port), "")
# Set state to XenbusStateInitialising
xs.write(trans, "%s/state" % fe_path, "1")
xs.write(trans, "%s/state" % be_path, "1")
xs.write(trans, "%s/online" % be_path, "1")
xs.transaction_end(trans)
def usb_decode_device_from_xs(xs_encoded_device):
""" recover actual device name (xenstore doesn't allow dot in key names, so it was translated to underscore) """
return xs_encoded_device.replace('_', '.')
def usb_encode_device_for_xs(device):
""" encode actual device name (xenstore doesn't allow dot in key names, so translated it into underscore) """
return device.replace('.', '_')
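# Illustrative round-trip (hypothetical device id):
#   usb_encode_device_for_xs("7-5.1")  -> "7-5_1"
#   usb_decode_device_from_xs("7-5_1") -> "7-5.1"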
def usb_list():
"""
Returns a dictionary of USB devices (for PVUSB backends running in all VM).
The dictionary is keyed by 'name' (see below), each element is a dictionary itself:
vm = name of the backend domain
xid = xid of the backend domain
device = <frontend device number>-<frontend port number>
name = <name of backend domain>:<frontend device number>-<frontend port number>
desc = description
"""
# FIXME: any better idea of desc_re?
desc_re = re.compile(r"^.{1,255}$")
devices_list = {}
xs_trans = xs.transaction_start()
vm_list = xs.ls(xs_trans, '/local/domain')
for xid in vm_list:
vm_name = xs.read(xs_trans, '/local/domain/%s/name' % xid)
vm_devices = xs.ls(xs_trans, '/local/domain/%s/qubes-usb-devices' % xid)
if vm_devices is None:
continue
# when listing devices in xenstore we get encoded names
for xs_encoded_device in vm_devices:
# Sanitize device id
if not usb_device_re.match(xs_encoded_device):
print >> sys.stderr, "Invalid device id in backend VM '%s'" % vm_name
continue
device = usb_decode_device_from_xs(xs_encoded_device)
device_desc = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/desc' % (xid, xs_encoded_device))
if not desc_re.match(device_desc):
print >> sys.stderr, "Invalid %s device desc in VM '%s'" % (device, vm_name)
continue
visible_name = "%s:%s" % (vm_name, device)
# grab version
usb_ver = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (xid, xs_encoded_device))
if usb_ver is None or not usb_ver_re.match(usb_ver):
print >> sys.stderr, "Invalid %s device USB version in VM '%s'" % (device, vm_name)
continue
devices_list[visible_name] = {"name": visible_name, "xid":int(xid),
"vm": vm_name, "device":device,
"desc":device_desc,
"usb_ver":usb_ver}
xs.transaction_end(xs_trans)
return devices_list
def usb_check_attached(xs_trans, backend_vm, device):
"""
Checks if the given device in the given backend attached to any frontend.
Parameters:
backend_vm - xid of the backend domain
device - device name in the backend domain
Returns None or a dictionary:
vm - the name of the frontend domain
xid - xid of the frontend domain
      frontend - frontend specification in <device>-<port> format
      devid - the backend device name (the 'device' argument that was attached)
"""
# sample xs content: /local/domain/0/backend/vusb/4/0/port/1 = "7-5"
attached_dev = None
vms = xs.ls(xs_trans, '/local/domain/%d/backend/vusb' % backend_vm)
if vms is None:
return None
for vm in vms:
if not vm.isdigit():
print >> sys.stderr, "Invalid VM id"
continue
frontend_devs = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s' % (backend_vm, vm))
if frontend_devs is None:
continue
for frontend_dev in frontend_devs:
if not frontend_dev.isdigit():
print >> sys.stderr, "Invalid frontend in VM %s" % vm
continue
ports = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port' % (backend_vm, vm, frontend_dev))
if ports is None:
continue
for port in ports:
# FIXME: refactor, see similar loop in usb_find_unused_frontend(), use usb_list() instead?
if not port.isdigit():
print >> sys.stderr, "Invalid port in VM %s frontend %s" % (vm, frontend)
continue
dev = xs.read(xs_trans, '/local/domain/%d/backend/vusb/%s/%s/port/%s' % (backend_vm, vm, frontend_dev, port))
if dev == "":
continue
# Sanitize device id
if not usb_port_re.match(dev):
print >> sys.stderr, "Invalid device id in backend VM %d @ %s/%s/port/%s" % \
(backend_vm, vm, frontend_dev, port)
continue
if dev == device:
frontend = "%s-%s" % (frontend_dev, port)
vm_name = xl_ctx.domid_to_name(int(vm))
if vm_name is None:
# FIXME: should we wipe references to frontends running on nonexistent VMs?
continue
attached_dev = {"xid":int(vm), "frontend": frontend, "devid": device, "vm": vm_name}
break
return attached_dev
#def usb_check_frontend_busy(vm, front_dev, port):
# devport = frontend.split("-")
# if len(devport) != 2:
# raise QubesException("Malformed frontend syntax, must be in device-port format")
# # FIXME:
# # return xs.read('', '/local/domain/%d/device/vusb/%d/state' % (vm.xid, frontend)) == '4'
# return False
def usb_find_unused_frontend(xs_trans, backend_vm_xid, vm_xid, usb_ver):
"""
Find an unused frontend/port to link the given backend with the given frontend.
Creates new frontend if needed.
Returns frontend specification in <device>-<port> format.
"""
# This variable holds an index of last frontend scanned by the loop below.
# If nothing found, this value will be used to derive the index of a new frontend.
last_frontend_dev = -1
frontend_devs = xs.ls(xs_trans, "/local/domain/%d/device/vusb" % vm_xid)
if frontend_devs is not None:
for frontend_dev in frontend_devs:
if not frontend_dev.isdigit():
print >> sys.stderr, "Invalid frontend_dev in VM %d" % vm_xid
continue
frontend_dev = int(frontend_dev)
fe_path = "/local/domain/%d/device/vusb/%d" % (vm_xid, frontend_dev)
if xs.read(xs_trans, "%s/backend-id" % fe_path) == str(backend_vm_xid):
if xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/usb-ver' % (backend_vm_xid, vm_xid, frontend_dev)) != usb_ver:
last_frontend_dev = frontend_dev
continue
# here: found an existing frontend already connected to right backend using an appropriate USB version
ports = xs.ls(xs_trans, '/local/domain/%d/backend/vusb/%d/%d/port' % (backend_vm_xid, vm_xid, frontend_dev))
if ports is None:
print >> sys.stderr, "No ports in VM %d frontend_dev %d?" % (vm_xid, frontend_dev)
last_frontend_dev = frontend_dev
continue
for port in ports:
# FIXME: refactor, see similar loop in usb_check_attached(), use usb_list() instead?
if not port.isdigit():
print >> sys.stderr, "Invalid port in VM %d frontend_dev %d" % (vm_xid, frontend_dev)
continue
port = int(port)
dev = xs.read(xs_trans, '/local/domain/%d/backend/vusb/%d/%s/port/%s' % (backend_vm_xid, vm_xid, frontend_dev, port))
# Sanitize device id
if not usb_port_re.match(dev):
print >> sys.stderr, "Invalid device id in backend VM %d @ %d/%d/port/%d" % \
(backend_vm_xid, vm_xid, frontend_dev, port)
continue
if dev == "":
return '%d-%d' % (frontend_dev, port)
last_frontend_dev = frontend_dev
# create a new frontend_dev and link it to the backend
frontend_dev = last_frontend_dev + 1
usb_setup(backend_vm_xid, vm_xid, frontend_dev, usb_ver)
return '%d-%d' % (frontend_dev, 1)
def usb_attach(vm, backend_vm, device, frontend=None, auto_detach=False, wait=True):
device_attach_check(vm, backend_vm, device, frontend)
xs_trans = xs.transaction_start()
xs_encoded_device = usb_encode_device_for_xs(device)
usb_ver = xs.read(xs_trans, '/local/domain/%s/qubes-usb-devices/%s/usb-ver' % (backend_vm.xid, xs_encoded_device))
if usb_ver is None or not usb_ver_re.match(usb_ver):
xs.transaction_end(xs_trans)
raise QubesException("Invalid %s device USB version in VM '%s'" % (device, backend_vm.name))
if frontend is None:
frontend = usb_find_unused_frontend(xs_trans, backend_vm.xid, vm.xid, usb_ver)
else:
# Check if any device attached at this frontend
#if usb_check_frontend_busy(vm, frontend):
# raise QubesException("Frontend %s busy in VM %s, detach it first" % (frontend, vm.name))
xs.transaction_end(xs_trans)
raise NotImplementedError("Explicit USB frontend specification is not implemented yet")
# Check if this device is attached to some domain
attached_vm = usb_check_attached(xs_trans, backend_vm.xid, device)
xs.transaction_end(xs_trans)
if attached_vm:
if auto_detach:
usb_detach(backend_vm, attached_vm)
else:
raise QubesException("Device %s from %s already connected to VM %s as %s" % (device, backend_vm.name, attached_vm['vm'], attached_vm['frontend']))
# Run helper script
xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-attach.py', str(vm.xid), device, frontend, str(backend_vm.xid) ]
subprocess.check_call(xl_cmd)
def usb_detach(backend_vm, attachment):
xl_cmd = [ '/usr/lib/qubes/xl-qvm-usb-detach.py', str(attachment['xid']), attachment['devid'], attachment['frontend'], str(backend_vm.xid) ]
subprocess.check_call(xl_cmd)
def usb_detach_all(vm):
raise NotImplementedError("Detaching all devices from a given VM is not implemented yet")
####### QubesWatch ######
def only_in_first_list(l1, l2):
ret=[]
for i in l1:
if not i in l2:
ret.append(i)
return ret
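# Illustrative values (sketch): only_in_first_list([1, 2, 3], [2, 4]) -> [1, 3]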
class QubesWatch(object):
class WatchType(object):
def __init__(self, fn, param):
self.fn = fn
self.param = param
def __init__(self):
self.xs = xen.lowlevel.xs.xs()
self.watch_tokens_block = {}
self.watch_tokens_vbd = {}
self.block_callback = None
self.domain_callback = None
self.xs.watch('@introduceDomain', QubesWatch.WatchType(self.domain_list_changed, None))
self.xs.watch('@releaseDomain', QubesWatch.WatchType(self.domain_list_changed, None))
def setup_block_watch(self, callback):
old_block_callback = self.block_callback
self.block_callback = callback
if old_block_callback is not None and callback is None:
# remove watches
self.update_watches_vbd([])
self.update_watches_block([])
else:
# possibly add watches
self.domain_list_changed(None)
def setup_domain_watch(self, callback):
self.domain_callback = callback
def get_block_key(self, xid):
return '/local/domain/%s/qubes-block-devices' % xid
def get_vbd_key(self, xid):
return '/local/domain/%s/device/vbd' % xid
def update_watches_block(self, xid_list):
for i in only_in_first_list(xid_list, self.watch_tokens_block.keys()):
#new domain has been created
watch = QubesWatch.WatchType(self.block_callback, i)
self.watch_tokens_block[i] = watch
self.xs.watch(self.get_block_key(i), watch)
for i in only_in_first_list(self.watch_tokens_block.keys(), xid_list):
#domain destroyed
self.xs.unwatch(self.get_block_key(i), self.watch_tokens_block[i])
self.watch_tokens_block.pop(i)
def update_watches_vbd(self, xid_list):
for i in only_in_first_list(xid_list, self.watch_tokens_vbd.keys()):
#new domain has been created
watch = QubesWatch.WatchType(self.block_callback, i)
self.watch_tokens_vbd[i] = watch
self.xs.watch(self.get_vbd_key(i), watch)
for i in only_in_first_list(self.watch_tokens_vbd.keys(), xid_list):
#domain destroyed
self.xs.unwatch(self.get_vbd_key(i), self.watch_tokens_vbd[i])
self.watch_tokens_vbd.pop(i)
def domain_list_changed(self, param):
curr = self.xs.ls('', '/local/domain')
if curr == None:
return
if self.domain_callback:
self.domain_callback()
if self.block_callback:
self.update_watches_block(curr)
self.update_watches_vbd(curr)
def watch_single(self):
result = self.xs.read_watch()
token = result[1]
token.fn(token.param)
def watch_loop(self):
while True:
self.watch_single()
######## Backups #########
def get_disk_usage(file_or_dir):
if not os.path.exists(file_or_dir):
return 0
p = subprocess.Popen (["du", "-s", "--block-size=1", file_or_dir],
stdout=subprocess.PIPE)
result = p.communicate()
m = re.match(r"^(\d+)\s.*", result[0])
sz = int(m.group(1)) if m is not None else 0
return sz
def file_to_backup (file_path, sz = None):
if sz is None:
sz = os.path.getsize (qubes_store_filename)
abs_file_path = os.path.abspath (file_path)
abs_base_dir = os.path.abspath (qubes_base_dir) + '/'
abs_file_dir = os.path.dirname (abs_file_path) + '/'
(nothing, dir, subdir) = abs_file_dir.partition (abs_base_dir)
assert nothing == ""
assert dir == abs_base_dir
return [ { "path" : file_path, "size": sz, "subdir": subdir} ]
def backup_prepare(base_backup_dir, vms_list = None, exclude_list = [], print_callback = print_stdout):
"""If vms = None, include all (sensible) VMs; exclude_list is always applied"""
'''
if not os.path.exists (base_backup_dir):
raise QubesException("The target directory doesn't exist!")
'''
files_to_backup = file_to_backup (qubes_store_filename)
if exclude_list is None:
exclude_list = []
qvm_collection = None
if vms_list is None:
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_writing()
qvm_collection.load()
all_vms = [vm for vm in qvm_collection.values()]
appvms_to_backup = [vm for vm in all_vms if vm.is_appvm() and not vm.internal]
netvms_to_backup = [vm for vm in all_vms if vm.is_netvm() and not vm.qid == 0]
template_vms_worth_backingup = [vm for vm in all_vms if (vm.is_template() and not vm.installed_by_rpm)]
vms_list = appvms_to_backup + netvms_to_backup + template_vms_worth_backingup
vms_for_backup = vms_list
# Apply exclude list
if exclude_list:
vms_for_backup = [vm for vm in vms_list if vm.name not in exclude_list]
no_vms = len (vms_for_backup)
there_are_running_vms = False
fields_to_display = [
{ "name": "VM", "width": 16},
{ "name": "type","width": 12 },
{ "name": "size", "width": 12}
]
# Display the header
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(f["width"] + 1)
s += fmt.format(f["name"])
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
for vm in vms_for_backup:
if vm.is_template():
# handle templates later
continue
if vm.private_img is not None:
vm_sz = vm.get_disk_usage (vm.private_img)
files_to_backup += file_to_backup(vm.private_img, vm_sz )
if vm.is_appvm():
files_to_backup += file_to_backup(vm.icon_path)
if vm.updateable:
if os.path.exists(vm.dir_path + "/apps.templates"):
# template
files_to_backup += file_to_backup(vm.dir_path + "/apps.templates")
else:
# standaloneVM
files_to_backup += file_to_backup(vm.dir_path + "/apps")
if os.path.exists(vm.dir_path + "/kernels"):
files_to_backup += file_to_backup(vm.dir_path + "/kernels")
if os.path.exists (vm.firewall_conf):
files_to_backup += file_to_backup(vm.firewall_conf)
if os.path.exists(vm.dir_path + '/whitelisted-appmenus.list'):
files_to_backup += file_to_backup(vm.dir_path + '/whitelisted-appmenus.list')
if vm.updateable:
sz = vm.get_disk_usage(vm.root_img)
files_to_backup += file_to_backup(vm.root_img, sz)
vm_sz += sz
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format(vm.name)
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
if vm.is_netvm():
s += fmt.format("NetVM" + (" + Sys" if vm.updateable else ""))
else:
s += fmt.format("AppVM" + (" + Sys" if vm.updateable else ""))
fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(vm_sz))
if vm.is_running():
s += " <-- The VM is running, please shut it down before proceeding with the backup!"
there_are_running_vms = True
print_callback(s)
for vm in vms_for_backup:
if not vm.is_template():
# already handled
continue
vm_sz = vm.get_disk_utilization()
files_to_backup += file_to_backup (vm.dir_path, vm_sz)
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format(vm.name)
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
s += fmt.format("Template VM")
fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(vm_sz))
if vm.is_running():
s += " <-- The VM is running, please shut it down before proceeding with the backup!"
there_are_running_vms = True
print_callback(s)
# Initialize backup flag on all VMs
for vm in qvm_collection.values():
vm.backup_content = False
if vm in vms_for_backup:
vm.backup_content = True
vm.backup_size = vm.get_disk_utilization()
vm.backup_path = vm.dir_path.split(os.path.normpath(qubes_base_dir)+"/")[1]
qvm_collection.save()
# FIXME: should be after backup completed
qvm_collection.unlock_db()
# Dom0 user home
if not 'dom0' in exclude_list:
local_user = grp.getgrnam('qubes').gr_mem[0]
home_dir = pwd.getpwnam(local_user).pw_dir
# Home dir should have only user-owned files, so fix it now to prevent
        # permissions problems - some root-owned files can be left after
# 'sudo bash' and similar commands
subprocess.check_call(['sudo', 'chown', '-R', local_user, home_dir])
home_sz = get_disk_usage(home_dir)
home_to_backup = [ { "path" : home_dir, "size": home_sz, "subdir": 'dom0-home'} ]
files_to_backup += home_to_backup
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format('Dom0')
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1)
s += fmt.format("User home")
fmt="{{0:>{0}}} |".format(fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(home_sz))
print_callback(s)
total_backup_sz = 0
for file in files_to_backup:
total_backup_sz += file["size"]
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
s = ""
fmt="{{0:>{0}}} |".format(fields_to_display[0]["width"] + 1)
s += fmt.format("Total size:")
fmt="{{0:>{0}}} |".format(fields_to_display[1]["width"] + 1 + 2 + fields_to_display[2]["width"] + 1)
s += fmt.format(size_to_human(total_backup_sz))
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(f["width"] + 1)
s += fmt.format('-')
print_callback(s)
'''
stat = os.statvfs(base_backup_dir)
backup_fs_free_sz = stat.f_bsize * stat.f_bavail
print_callback("")
if (total_backup_sz > backup_fs_free_sz):
raise QubesException("Not enough space available on the backup filesystem!")
if (there_are_running_vms):
raise QubesException("Please shutdown all VMs before proceeding.")
print_callback("-> Available space: {0}".format(size_to_human(backup_fs_free_sz)))
'''
return files_to_backup
def backup_do(base_backup_dir, files_to_backup, progress_callback = None):
total_backup_sz = 0
for file in files_to_backup:
total_backup_sz += file["size"]
backup_dir = base_backup_dir + "/qubes-{0}".format (time.strftime("%Y-%m-%d-%H%M%S"))
if os.path.exists (backup_dir):
raise QubesException("ERROR: the path {0} already exists?!".format(backup_dir))
os.mkdir (backup_dir)
if not os.path.exists (backup_dir):
raise QubesException("Strange: couldn't create backup dir: {0}?!".format(backup_dir))
bytes_backedup = 0
for file in files_to_backup:
# We prefer to use Linux's cp, because it nicely handles sparse files
progress = bytes_backedup * 100 / total_backup_sz
progress_callback(progress)
dest_dir = backup_dir + '/' + file["subdir"]
if file["subdir"] != "":
retcode = subprocess.call (["mkdir", "-p", dest_dir])
if retcode != 0:
raise QubesException("Cannot create directory: {0}?!".format(dest_dir))
retcode = subprocess.call (["cp", "-rp", file["path"], dest_dir])
if retcode != 0:
raise QubesException("Error while copying file {0} to {1}".format(file["path"], dest_dir))
bytes_backedup += file["size"]
progress = bytes_backedup * 100 / total_backup_sz
progress_callback(progress)
def backup_do_copy(base_backup_dir, files_to_backup, passphrase, progress_callback = None, encrypt=False, appvm=None):
total_backup_sz = 0
for file in files_to_backup:
total_backup_sz += file["size"]
vmproc = None
if appvm != None:
# Prepare the backup target (Qubes service call)
backup_target = "QUBESRPC qubes.Backup none"
# does the vm exist?
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
vm = qvm_collection.get_vm_by_name(appvm)
if vm is None or vm.qid not in qvm_collection:
raise QubesException("VM {0} does not exist".format(appvm))
qvm_collection.unlock_db()
# If APPVM, STDOUT is a PIPE
vmproc = vm.run(command = backup_target, passio_popen = True)
vmproc.stdin.write(base_backup_dir.replace("\r","").replace("\n","")+"\n")
backup_stdout = vmproc.stdin
else:
# Prepare the backup target (local file)
backup_target = base_backup_dir + "/qubes-{0}".format (time.strftime("%Y-%m-%d-%H%M%S"))
# Create the target directory
if not os.path.exists (base_backup_dir):
raise QubesException("ERROR: the backup directory {0} does not exists".format(base_backup_dir))
# If not APPVM, STDOUT is a local file
backup_stdout = open(backup_target,'wb')
blocks_backedup = 0
    progress = blocks_backedup * 100 / total_backup_sz
progress_callback(progress)
import tempfile
feedback_file = tempfile.NamedTemporaryFile()
backup_tmpdir = tempfile.mkdtemp(prefix="/var/tmp/backup_")
    # Tar with tapelength does not deal well with stdout (it closes stdout between two tapes)
# For this reason, we will use named pipes instead
print "Working in",backup_tmpdir
backup_pipe = os.path.join(backup_tmpdir,"backup_pipe")
print "Creating pipe in:",backup_pipe
print os.mkfifo(backup_pipe)
print "Will backup:",files_to_backup
# Setup worker to send encrypted data chunks to the backup_target
from multiprocessing import Queue,Process
class Send_Worker(Process):
def __init__(self,queue,base_dir,backup_stdout):
super(Send_Worker, self).__init__()
self.queue = queue
self.base_dir = base_dir
self.backup_stdout = backup_stdout
def run(self):
print "Started sending thread"
print "Moving to temporary dir",self.base_dir
os.chdir(self.base_dir)
for filename in iter(self.queue.get,None):
if filename == "FINISHED":
break
print "Sending file",filename
                # This tar, used for sending data out, needs to be as simple and featureless as possible. It will not be verified before untarring.
tar_final_cmd = ["tar", "-cO", "--posix", "-C", self.base_dir, filename]
final_proc = subprocess.Popen (tar_final_cmd, stdin=subprocess.PIPE, stdout=self.backup_stdout)
final_proc.wait()
# Delete the file as we don't need it anymore
print "Removing file",filename
os.remove(filename)
print "Finished sending thread"
global blocks_backedup
blocks_backedup = 0
def compute_progress(new_size, total_backup_sz):
global blocks_backedup
blocks_backedup += new_size
progress = blocks_backedup / float(total_backup_sz)
progress_callback(int(round(progress*100,2)))
to_send = Queue()
send_proc = Send_Worker(to_send, backup_tmpdir, backup_stdout)
send_proc.start()
for filename in files_to_backup:
print "Backing up",filename
backup_tempfile = os.path.join(backup_tmpdir,filename["path"].split(os.path.normpath(qubes_base_dir)+"/")[1])
print "Using temporary location:",backup_tempfile
# Ensure the temporary directory exists
if not os.path.isdir(os.path.dirname(backup_tempfile)):
os.makedirs(os.path.dirname(backup_tempfile))
        # The first tar command can use any complex features we want. Files will be verified before untarring this.
tar_cmdline = ["tar", "-Pc", "-f", backup_pipe,'--sparse','--tape-length',str(1000000),'-C',qubes_base_dir,
filename["path"].split(os.path.normpath(qubes_base_dir)+"/")[1]
]
print " ".join(tar_cmdline)
# Tips: Popen(bufsize=0)
# Pipe: tar-sparse | encryptor [| hmac] | tar | backup_target
# Pipe: tar-sparse [| hmac] | tar | backup_target
tar_sparse = subprocess.Popen (tar_cmdline,stdin=subprocess.PIPE)
        # Wait for the compressor (tar) process to finish or for any error in the other subprocesses
i=0
run_error = "paused"
running = []
while run_error == "paused":
pipe = open(backup_pipe,'rb')
# Start HMAC
hmac = subprocess.Popen (["openssl", "dgst", "-hmac", passphrase], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Prepare a first chunk
chunkfile = backup_tempfile + "." + "%03d" % i
i += 1
chunkfile_p = open(chunkfile,'wb')
if encrypt:
# Start encrypt
# If no cipher is provided, the data is forwarded unencrypted !!!
# Also note that the
encryptor = subprocess.Popen (["openssl", "enc", "-e", "-aes-256-cbc", "-pass", "pass:"+passphrase], stdin=pipe, stdout=subprocess.PIPE)
run_error = wait_backup_feedback(compute_progress, encryptor.stdout, encryptor, chunkfile_p, total_backup_sz, hmac=hmac, vmproc=vmproc, addproc=tar_sparse)
else:
run_error = wait_backup_feedback(compute_progress, pipe, None, chunkfile_p, total_backup_sz, hmac=hmac, vmproc=vmproc, addproc=tar_sparse)
chunkfile_p.close()
print "Wait_backup_feedback returned:",run_error
        if run_error is not None and len(run_error) > 0:
send_proc.terminate()
raise QubesException("Failed to perform backup: error with "+run_error)
# Send the chunk to the backup target
to_send.put(chunkfile.split(os.path.normpath(backup_tmpdir)+"/")[1])
# Close HMAC
hmac.stdin.close()
hmac.wait()
print "HMAC proc return code:",hmac.poll()
# Write HMAC data next to the chunk file
hmac_data = hmac.stdout.read()
print "Writing hmac to",chunkfile+".hmac"
hmac_file = open(chunkfile+".hmac",'w')
hmac_file.write(hmac_data)
hmac_file.flush()
hmac_file.close()
# Send the HMAC to the backup target
to_send.put(chunkfile.split(os.path.normpath(backup_tmpdir)+"/")[1]+".hmac")
if tar_sparse.poll() == None:
# Release the next chunk
print "Release next chunk for process:",tar_sparse.poll()
#tar_sparse.stdout = subprocess.PIPE
tar_sparse.stdin.write("\n")
run_error="paused"
else:
print "Finished tar sparse with error",tar_sparse.poll()
pipe.close()
# Close the backup target and wait for it to finish
#backup_stdout.close()
to_send.put("FINISHED")
send_proc.join()
if send_proc.exitcode != 0:
raise QubesException("Failed to send backup: error in the sending process")
if vmproc:
print "VMProc1 proc return code:",vmproc.poll()
print "Sparse1 proc return code:",tar_sparse.poll()
vmproc.stdin.close()
'''
' Wait for backup chunk to finish
' - Monitor all the processes (streamproc, hmac, vmproc, addproc) for errors
' - Copy stdout of streamproc to backup_target and to hmac stdin if available
' - Compute progress based on total_backup_sz and send progress to the progress_callback function
' - Returns when
'    - one of the monitored processes errors out (streamproc, hmac, vmproc, addproc), along with the name of the process that failed
'    - all of the monitored processes except vmproc finish successfully (vmproc termination is controlled by the python script)
'    - streamproc does not deliver any data anymore (returns with the error "paused")
'''
def wait_backup_feedback(progress_callback, in_stream, streamproc, backup_target, total_backup_sz, hmac=None, vmproc=None, addproc=None, remove_trailing_bytes=0):
buffer_size = 4096
run_error = None
run_count = 1
blocks_backedup = 0
while run_count > 0 and run_error == None:
buffer = in_stream.read(buffer_size)
progress_callback(len(buffer),total_backup_sz)
run_count = 0
if hmac:
retcode=hmac.poll()
if retcode != None:
if retcode != 0:
run_error = "hmac"
else:
run_count += 1
if addproc:
retcode=addproc.poll()
#print "Tar proc status:",retcode
if retcode != None:
if retcode != 0:
run_error = "addproc"
else:
run_count += 1
if vmproc:
retcode = vmproc.poll()
if retcode != None:
if retcode != 0:
run_error = "VM"
print vmproc.stdout.read()
else:
# VM should run until the end
pass
if streamproc:
retcode=streamproc.poll()
if retcode != None:
if retcode != 0:
run_error = "streamproc"
elif retcode == 0 and len(buffer) <= 0:
return ""
else:
#print "INFO: last packet"
#if remove_trailing_bytes > 0:
# print buffer.encode("hex")
# buffer = buffer[:-remove_trailing_bytes]
# print buffer.encode("hex")
backup_target.write(buffer)
if hmac:
hmac.stdin.write(buffer)
run_count += 1
else:
#print "Process running:",len(buffer)
# Process still running
backup_target.write(buffer)
if hmac:
hmac.stdin.write(buffer)
run_count += 1
else:
if len(buffer) <= 0:
return ""
else:
backup_target.write(buffer)
if hmac:
hmac.stdin.write(buffer)
return run_error
def restore_vm_dirs (backup_dir, backup_tmpdir, passphrase, vms_dirs, vms, vms_size, print_callback=None, error_callback=None, progress_callback=None, encrypted=False, appvm=None):
# Setup worker to extract encrypted data chunks to the restore dirs
from multiprocessing import Queue,Process
class Extract_Worker(Process):
def __init__(self,queue,base_dir,passphrase,encrypted,total_size,print_callback,error_callback,progress_callback,vmproc=None):
super(Extract_Worker, self).__init__()
self.queue = queue
self.base_dir = base_dir
self.passphrase = passphrase
self.encrypted = encrypted
self.total_size = total_size
self.blocks_backedup = 0
self.tar2_command = None
self.print_callback = print_callback
self.error_callback = error_callback
self.progress_callback = progress_callback
self.vmproc = vmproc
self.restore_pipe = os.path.join(self.base_dir,"restore_pipe")
print "Creating pipe in:",self.restore_pipe
print os.mkfifo(self.restore_pipe)
def compute_progress(self, new_size, total_size):
self.blocks_backedup += new_size
progress = self.blocks_backedup / float(self.total_size)
progress = int(round(progress*100,2))
self.progress_callback(progress)
def run(self):
self.print_callback("Started sending thread")
self.print_callback("Moving to dir "+self.base_dir)
os.chdir(self.base_dir)
for filename in iter(self.queue.get,None):
if filename == "FINISHED":
break
self.print_callback("Extracting file "+filename+" to "+qubes_base_dir)
if self.tar2_command == None:
                    # FIXME: Make the extraction safer by avoiding erasing other vms:
                    # - extracting directly to the target directory (based on the vm name and by using --strip=2).
                    # - ensuring that the leading slashes are ignored when extracting (can also be obtained by running with --strip ?)
self.tar2_command = ['tar', '--tape-length','1000000', '-C', qubes_base_dir, '-xvf', self.restore_pipe]
self.print_callback("Running command "+str(self.tar2_command))
self.tar2_command = subprocess.Popen(self.tar2_command,stdin=subprocess.PIPE)
pipe = open(self.restore_pipe,'r+b')
if self.encrypted:
# Start decrypt
encryptor = subprocess.Popen (["openssl", "enc", "-d", "-aes-256-cbc", "-pass", "pass:"+passphrase], stdin=open(filename,'rb'), stdout=subprocess.PIPE)
# progress_callback, in_stream, streamproc, backup_target, total_backup_sz, hmac=None, vmproc=None, addproc=None, remove_trailing_bytes=0):
run_error = wait_backup_feedback(self.compute_progress, encryptor.stdout, encryptor, pipe, self.total_size, hmac=None, vmproc=self.vmproc, addproc=self.tar2_command)
#print "End wait_backup_feedback",run_error,self.tar2_command.poll(),encryptor.poll()
else:
run_error = wait_backup_feedback(self.compute_progress, open(filename,"rb"), None, pipe, self.total_size, hmac=None, vmproc=self.vmproc, addproc=self.tar2_command)
pipe.close()
self.print_callback("Run error:"+run_error)
self.print_callback(str(self.tar2_command.poll()))
if self.tar2_command.poll() != None:
if self.tar2_command.poll() != 0:
raise QubesException("ERROR: unable to extract files for {0}.".format(filename))
else:
# Finished extracting the tar file
self.tar2_command = None
else:
self.print_callback("Releasing next chunck")
self.tar2_command.stdin.write("\n")
# Delete the file as we don't need it anymore
self.print_callback("Removing file "+filename)
os.remove(filename)
self.print_callback("Finished extracting thread")
if progress_callback == None:
def progress_callback(data):
pass
to_extract = Queue()
extract_proc = Extract_Worker(to_extract, backup_tmpdir, passphrase, encrypted, vms_size, print_callback, error_callback, progress_callback)
extract_proc.start()
print_callback("Working in temporary dir:"+backup_tmpdir)
print_callback(str(vms_size)+" bytes to restore")
vmproc = None
if appvm != None:
# Prepare the backup target (Qubes service call)
backup_target = "QUBESRPC qubes.Restore none"
# does the vm exist?
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
vm = qvm_collection.get_vm_by_name(appvm)
if vm is None or vm.qid not in qvm_collection:
raise QubesException("VM {0} does not exist".format(appvm))
qvm_collection.unlock_db()
# If APPVM, STDOUT is a PIPE
vmproc = vm.run(command = backup_target, passio_popen = True)
vmproc.stdin.write(backup_dir.replace("\r","").replace("\n","")+"\n")
backup_stdin = vmproc.stdout
else:
backup_stdin = open(backup_dir,'rb')
# FIXME: Use a safer program such as cpio, modified uncompress.c, or try to extract it from
tar1_command = ['tar', '-i', '-xv', '-C', backup_tmpdir]
tar1_command.extend(vms_dirs)
print_callback("Run command"+str(tar1_command))
command = subprocess.Popen(tar1_command, stdin=backup_stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    while command.poll() is None and (vmproc is None or vmproc.poll() is None):
filename = command.stdout.readline().strip(" \t\r\n")
print_callback("Getting new file:"+filename)
hmacfile = command.stdout.readline().strip(" \t\r\n")
print_callback("Getting hmac:"+hmacfile)
print_callback("Verifying file"+filename)
hmac_proc = subprocess.Popen (["openssl", "dgst", "-hmac", passphrase], stdin=open(os.path.join(backup_tmpdir,filename),'rb'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = hmac_proc.communicate()
if len(stderr) > 0:
raise QubesException("ERROR: verify file {0}: {1}".format((filename,stderr)))
else:
print_callback("Loading hmac for file"+filename)
hmac = load_hmac(open(os.path.join(backup_tmpdir,filename+".hmac"),'r').read())
if len(hmac) > 0 and load_hmac(stdout) == hmac:
print_callback("File verification OK -> Sending file "+filename+" for extraction")
# Send the chunk to the backup target
to_extract.put(os.path.join(backup_tmpdir,filename))
else:
raise QubesException("ERROR: invalid hmac for file {0}: {1}. Is the passphrase correct?".format(filename,load_hmac(stdout)))
    if command.poll() != 0:
        raise QubesException("ERROR: unable to read the qubes backup file {0}. Is it really a backup?".format(backup_dir))
    if vmproc is not None and vmproc.poll() != 0:
        raise QubesException("ERROR: unable to read the qubes backup {0} because of a VM error: {1}".format(backup_dir, vmproc.stderr.read()))
print "Extraction process status:",extract_proc.exitcode
to_extract.put("FINISHED")
print_callback("Waiting for the extraction process to finish...")
extract_proc.join()
print_callback("Extraction process finished with code:"+str(extract_proc.exitcode))
if extract_proc.exitcode != 0:
raise QubesException("ERROR: unable to extract the qubes backup. Check extracting process errors.")
def backup_restore_set_defaults(options):
if 'use-default-netvm' not in options:
options['use-default-netvm'] = False
if 'use-none-netvm' not in options:
options['use-none-netvm'] = False
if 'use-default-template' not in options:
options['use-default-template'] = False
if 'dom0-home' not in options:
options['dom0-home'] = True
    if 'replace-template' not in options:
        options['replace-template'] = []
    if 'ignore-dom0-username-mismatch' not in options:
        options['ignore-dom0-username-mismatch'] = False
    return options
def load_hmac(hmac):
hmac = hmac.strip(" \t\r\n").split("=")
if len(hmac) > 1:
hmac = hmac[1].strip()
else:
raise QubesException("ERROR: invalid hmac file content")
return hmac
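# Illustrative value (sketch; openssl dgst typically prints lines such as "HMAC-SHA1(stdin)= <hex>"):
#   load_hmac("HMAC-SHA1(stdin)= 3f2a9b0c\n") -> "3f2a9b0c"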
import struct
def get_qfile_error(buffer):
error = struct.unpack("I",buffer[0:4])[0]
error_msg = { 0: "COPY_FILE_OK",
1: "COPY_FILE_READ_EOF",
2: "COPY_FILE_READ_ERROR",
3: "COPY_FILE_WRITE_ERROR",
}
if error in error_msg.keys():
return error_msg[error]
else:
return "UNKNOWN_ERROR_"+str(error)
def backup_restore_header(restore_target, passphrase, encrypt=False, appvm=None):
# Simulate dd if=backup_file count=10 | file -
# Simulate dd if=backup_file count=10 | gpg2 -d | tar xzv -O
# analysis = subprocess.Popen()
vmproc = None
import tempfile
feedback_file = tempfile.NamedTemporaryFile()
backup_tmpdir = tempfile.mkdtemp(prefix="/var/tmp/restore_")
os.chdir(backup_tmpdir)
    # Tar with tapelength does not deal well with stdout (it closes stdout between two tapes)
# For this reason, we will use named pipes instead
print "Working in",backup_tmpdir
if appvm != None:
# Prepare the backup target (Qubes service call)
restore_command = "QUBESRPC qubes.Restore none"
# does the vm exist?
qvm_collection = QubesVmCollection()
qvm_collection.lock_db_for_reading()
qvm_collection.load()
vm = qvm_collection.get_vm_by_name(appvm)
if vm is None or vm.qid not in qvm_collection:
raise QubesException("VM {0} does not exist".format(appvm))
qvm_collection.unlock_db()
# If APPVM, STDOUT is a PIPE
vmproc = vm.run(command = restore_command, passio_popen = True, passio_stderr = True)
vmproc.stdin.write(restore_target.replace("\r","").replace("\n","")+"\n")
else:
# Create the target directory
if not os.path.exists (restore_target):
raise QubesException("ERROR: the backup directory {0} does not exists".format(restore_target))
fp = open(restore_target,'rb')
headers = fp.read(4096*16)
tar1_command = ['/usr/lib/qubes/qfile-dom0-unpacker', str(os.getuid()), backup_tmpdir]
command = subprocess.Popen(tar1_command,stdin=vmproc.stdout,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
result_header = command.stdout.read()
if vmproc.poll() != None:
error = vmproc.stderr.read()
print error
print vmproc.poll(),command.poll()
raise QubesException("ERROR: Immediate VM error while retrieving backup headers:{0}".format(error))
filename = "qubes.xml.000"
print result_header.encode("hex")
error_msg = get_qfile_error(result_header)
if error_msg != "COPY_FILE_OK":
print vmproc.stdout.read()
raise QubesException("ERROR: unpacking backup headers: {0}".format(error_msg))
if not os.path.exists(os.path.join(backup_tmpdir,filename+".hmac")):
raise QubesException("ERROR: header not extracted correctly: {0}".format(os.path.join(backup_tmpdir,filename+".hmac")))
command.terminate()
command.wait()
if vmproc.poll() != None and vmproc.poll() != 0:
error = vmproc.stderr.read()
print error
print vmproc.poll(),command.poll()
raise QubesException("ERROR: VM error retrieving backup headers")
elif command.poll() != None and command.poll() not in [0,-15]:
error = command.stderr.read()
print error
print vmproc.poll(),command.poll()
raise QubesException("ERROR: retrieving backup headers:{0}".format(error))
if vmproc.poll() == None:
vmproc.terminate()
vmproc.wait()
print "Loading hmac for file",filename
hmac = load_hmac(open(os.path.join(backup_tmpdir,filename+".hmac"),'r').read())
print "Successfully retrieved headers"
print "Verifying file",filename
hmac_proc = subprocess.Popen (["openssl", "dgst", "-hmac", passphrase], stdin=open(os.path.join(backup_tmpdir,filename),'rb'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout,stderr = hmac_proc.communicate()
if len(stderr) > 0:
raise QubesException("ERROR: verify file {0}: {1}".format((filename,stderr)))
else:
if len(hmac) > 0 and load_hmac(stdout) == hmac:
print "File verification OK -> Extracting archive",filename
if encrypt:
print "Starting decryption process"
encryptor = subprocess.Popen (["openssl", "enc", "-d", "-aes-256-cbc", "-pass", "pass:"+passphrase], stdin=open(os.path.join(backup_tmpdir,filename),'rb'), stdout=subprocess.PIPE)
tarhead_command = subprocess.Popen(['tar', '--tape-length','1000000', '-xv'],stdin=encryptor.stdout)
else:
print "No decryption process required"
encryptor = None
tarhead_command = subprocess.Popen(['tar', '--tape-length','1000000', '-xvf', os.path.join(backup_tmpdir,filename)])
tarhead_command.wait()
if encryptor:
if encryptor.poll() != 0:
raise QubesException("ERROR: unable to decrypt file {0}".format(filename))
if tarhead_command.poll() != 0:
raise QubesException("ERROR: unable to extract the qubes.xml file. Is archive encrypted?")
return (backup_tmpdir,"qubes.xml")
else:
raise QubesException("ERROR: unable to verify the qubes.xml file. Is the passphrase correct?")
return None
def backup_restore_prepare(backup_dir, qubes_xml, passphrase, options = {}, host_collection = None, encrypt=False, appvm=None):
# Defaults
backup_restore_set_defaults(options)
#### Private functions begin
def is_vm_included_in_backup (backup_dir, vm):
if vm.qid == 0:
# Dom0 is not included, obviously
return False
if vm.backup_content:
return True
else:
return False
def find_template_name(template, replaces):
rx_replace = re.compile("(.*):(.*)")
for r in replaces:
m = rx_replace.match(r)
            if m is not None and m.group(1) == template:
return m.group(2)
return template
#### Private functions end
print "Loading file",qubes_xml
backup_collection = QubesVmCollection(store_filename = qubes_xml)
backup_collection.lock_db_for_reading()
backup_collection.load()
if host_collection is None:
host_collection = QubesVmCollection()
host_collection.lock_db_for_reading()
host_collection.load()
host_collection.unlock_db()
backup_vms_list = [vm for vm in backup_collection.values()]
host_vms_list = [vm for vm in host_collection.values()]
vms_to_restore = {}
there_are_conflicting_vms = False
there_are_missing_templates = False
there_are_missing_netvms = False
dom0_username_mismatch = False
restore_home = False
# ... and the actual data
for vm in backup_vms_list:
if is_vm_included_in_backup (backup_dir, vm):
print vm.name,"is included in backup"
vms_to_restore[vm.name] = {}
vms_to_restore[vm.name]['vm'] = vm;
if 'exclude' in options.keys():
vms_to_restore[vm.name]['excluded'] = vm.name in options['exclude']
vms_to_restore[vm.name]['good-to-go'] = False
if host_collection.get_vm_by_name (vm.name) is not None:
vms_to_restore[vm.name]['already-exists'] = True
vms_to_restore[vm.name]['good-to-go'] = False
if vm.template is None:
vms_to_restore[vm.name]['template'] = None
else:
templatevm_name = find_template_name(vm.template.name, options['replace-template'])
vms_to_restore[vm.name]['template'] = templatevm_name
template_vm_on_host = host_collection.get_vm_by_name (templatevm_name)
# No template on the host?
if not ((template_vm_on_host is not None) and template_vm_on_host.is_template()):
# Maybe the (custom) template is in the backup?
template_vm_on_backup = backup_collection.get_vm_by_name (templatevm_name)
if template_vm_on_backup is None or not \
(is_vm_included_in_backup(backup_dir, template_vm_on_backup) and \
template_vm_on_backup.is_template()):
if options['use-default-template']:
vms_to_restore[vm.name]['orig-template'] = templatevm_name
vms_to_restore[vm.name]['template'] = host_collection.get_default_template().name
else:
vms_to_restore[vm.name]['missing-template'] = True
vms_to_restore[vm.name]['good-to-go'] = False
if vm.netvm is None:
vms_to_restore[vm.name]['netvm'] = None
else:
netvm_name = vm.netvm.name
vms_to_restore[vm.name]['netvm'] = netvm_name
# Set to None to not confuse QubesVm object from backup
# collection with host collection (further in clone_attrs). Set
# directly _netvm to suppress setter action, especially
# modifying firewall
vm._netvm = None
netvm_on_host = host_collection.get_vm_by_name (netvm_name)
# No netvm on the host?
if not ((netvm_on_host is not None) and netvm_on_host.is_netvm()):
# Maybe the (custom) netvm is in the backup?
netvm_on_backup = backup_collection.get_vm_by_name (netvm_name)
if not ((netvm_on_backup is not None) and netvm_on_backup.is_netvm() and is_vm_included_in_backup(backup_dir, netvm_on_backup)):
if options['use-default-netvm']:
vms_to_restore[vm.name]['netvm'] = host_collection.get_default_netvm().name
vm.uses_default_netvm = True
elif options['use-none-netvm']:
vms_to_restore[vm.name]['netvm'] = None
else:
vms_to_restore[vm.name]['missing-netvm'] = True
vms_to_restore[vm.name]['good-to-go'] = False
if 'good-to-go' not in vms_to_restore[vm.name].keys():
vms_to_restore[vm.name]['good-to-go'] = True
# ...and dom0 home
# FIXME, replace this part of code to handle the new backup format using tar
if options['dom0-home'] and os.path.exists(backup_dir + '/dom0-home'):
vms_to_restore['dom0'] = {}
local_user = grp.getgrnam('qubes').gr_mem[0]
dom0_homes = os.listdir(backup_dir + '/dom0-home')
if len(dom0_homes) > 1:
raise QubesException("More than one dom0 homedir in backup")
vms_to_restore['dom0']['username'] = dom0_homes[0]
if dom0_homes[0] != local_user:
vms_to_restore['dom0']['username-mismatch'] = True
if not options['ignore-dom0-username-mismatch']:
vms_to_restore['dom0']['good-to-go'] = False
if 'good-to-go' not in vms_to_restore['dom0']:
vms_to_restore['dom0']['good-to-go'] = True
return vms_to_restore
def backup_restore_print_summary(restore_info, print_callback = print_stdout):
fields = {
"qid": {"func": "vm.qid"},
"name": {"func": "('[' if vm.is_template() else '')\
+ ('{' if vm.is_netvm() else '')\
+ vm.name \
+ (']' if vm.is_template() else '')\
+ ('}' if vm.is_netvm() else '')"},
"type": {"func": "'Tpl' if vm.is_template() else \
'HVM' if vm.type == 'HVM' else \
vm.type.replace('VM','')"},
"updbl" : {"func": "'Yes' if vm.updateable else ''"},
"template": {"func": "'n/a' if vm.is_template() or vm.template is None else\
vm_info['template']"},
"netvm": {"func": "'n/a' if vm.is_netvm() and not vm.is_proxyvm() else\
('*' if vm.uses_default_netvm else '') +\
vm_info['netvm'] if vm_info['netvm'] is not None else '-'"},
"label" : {"func" : "vm.label.name"},
}
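    # Each "func" string above is eval()'d with the current `vm` (and `vm_info`)
    # in scope to render one cell of the summary table printed below.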
fields_to_display = ["name", "type", "template", "updbl", "netvm", "label" ]
# First calculate the maximum width of each field we want to display
total_width = 0;
for f in fields_to_display:
fields[f]["max_width"] = len(f)
for vm_info in restore_info.values():
if 'vm' in vm_info.keys():
vm = vm_info['vm']
l = len(str(eval(fields[f]["func"])))
if l > fields[f]["max_width"]:
fields[f]["max_width"] = l
total_width += fields[f]["max_width"]
print_callback("")
print_callback("The following VMs are included in the backup:")
print_callback("")
# Display the header
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
s += fmt.format('-')
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
s += fmt.format(f)
print_callback(s)
s = ""
for f in fields_to_display:
fmt="{{0:-^{0}}}-+".format(fields[f]["max_width"] + 1)
s += fmt.format('-')
print_callback(s)
for vm_info in restore_info.values():
# Skip non-VM here
if not 'vm' in vm_info:
continue
vm = vm_info['vm']
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
s += fmt.format(eval(fields[f]["func"]))
if 'excluded' in vm_info and vm_info['excluded']:
s += " <-- Excluded from restore"
elif 'already-exists' in vm_info:
s += " <-- A VM with the same name already exists on the host!"
elif 'missing-template' in vm_info:
s += " <-- No matching template on the host or in the backup found!"
elif 'missing-netvm' in vm_info:
s += " <-- No matching netvm on the host or in the backup found!"
elif 'orig-template' in vm_info:
s += " <-- Original template was '%s'" % (vm_info['orig-template'])
print_callback(s)
if 'dom0' in restore_info.keys():
s = ""
for f in fields_to_display:
fmt="{{0:>{0}}} |".format(fields[f]["max_width"] + 1)
if f == "name":
s += fmt.format("Dom0")
elif f == "type":
s += fmt.format("Home")
else:
s += fmt.format("")
if 'username-mismatch' in restore_info['dom0']:
s += " <-- username in backup and dom0 mismatch"
print_callback(s)
def backup_restore_do(backup_dir, restore_tmpdir, passphrase, restore_info, host_collection = None, print_callback = print_stdout, error_callback = print_stderr, progress_callback = None, encrypted=False, appvm=None):
lock_obtained = False
if host_collection is None:
host_collection = QubesVmCollection()
host_collection.lock_db_for_writing()
host_collection.load()
lock_obtained = True
# Perform VM restoration in backup order
vms_dirs = []
vms_size = 0
vms = {}
for vm_info in restore_info.values():
if not vm_info['good-to-go']:
continue
if 'vm' not in vm_info:
continue
vm = vm_info['vm']
vms_size += vm.backup_size
vms_dirs.append(vm.backup_path+"*")
vms[vm.name] = vm
restore_vm_dirs (backup_dir, restore_tmpdir, passphrase, vms_dirs, vms, vms_size, print_callback, error_callback, progress_callback, encrypted, appvm)
# Add VM in right order
for (vm_class_name, vm_class) in sorted(QubesVmClasses.items(),
key=lambda _x: _x[1].load_order):
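        # Iterating VM classes by their load_order should mean that templates
        # and net VMs are created before the VMs that reference them (assumed
        # intent of the ordering).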
for vm_info in restore_info.values():
if not vm_info['good-to-go']:
continue
if 'vm' not in vm_info:
continue
vm = vm_info['vm']
if not vm.__class__ == vm_class:
continue
print_callback("-> Restoring {type} {0}...".format(vm.name, type=vm_class_name))
retcode = subprocess.call (["mkdir", "-p", vm.dir_path])
if retcode != 0:
error_callback("*** Cannot create directory: {0}?!".format(dest_dir))
error_callback("Skipping...")
continue
template = None
if vm.template is not None:
template_name = vm_info['template']
template = host_collection.get_vm_by_name(template_name)
new_vm = None
try:
new_vm = host_collection.add_new_vm(vm_class_name, name=vm.name,
conf_file=vm.conf_file,
dir_path=vm.dir_path,
template=template,
installed_by_rpm=False)
new_vm.verify_files()
except Exception as err:
error_callback("ERROR: {0}".format(err))
error_callback("*** Skipping VM: {0}".format(vm.name))
if new_vm:
host_collection.pop(new_vm.qid)
continue
try:
new_vm.clone_attrs(vm)
except Exception as err:
error_callback("ERROR: {0}".format(err))
error_callback("*** Some VM property will not be restored")
try:
new_vm.create_appmenus(verbose=True)
except Exception as err:
error_callback("ERROR during appmenu restore: {0}".format(err))
error_callback("*** VM '{0}' will not have appmenus".format(vm.name))
# Set network dependencies - only non-default netvm setting
for vm_info in restore_info.values():
if not vm_info['good-to-go']:
continue
if 'vm' not in vm_info:
continue
vm = vm_info['vm']
host_vm = host_collection.get_vm_by_name(vm.name)
if host_vm is None:
# Failed/skipped VM
continue
if not vm.uses_default_netvm:
host_vm.netvm = host_collection.get_vm_by_name (vm_info['netvm']) if vm_info['netvm'] is not None else None
host_collection.save()
if lock_obtained:
host_collection.unlock_db()
# ... and dom0 home as last step
if 'dom0' in restore_info.keys() and restore_info['dom0']['good-to-go']:
backup_info = restore_info['dom0']
local_user = grp.getgrnam('qubes').gr_mem[0]
home_dir = pwd.getpwnam(local_user).pw_dir
backup_dom0_home_dir = backup_dir + '/dom0-home/' + backup_info['username']
restore_home_backupdir = "home-pre-restore-{0}".format (time.strftime("%Y-%m-%d-%H%M%S"))
print_callback("-> Restoring home of user '{0}'...".format(local_user))
print_callback("--> Existing files/dirs backed up in '{0}' dir".format(restore_home_backupdir))
os.mkdir(home_dir + '/' + restore_home_backupdir)
for f in os.listdir(backup_dom0_home_dir):
home_file = home_dir + '/' + f
if os.path.exists(home_file):
os.rename(home_file, home_dir + '/' + restore_home_backupdir + '/' + f)
retcode = subprocess.call (["cp", "-nrp", backup_dom0_home_dir + '/' + f, home_file])
if retcode != 0:
error_callback("*** Error while copying file {0} to {1}".format(backup_dom0_home_dir + '/' + f, home_file))
retcode = subprocess.call(['sudo', 'chown', '-R', local_user, home_dir])
if retcode != 0:
error_callback("*** Error while setting home directory owner")
# vim:sw=4:et:
| gpl-2.0 | -220,990,330,861,483,870 | 38.955533 | 217 | 0.577195 | false |
hehongliang/tensorflow | tensorflow/python/kernel_tests/unstack_op_test.py | 1 | 6055 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Unstack Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
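# NumPy reference implementation of unstack along `axis`; used as the expected
# result in testAgainstNumpy below.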
class UnstackOpTest(test.TestCase):
def testSimple(self):
np.random.seed(7)
with test_util.use_gpu():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [
np.bool, np.float16, np.float32, np.float64, np.int32, np.int64
]:
data = np.random.randn(*shape).astype(dtype)
# Convert data to a single tensorflow tensor
x = constant_op.constant(data)
# Unstack into a list of tensors
cs = array_ops.unstack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [self.evaluate(c) for c in cs]
self.assertAllEqual(cs, data)
def testSimpleGpu(self):
if not test_util.is_gpu_available():
self.skipTest('No GPU available')
np.random.seed(7)
with test_util.force_gpu():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Convert data to a single tensorflow tensor
x = constant_op.constant(data)
# Unstack into a list of tensors
cs = array_ops.unstack(x, num=shape[0])
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
cs = [self.evaluate(c) for c in cs]
self.assertAllEqual(cs, data)
def testGradientsAxis0(self):
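    # compute_gradient_error compares the analytic gradient of unstack against
    # a numerical estimate; the test requires agreement to within 1e-6.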
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
for i in xrange(shape[0]):
with self.cached_session(use_gpu=True):
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[0])
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
shapes[i])
self.assertLess(err, 1e-6)
def testGradientsAxis1(self):
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
out_shape = list(shape)
del out_shape[1]
for i in xrange(shape[1]):
with self.cached_session(use_gpu=True):
x = constant_op.constant(data)
cs = array_ops.unstack(x, num=shape[1], axis=1)
err = gradient_checker.compute_gradient_error(x, shape, cs[i],
out_shape)
self.assertLess(err, 1e-6)
def testInferNum(self):
with self.cached_session():
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
x = array_ops.placeholder(np.float32, shape=shape)
cs = array_ops.unstack(x)
self.assertEqual(type(cs), list)
self.assertEqual(len(cs), shape[0])
def testCannotInferNumFromUnknownShape(self):
x = array_ops.placeholder(np.float32)
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape <unknown>'):
array_ops.unstack(x)
def testUnknownShapeOkWithNum(self):
x = array_ops.placeholder(np.float32)
array_ops.unstack(x, num=2)
def testCannotInferNumFromNoneShape(self):
x = array_ops.placeholder(np.float32, shape=(None,))
with self.assertRaisesRegexp(ValueError,
r'Cannot infer num from shape \((\?|None),\)'):
array_ops.unstack(x)
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
a = np.random.random(np.random.permutation(i) + 1)
# For all the possible axis to split it, including negative indices.
for j in range(-i, i):
expected = np_split_squeeze(a, j)
actual_unstack = self.evaluate(array_ops.unstack(a, axis=j))
self.assertAllEqual(expected, actual_unstack)
def testAxis0Default(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
unstacked = self.evaluate(array_ops.unstack(a))
self.assertEqual(len(unstacked), 2)
self.assertAllEqual(unstacked[0], [1, 2, 3])
self.assertAllEqual(unstacked[1], [4, 5, 6])
def testAxisOutOfRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = 2 not in \[-2, 2\)'):
array_ops.unstack(a, axis=2)
def testAxisOutOfNegativeRange(self):
a = constant_op.constant([[1, 2, 3], [4, 5, 6]], name='a')
with self.assertRaisesRegexp(ValueError, r'axis = -3 not in \[-2, 2\)'):
array_ops.unstack(a, axis=-3)
def testZeroLengthDim(self):
x = array_ops.zeros(shape=(0, 1, 2))
y = self.evaluate(array_ops.unstack(x, axis=1)[0])
self.assertEqual(y.shape, (0, 2))
if __name__ == '__main__':
test.main()
| apache-2.0 | -1,743,473,666,163,420,000 | 35.920732 | 80 | 0.608423 | false |
jenfly/atmos-read | scripts/merra-replace-data.py | 1 | 5275 | """
Replace corrupted data files with daily data re-downloaded with wget
"""
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import shutil
import xarray as xray
import numpy as np
import collections
import time
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = '/net/eady/data1/jwalker/datastore/merra2/wget/'
savedir = '/net/eady/data1/jwalker/datastore/merra2/merged/'
probdata = pd.read_csv('scripts/merra_urls/merge_data.csv', index_col=0)
# For each corrupted data file:
# - load the corrupted data file
# - load the new downloaded file for the problem day
# - calculate d/dp and other stuff
# - merge the data for the affected day
# - save into data file for the year
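# (These steps are implemented below by var_calcs() and process_row().)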
def latlon_filestr(lat1, lat2, lon1, lon2):
"""Return nicely formatted string for lat-lon range."""
latstr = atm.latlon_str(lat1, lat2, 'lat')
lonstr = atm.latlon_str(lon1, lon2, 'lon')
return lonstr + '_' + latstr
def latlon_data(var, lat1, lat2, lon1, lon2, plev=None):
"""Extract lat-lon subset of data."""
name = var.name
varnm = name
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
if plev is not None:
name = name + '%d' % plev
subset_dict['plev'] = (plev, plev)
var = atm.subset(var, subset_dict, copy=False, squeeze=True)
var.name = name
var.attrs['filestr'] = '%s_%s' % (name, latlonstr)
var.attrs['varnm'] = varnm
return var
def pgradient(var, lat1, lat2, lon1, lon2, plev):
"""Return d/dp of a lat-lon variable."""
pwidth = 100
p1, p2 = plev - pwidth, plev + pwidth
var = atm.subset(var, {'lat' : (lat1, lat2), 'lon' : (lon1, lon2),
'plev' : (p1, p2)}, copy=False, squeeze=True)
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
attrs = var.attrs
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
pres = var[pname]
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dvar_dp = atm.gradient(var, pres, axis=pdim)
dvar_dp = atm.subset(dvar_dp, {pname : (plev, plev)}, copy=False,
squeeze=True)
varnm = 'D%sDP' % var.name
name = '%s%d' % (varnm, plev)
dvar_dp.name = name
attrs['long_name'] = 'd/dp of ' + var.attrs['long_name']
attrs['standard_name'] = 'd/dp of ' + var.attrs['standard_name']
attrs['units'] = ('(%s)/Pa' % attrs['units'])
attrs[pname] = plev
attrs['filestr'] = '%s_%s' % (name, latlonstr)
attrs['varnm'] = varnm
dvar_dp.attrs = attrs
return dvar_dp
def var_calcs(filenm, varnm, plev, latlon=(-90, 90, 40, 120)):
"""Process a single variable from a single day."""
lat1, lat2, lon1, lon2 = latlon
if varnm == 'DUDP':
nm, dp = 'U', True
elif varnm == 'DOMEGADP':
nm, dp = 'OMEGA', True
else:
nm, dp = varnm, False
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if dp:
print('Computing d/dp')
var = pgradient(var, lat1, lat2, lon1, lon2, plev)
else:
var = latlon_data(var, lat1, lat2, lon1, lon2, plev)
return var
def process_row(row, datadir, savedir):
filenm1 = row['filename']
year = row['year']
varnm = row['varnm']
plev = row['plev']
jday = row['jday']
filenm2 = datadir + row['datfile']
savefile1 = filenm1
savefile2 = savedir + os.path.split(filenm1)[1]
print('%d, %s, plev=%d' % (year, varnm, plev))
print('Reading original data from ' + filenm1)
with xray.open_dataset(filenm1) as ds:
var1 = ds[varnm].load()
print('Processing new data from ' + filenm2)
var2 = var_calcs(filenm2, varnm, plev)
print('Merging data for jday %d' % jday)
var = var1.copy()
ind = jday - 1
days = atm.get_coord(var1, 'day')
if not days[ind] == jday:
raise ValueError('Days not indexed from 1, need to edit code to handle')
var[ind] = var2
print('Saving to ' + savefile1)
var.to_netcdf(savefile1)
print('Saving to ' + savefile2)
var.to_netcdf(savefile2)
data = {'orig' : var1, 'new' : var2, 'merged' : var}
return data
# Make a copy of each of the original files -- only run this code once!
# for filenm in probdata['filename']:
# shutil.copyfile(filenm, filenm.replace('.nc', '_orig.nc'))
for i, row in probdata.iterrows():
data = process_row(row, datadir, savedir)
# Plot data to check
def plot_data(probdata, savedir, i):
row = probdata.iloc[i]
filenm = row['filename']
filenm = savedir + os.path.split(filenm)[1]
jday = row['jday']
varnm = row['varnm']
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
plt.figure(figsize=(16, 8))
plt.suptitle(os.path.split(filenm)[1])
plt.subplot(1, 3, 1)
atm.pcolor_latlon(var.sel(day=(jday-1)))
plt.title(jday - 1)
plt.subplot(1, 3, 2)
atm.pcolor_latlon(var.sel(day=jday))
plt.title(jday)
plt.subplot(1, 3, 3)
atm.pcolor_latlon(var.sel(day=(jday+1)))
plt.title(jday + 1) | mit | -9,150,407,698,078,713,000 | 31.975 | 80 | 0.609289 | false |
ivanyu/rosalind | algorithmic_heights/sc/sc.py | 1 | 1421 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def main(argv):
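    # Expected input (as parsed below): first line k = number of graphs; each
    # graph is preceded by a line to skip, then "n m" followed by m directed
    # edges "i j" with 1-based vertex numbers.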
from sc_logic import check_semi_connectedness
graphs = []
if len(argv) < 2:
print('k = 2')
k = 2
print('Graph 1:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('2 1')
g[1][0] = 1
graphs.append(g)
print('Graph 2:')
print('n = 3')
n = 3
print('m = 2')
m = 2
g = [[0 for i in range(n)] for _ in range(n)]
print('3 2')
g[2][1] = 1
print('1 2')
g[0][1] = 1
graphs.append(g)
else:
with open(argv[1]) as f:
k = int(f.readline().strip())
for _ in range(k):
f.readline()
line = f.readline()
n, m = [int(x.strip()) for x in line.strip().split()]
g = [[0 for _ in range(n)] for _ in range(n)]
for edge in range(m):
line = f.readline()
i, j = [int(x.strip()) for x in line.strip().split()]
g[i - 1][j - 1] = 1
graphs.append(g)
for g in graphs:
r = check_semi_connectedness(g)
print('1' if r else -1, end=' ')
if __name__ == "__main__":
import sys
main(sys.argv)
| mit | -6,796,745,880,054,113,000 | 23.084746 | 73 | 0.398311 | false |
openstack/tacker | tacker/objects/grant.py | 1 | 11110 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker import objects
from tacker.objects import base
from tacker.objects import fields
@base.TackerObjectRegistry.register
class Grant(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'vnf_instance_id': fields.StringField(nullable=False),
'vnf_lcm_op_occ_id': fields.StringField(nullable=False),
'vim_connections': fields.ListOfObjectsField(
'VimConnectionInfo', nullable=True, default=[]),
'zones': fields.ListOfObjectsField(
'ZoneInfo', nullable=True, default=[]),
'add_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'remove_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'update_resources': fields.ListOfObjectsField(
'GrantInfo', nullable=True, default=[]),
'vim_assets': fields.ObjectField(
'VimAssets', nullable=True),
'ext_virtual_links': fields.ListOfObjectsField(
'ExtVirtualLinkData', nullable=True, default=[]),
}
@classmethod
def obj_from_primitive(cls, primitive, context):
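        # A primitive carrying 'tacker_object.name' is an already-serialized
        # TackerObject and is delegated to the base class; otherwise it is
        # treated as a plain dict and converted field by field below.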
if 'tacker_object.name' in primitive:
obj_grant = super(
Grant, cls).obj_from_primitive(primitive, context)
else:
if 'vim_connections' in primitive.keys():
obj_data = [objects.VimConnectionInfo._from_dict(
vim_conn) for vim_conn in primitive.get(
'vim_connections', [])]
primitive.update({'vim_connections': obj_data})
if 'zones' in primitive.keys():
obj_data = [ZoneInfo._from_dict(
zone) for zone in primitive.get(
'zones', [])]
primitive.update({'zones': obj_data})
if 'add_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
add_rsc) for add_rsc in primitive.get(
'add_resources', [])]
primitive.update({'add_resources': obj_data})
if 'remove_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
remove_rsc) for remove_rsc in primitive.get(
'remove_resources', [])]
primitive.update({'remove_resources': obj_data})
if 'update_resources' in primitive.keys():
obj_data = [GrantInfo._from_dict(
update_rsc) for update_rsc in primitive.get(
'update_resources', [])]
primitive.update({'update_resources': obj_data})
if 'vim_assets' in primitive.keys():
obj_data = VimAssets.obj_from_primitive(
primitive.get('vim_assets'), context)
primitive.update({'vim_assets': obj_data})
if 'ext_virtual_links' in primitive.keys():
obj_data = [objects.ExtVirtualLinkData.obj_from_primitive(
ext_vir_link, context) for ext_vir_link in primitive.get(
'ext_virtual_links', [])]
primitive.update({'ext_virtual_links': obj_data})
obj_grant = Grant._from_dict(primitive)
return obj_grant
@classmethod
def _from_dict(cls, data_dict):
id = data_dict.get('id')
vnf_instance_id = data_dict.get('vnf_instance_id')
vnf_lcm_op_occ_id = data_dict.get('vnf_lcm_op_occ_id')
vim_connections = data_dict.get('vim_connections', [])
zones = data_dict.get('zones', [])
add_resources = data_dict.get('add_resources', [])
remove_resources = data_dict.get('remove_resources', [])
update_resources = data_dict.get('update_resources', [])
vim_assets = data_dict.get('vim_assets')
ext_virtual_links = data_dict.get('ext_virtual_links', [])
obj = cls(
id=id,
vnf_instance_id=vnf_instance_id,
vnf_lcm_op_occ_id=vnf_lcm_op_occ_id,
vim_connections=vim_connections,
zones=zones,
add_resources=add_resources,
remove_resources=remove_resources,
update_resources=update_resources,
vim_assets=vim_assets,
ext_virtual_links=ext_virtual_links)
return obj
@base.TackerObjectRegistry.register
class ZoneInfo(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'id': fields.StringField(nullable=False),
'zone_id': fields.StringField(nullable=False),
'vim_connection_id': fields.StringField(nullable=True)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_zone_info = super(
ZoneInfo, cls).obj_from_primitive(primitive, context)
else:
obj_zone_info = ZoneInfo._from_dict(primitive)
return obj_zone_info
@classmethod
def _from_dict(cls, data_dict):
id = data_dict.get('id')
zone_id = data_dict.get('zone_id')
vim_connection_id = data_dict.get('vim_connection_id')
obj = cls(
id=id,
zone_id=zone_id,
vim_connection_id=vim_connection_id)
return obj
@base.TackerObjectRegistry.register
class GrantInfo(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'resource_definition_id': fields.StringField(nullable=False),
'vim_connection_id': fields.StringField(nullable=True),
'zone_id': fields.StringField(nullable=True)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_grant_info = super(
GrantInfo, cls).obj_from_primitive(primitive, context)
else:
obj_grant_info = GrantInfo._from_dict(primitive)
return obj_grant_info
@classmethod
def _from_dict(cls, data_dict):
resource_definition_id = data_dict.get('resource_definition_id')
vim_connection_id = data_dict.get('vim_connection_id')
zone_id = data_dict.get('zone_id')
obj = cls(
resource_definition_id=resource_definition_id,
vim_connection_id=vim_connection_id,
zone_id=zone_id)
return obj
@base.TackerObjectRegistry.register
class VimAssets(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'compute_resource_flavours': fields.ListOfObjectsField(
'VimComputeResourceFlavour', nullable=True, default=[]),
'software_images': fields.ListOfObjectsField(
'VimSoftwareImage', nullable=True, default=[])
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_vim_assets = super(
VimAssets, cls).obj_from_primitive(primitive, context)
else:
if 'compute_resource_flavours' in primitive.keys():
obj_data = [VimComputeResourceFlavour._from_dict(
flavour) for flavour in primitive.get(
'compute_resource_flavours', [])]
primitive.update({'compute_resource_flavours': obj_data})
if 'software_images' in primitive.keys():
obj_data = [VimSoftwareImage._from_dict(
img) for img in primitive.get(
'software_images', [])]
primitive.update({'software_images': obj_data})
obj_vim_assets = VimAssets._from_dict(primitive)
return obj_vim_assets
@classmethod
def _from_dict(cls, data_dict):
compute_resource_flavours = data_dict.get(
'compute_resource_flavours', [])
software_images = data_dict.get('software_images', [])
obj = cls(
compute_resource_flavours=compute_resource_flavours,
software_images=software_images)
return obj
@base.TackerObjectRegistry.register
class VimComputeResourceFlavour(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vim_connection_id': fields.StringField(nullable=True),
'vnfd_virtual_compute_desc_id': fields.StringField(nullable=False),
'vim_flavour_id': fields.StringField(nullable=False)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_flavour = super(
VimComputeResourceFlavour,
cls).obj_from_primitive(
primitive,
context)
else:
obj_flavour = VimComputeResourceFlavour._from_dict(primitive)
return obj_flavour
@classmethod
def _from_dict(cls, data_dict):
vim_connection_id = data_dict.get('vim_connection_id')
vnfd_virtual_compute_desc_id = data_dict.get(
'vnfd_virtual_compute_desc_id')
vim_flavour_id = data_dict.get('vim_flavour_id')
obj = cls(
vim_connection_id=vim_connection_id,
vnfd_virtual_compute_desc_id=vnfd_virtual_compute_desc_id,
vim_flavour_id=vim_flavour_id)
return obj
@base.TackerObjectRegistry.register
class VimSoftwareImage(base.TackerObject):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
'vim_connection_id': fields.StringField(nullable=True),
'vnfd_software_image_id': fields.StringField(nullable=False),
'vim_software_image_id': fields.StringField(nullable=False)
}
@classmethod
def obj_from_primitive(cls, primitive, context):
if 'tacker_object.name' in primitive:
obj_img = super(
VimSoftwareImage, cls).obj_from_primitive(primitive, context)
else:
obj_img = VimSoftwareImage._from_dict(primitive)
return obj_img
@classmethod
def _from_dict(cls, data_dict):
vim_connection_id = data_dict.get('vim_connection_id')
vnfd_software_image_id = data_dict.get('vnfd_software_image_id')
vim_software_image_id = data_dict.get('vim_software_image_id')
obj = cls(
vim_connection_id=vim_connection_id,
vnfd_software_image_id=vnfd_software_image_id,
vim_software_image_id=vim_software_image_id)
return obj
| apache-2.0 | -1,107,050,491,365,563,900 | 35.546053 | 78 | 0.59883 | false |
rmanoni/mi-instrument | mi/core/instrument/test/test_port_agent_client.py | 1 | 29762 | #!/usr/bin/env python
"""
@package ion.services.mi.test.test_port_agent_client
@file ion/services/mi/test/test_port_agent_client.py
@author David Everett
@brief Some unit tests for R2 port agent client
"""
__author__ = 'David Everett'
__license__ = 'Apache 2.0'
# Ensure the test class is monkey patched for gevent
from gevent import monkey
monkey.patch_all()
import gevent
import logging
import unittest
import time
import datetime
import array
import struct
import ctypes
from nose.plugins.attrib import attr
from mi.core.port_agent_process import PortAgentProcess
from mi.core.port_agent_process import PortAgentProcessType
from mi.core.tcp_client import TcpClient
from mi.core.port_agent_simulator import TCPSimulatorServer
from mi.core.unit_test import MiUnitTest
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.exceptions import IDKException
from mi.core.instrument.port_agent_client import PortAgentClient, PortAgentPacket, Listener
from mi.core.instrument.port_agent_client import HEADER_SIZE
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.exceptions import InstrumentConnectionException
from mi.instrument.seabird.sbe37smb.ooicore.driver import SBE37Driver
# MI logger
from mi.core.log import get_logger
log = get_logger()
SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3])
NTP_EPOCH = datetime.date(1900, 1, 1)
NTP_DELTA = (SYSTEM_EPOCH - NTP_EPOCH).total_seconds()
# Initialize the test parameters
# Use the SBE37 here because this is a generic port_agent_client test not
# necessarily associated with any driver.
InstrumentDriverTestCase.initialize(
driver_module='mi.instrument.seabird.sbe37smb.ooicore.driver',
driver_class="SBE37Driver",
instrument_agent_resource_id='123xyz',
instrument_agent_preload_id='IA2',
instrument_agent_name='Agent007',
driver_startup_config={}
)
@attr('UNIT', group='mi')
class PAClientUnitTestCase(InstrumentDriverUnitTestCase):
def setUp(self):
self.ipaddr = "localhost"
self.cmd_port = 9001
self.data_port = 9002
self.device_port = 9003
def resetTestVars(self):
self.rawCallbackCalled = False
self.dataCallbackCalled = False
self.errorCallbackCalled = False
self.listenerCallbackCalled = False
def myGotData(self, pa_packet):
self.dataCallbackCalled = True
if pa_packet.is_valid():
validity = "valid"
else:
validity = "invalid"
log.info("Got %s port agent data packet with data length %d: %s", validity, pa_packet.get_data_length(),
pa_packet.get_data())
def myGotRaw(self, pa_packet):
self.rawCallbackCalled = True
if pa_packet.is_valid():
validity = "valid"
else:
validity = "invalid"
log.info("Got %s port agent raw packet with data length %d: %s", validity, pa_packet.get_data_length(),
pa_packet.get_data())
def myGotError(self, error_string="No error string passed in."):
self.errorCallbackCalled = True
log.info("Got error: %s", error_string)
def myGotListenerError(self, exception):
self.listenerCallbackCalled = True
log.info("Got listener exception: %s", exception)
def raiseException(self, packet):
raise Exception("Boom")
def test_handle_packet(self):
"""
Test that a default PortAgentPacket creates a DATA_FROM_DRIVER packet,
and that the handle_packet method invokes the raw callback
"""
pa_listener = Listener(None, None, 0, 0, 5, self.myGotData, self.myGotRaw, self.myGotListenerError,
self.myGotError)
test_data = "This is a great big test"
self.resetTestVars()
pa_packet = PortAgentPacket()
pa_packet.attach_data(test_data)
pa_packet.pack_header()
pa_packet.verify_checksum()
pa_listener.handle_packet(pa_packet)
self.assertTrue(self.rawCallbackCalled)
###
# Test DATA_FROM_INSTRUMENT; handle_packet should invoke data and raw
# callbacks.
###
self.resetTestVars()
pa_packet = PortAgentPacket(PortAgentPacket.DATA_FROM_INSTRUMENT)
pa_packet.attach_data(test_data)
pa_packet.pack_header()
pa_packet.verify_checksum()
pa_listener.handle_packet(pa_packet)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test PORT_AGENT_COMMAND; handle_packet should invoke raw callback.
###
self.resetTestVars()
pa_packet = PortAgentPacket(PortAgentPacket.PORT_AGENT_COMMAND)
pa_packet.attach_data(test_data)
pa_packet.pack_header()
pa_packet.verify_checksum()
pa_listener.handle_packet(pa_packet)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test PORT_AGENT_STATUS; handle_packet should invoke raw callback.
###
self.resetTestVars()
pa_packet = PortAgentPacket(PortAgentPacket.PORT_AGENT_STATUS)
pa_packet.attach_data(test_data)
pa_packet.pack_header()
pa_packet.verify_checksum()
pa_listener.handle_packet(pa_packet)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test PORT_AGENT_FAULT; handle_packet should invoke raw callback.
###
self.resetTestVars()
pa_packet = PortAgentPacket(PortAgentPacket.PORT_AGENT_FAULT)
pa_packet.attach_data(test_data)
pa_packet.pack_header()
pa_packet.verify_checksum()
pa_listener.handle_packet(pa_packet)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test INSTRUMENT_COMMAND; handle_packet should invoke raw callback.
###
self.resetTestVars()
pa_packet = PortAgentPacket(PortAgentPacket.DIGI_CMD)
pa_packet.attach_data(test_data)
pa_packet.pack_header()
pa_packet.verify_checksum()
pa_listener.handle_packet(pa_packet)
self.assertTrue(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
###
# Test HEARTBEAT; handle_packet should not invoke any callback.
###
self.resetTestVars()
pa_packet = PortAgentPacket(PortAgentPacket.HEARTBEAT)
pa_packet.attach_data(test_data)
pa_packet.pack_header()
pa_packet.verify_checksum()
pa_listener.handle_packet(pa_packet)
self.assertFalse(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
def test_heartbeat_timeout(self):
"""
Initialize the Listener with a heartbeat value, then
start the heartbeat. Wait long enough for the heartbeat
to timeout MAX_MISSED_HEARTBEATS times, and then assert
that the error_callback was called.
"""
self.resetTestVars()
test_recovery_attempts = 1
test_heartbeat = 1
test_max_missed_heartbeats = 5
pa_listener = Listener(None, test_recovery_attempts, delim=None, heartbeat=test_heartbeat,
max_missed_heartbeats=test_max_missed_heartbeats,
callback_data=self.myGotData, callback_raw=self.myGotRaw,
default_callback_error=self.myGotListenerError, local_callback_error=None,
user_callback_error=self.myGotError)
pa_listener.start_heartbeat_timer()
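        # Sleep past max_missed_heartbeats * interval (plus a small margin) so
        # the missed-heartbeat error path is guaranteed to fire.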
gevent.sleep((test_max_missed_heartbeats * pa_listener.heartbeat) + 4)
self.assertFalse(self.rawCallbackCalled)
self.assertFalse(self.dataCallbackCalled)
self.assertTrue(self.errorCallbackCalled)
self.assertFalse(self.listenerCallbackCalled)
def test_set_heartbeat(self):
"""
Test the set_heart_beat function; make sure it returns False when
passed invalid values, and true when valid. Also make sure it
adds the HEARTBEAT_FUDGE
"""
self.resetTestVars()
test_recovery_attempts = 1
test_heartbeat = 0
test_max_missed_heartbeats = 5
pa_listener = Listener(None, test_recovery_attempts, None, test_heartbeat, test_max_missed_heartbeats,
self.myGotData, self.myGotRaw, self.myGotListenerError, None, self.myGotError)
###
# Test valid values
###
test_heartbeat = 1
return_value = pa_listener.set_heartbeat(test_heartbeat)
self.assertTrue(return_value)
self.assertTrue(pa_listener.heartbeat == test_heartbeat + pa_listener.HEARTBEAT_FUDGE)
test_heartbeat = pa_listener.MAX_HEARTBEAT_INTERVAL
return_value = pa_listener.set_heartbeat(test_heartbeat)
self.assertTrue(return_value)
self.assertTrue(pa_listener.heartbeat == test_heartbeat + pa_listener.HEARTBEAT_FUDGE)
###
# Test that a heartbeat value of zero results in the listener.heartbeat being zero
# (and doesn't include HEARTBEAT_FUDGE)
###
test_heartbeat = 0
return_value = pa_listener.set_heartbeat(test_heartbeat)
self.assertTrue(return_value)
self.assertTrue(pa_listener.heartbeat == test_heartbeat)
###
# Test invalid values
###
test_heartbeat = -1
return_value = pa_listener.set_heartbeat(test_heartbeat)
self.assertFalse(return_value)
test_heartbeat = pa_listener.MAX_HEARTBEAT_INTERVAL + 1
return_value = pa_listener.set_heartbeat(test_heartbeat)
self.assertFalse(return_value)
def test_connect_failure(self):
"""
Test that when the the port agent client cannot initially connect, it
raises an InstrumentConnectionException
"""
driver = SBE37Driver(self._got_data_event_callback)
driver._autoconnect = False
current_state = driver.get_resource_state()
self.assertEqual(current_state, DriverConnectionState.UNCONFIGURED)
config = {'addr': self.ipaddr, 'port': self.data_port, 'cmd_port': self.cmd_port}
driver.configure(config=config)
current_state = driver.get_resource_state()
self.assertEqual(current_state, DriverConnectionState.DISCONNECTED)
# Try to connect: it should not because there is no port agent running.
# The state should remain DISCONNECTED
driver.connect()
current_state = driver.get_resource_state()
self.assertEqual(current_state, DriverConnectionState.DISCONNECTED)
@attr('UNIT', group='mi')
class PAClientTestPortAgentPacket(MiUnitTest):
@staticmethod
def ntp_to_system_time(date):
"""convert a NTP time to system time"""
return date - NTP_DELTA
@staticmethod
def system_to_ntp_time(date):
"""convert a system time to a NTP time"""
return date + NTP_DELTA
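    # Sanity check of the offset used above: system_to_ntp_time(0.0) equals
    # NTP_DELTA, i.e. the 2208988800 s between 1900-01-01 and 1970-01-01.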
def setUp(self):
self.pap = PortAgentPacket()
# self.test_time = time.time()
# self.ntp_time = self.system_to_ntp_time(self.test_time)
# self.pap.set_timestamp(self.ntp_time)
def test_pack_header(self):
test_data = "Only the length of this matters?"
test_data_length = len(test_data)
self.pap.attach_data(test_data)
self.pap.pack_header()
self.assertEqual(self.pap.get_data_length(), test_data_length)
def test_get_length(self):
test_length = 100
self.pap.set_data_length(test_length)
got_length = self.pap.get_data_length()
self.assertEqual(got_length, test_length)
def test_checksum(self):
"""
This tests the checksum algorithm; if somebody changes the algorithm
this test should catch it. Had to jump through some hoops to do this;
needed to add set_data_length and set_header because we're building our
own header here (the one in PortAgentPacket includes the timestamp
so the checksum is not consistent).
"""
test_data = "This tests the checksum algorithm."
test_length = len(test_data)
self.pap.attach_data(test_data)
# Now build a header
variable_tuple = (0xa3, 0x9d, 0x7a, self.pap.DATA_FROM_DRIVER,
test_length + HEADER_SIZE, 0x0000,
0)
self.pap.set_data_length(test_length)
header_format = '>BBBBHHd'
size = struct.calcsize(header_format)
temp_header = ctypes.create_string_buffer(size)
struct.pack_into(header_format, temp_header, 0, *variable_tuple)
# Now set the header member in PortAgentPacket to the header
# we built
self.pap.set_header(temp_header.raw)
# Now get the checksum and verify it is what we expect it to be.
checksum = self.pap.calculate_checksum()
self.assertEqual(checksum, 2)
def test_unpack_header(self):
self.pap = PortAgentPacket()
data_length = 32
data = self.pap.unpack_header(array.array('B',
[163, 157, 122, 2, 0, data_length + HEADER_SIZE, 14, 145, 65, 234,
142, 154, 23, 155, 51, 51]))
got_timestamp = self.pap.get_timestamp()
self.assertEqual(self.pap.get_header_type(), self.pap.DATA_FROM_DRIVER)
self.assertEqual(self.pap.get_data_length(), data_length)
self.assertEqual(got_timestamp, 1105890970.092212)
self.assertEqual(self.pap.get_header_recv_checksum(), 3729)
@attr('INT', group='mi')
class PAClientIntTestCase(InstrumentDriverTestCase):
def setUp(self):
# InstrumentDriverIntegrationTestCase.setUp(self)
self.ipaddr = "localhost"
self.cmd_port = 9001
self.data_port = 9002
self.device_port = 9003
self.rawCallbackCalled = False
self.dataCallbackCalled = False
self.errorCallbackCalled = False
self.pa_packet = None
def tearDown(self):
"""
@brief Test teardown
"""
log.debug("PACClientIntTestCase tearDown")
InstrumentDriverTestCase.tearDown(self)
def startPortAgent(self):
pa_port = self.init_port_agent()
log.debug("port_agent started on port: %d" % pa_port)
time.sleep(2) # give it a chance to start responding
def resetTestVars(self):
log.debug("Resetting test variables...")
self.rawCallbackCalled = False
self.dataCallbackCalled = False
self.errorCallbackCalled = False
self.listenerCallbackCalled = False
def myGotData(self, pa_packet):
self.dataCallbackCalled = True
self.pa_packet = pa_packet
if pa_packet.is_valid():
validity = "valid"
else:
validity = "invalid"
log.debug("Got %s port agent data packet with data length %s: %s", validity, pa_packet.get_data_length(),
pa_packet.get_data())
def myGotRaw(self, pa_packet):
self.rawCallbackCalled = True
if pa_packet.is_valid():
validity = "valid"
else:
validity = "invalid"
log.debug("Got %s port agent raw packet with data length %s: %s", validity, pa_packet.get_data_length(),
pa_packet.get_data())
def myGotListenerError(self, exception):
self.listenerCallbackCalled = True
log.info("Got listener exception: %s", exception)
def myGotError(self, error_string="No error string passed in."):
self.errorCallbackCalled = True
log.info("myGotError got error: %s", error_string)
def init_instrument_simulator(self):
"""
Startup a TCP server that we can use as an instrument simulator
"""
self._instrument_simulator = TCPSimulatorServer()
self.addCleanup(self._instrument_simulator.close)
# Wait for the simulator to bind to a port
timeout = time.time() + 10
while timeout > time.time():
if self._instrument_simulator.port > 0:
log.debug("Instrument simulator initialized on port %s" % self._instrument_simulator.port)
return
log.debug("waiting for simulator to bind. sleeping")
time.sleep(1)
raise IDKException("Timeout waiting for simulator to bind")
def init_port_agent(self):
"""
@brief Launch the driver process and driver client. This is used in the
integration and qualification tests. The port agent abstracts the physical
interface with the instrument.
@retval return the pid to the logger process
"""
if self.port_agent:
log.error("Port agent already initialized")
return
log.debug("Startup Port Agent")
# comm_config = self.get_comm_config()
config = self.port_agent_config()
log.debug("port agent config: %s" % config)
port_agent = PortAgentProcess.launch_process(config, timeout=60, test_mode=True)
port = port_agent.get_data_port()
pid = port_agent.get_pid()
log.info('Started port agent pid %s listening at port %s' % (pid, port))
self.addCleanup(self.stop_port_agent)
self.port_agent = port_agent
return port
def port_agent_config(self):
"""
Overload the default port agent configuration so that
it connects to a simulated TCP connection.
"""
config = {'device_addr': 'localhost',
'device_port': self._instrument_simulator.port,
'command_port': self.cmd_port,
'data_port': self.data_port,
'process_type': PortAgentProcessType.UNIX,
'log_level': 5,
'heartbeat_interval': 3}
# Override the instrument connection information.
return config
def test_pa_client_retry(self):
"""
Test that the port agent client will not continually try to recover
when the port agent closes the connection gracefully because it has
another client connected.
"""
exception_raised = False
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
time.sleep(2)
# Start a TCP client that will connect to the data port; this sets up the
# situation where the Port Agent will immediately close the connection
# because it already has one
self.tcp_client = TcpClient("localhost", self.data_port)
time.sleep(2)
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
try:
pa_client.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
except InstrumentConnectionException:
exception_raised = True
# Give it some time to retry
time.sleep(4)
self.assertTrue(exception_raised)
def test_pa_client_rx_heartbeat(self):
"""
Test that the port agent can send heartbeats when the pa_client has
a heartbeat_interval of 0. The port_agent_config() method above
sets the heartbeat interval.
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
time.sleep(5)
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
pa_client.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
time.sleep(10)
self.assertFalse(self.errorCallbackCalled)
def test_start_pa_client_no_port_agent(self):
self.resetTestVars()
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
self.assertRaises(InstrumentConnectionException,
pa_client.init_comms,
self.myGotData, self.myGotRaw,
self.myGotListenerError, self.myGotError)
self.assertFalse(self.errorCallbackCalled)
def test_start_pa_client_with_port_agent(self):
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
try:
pa_client.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % e)
exception_caught = True
else:
exception_caught = False
data = "this is a great big test"
pa_client.send(data)
time.sleep(1)
self._instrument_simulator.send(data)
time.sleep(5)
pa_client.stop_comms()
# Assert that the error_callback was not called, that an exception was not
# caught, and that the data and raw callbacks were called.
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(exception_caught)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
def test_start_pa_client_no_port_agent_big_data(self):
self.resetTestVars()
logging.getLogger('mi.core.instrument.port_agent_client').setLevel(logging.DEBUG)
# I put this in here because PortAgentPacket cannot make a new packet
# with a valid checksum.
def makepacket(msgtype, timestamp, data):
from struct import Struct
SYNC = (0xA3, 0x9D, 0x7A)
HEADER_FORMAT = "!BBBBHHd"
header_struct = Struct(HEADER_FORMAT)
HEADER_SIZE = header_struct.size
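            # Header layout ("!BBBBHHd"): three sync bytes (0xA3 0x9D 0x7A), a
            # message-type byte, a 16-bit total packet size, a 16-bit checksum
            # and an 8-byte NTP timestamp -- 16 bytes in total.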
def calculate_checksum(data, seed=0):
n = seed
for datum in data:
n ^= datum
return n
def pack_header(buf, msgtype, pktsize, checksum, timestamp):
sync1, sync2, sync3 = SYNC
header_struct.pack_into(buf, 0, sync1, sync2, sync3, msgtype, pktsize,
checksum, timestamp)
pktsize = HEADER_SIZE + len(data)
pkt = bytearray(pktsize)
pack_header(pkt, msgtype, pktsize, 0, timestamp)
pkt[HEADER_SIZE:] = data
checksum = calculate_checksum(pkt)
pack_header(pkt, msgtype, pktsize, checksum, timestamp)
return pkt
# Make a BIG packet
data = "A" * (2 ** 16 - HEADER_SIZE - 1)
txpkt = makepacket(PortAgentPacket.DATA_FROM_INSTRUMENT, 0.0, data)
def handle(sock, addr):
# Send it in pieces
sock.sendall(txpkt[:1500])
time.sleep(1)
sock.sendall(txpkt[1500:])
time.sleep(10)
import gevent.server
dataserver = gevent.server.StreamServer((self.ipaddr, self.data_port), handle)
cmdserver = gevent.server.StreamServer((self.ipaddr, self.cmd_port), lambda x, y: None)
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
try:
dataserver.start()
cmdserver.start()
pa_client.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % e)
raise
else:
time.sleep(5)
finally:
pa_client.stop_comms()
dataserver.kill()
cmdserver.kill()
# Assert that the error_callback was not called, that an exception was not
# caught, and that the data and raw callbacks were called.
self.assertFalse(self.errorCallbackCalled)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
self.assertEquals(self.pa_packet.get_data_length(), len(data))
self.assertEquals(len(self.pa_packet.get_data()), len(data))
# don't use assertEquals b/c it will print 64kb
self.assert_(self.pa_packet.get_data() == data)
def test_start_pa_client_lost_port_agent_tx_rx(self):
"""
This test starts the port agent and the instrument_simulator and
tests that data is sent and received first; then it stops the port
agent and tests that the error_callback was called.
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
pa_client.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
# Now send some data; there should be no errors.
try:
data = "this is a great big test"
pa_client.send(data)
time.sleep(1)
self._instrument_simulator.send(data)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % e)
exception_caught = True
else:
exception_caught = False
time.sleep(1)
# Assert that the error_callback was NOT called, that an exception was NOT
# caught, and that the data and raw callbacks WERE called.
self.assertFalse(self.errorCallbackCalled)
self.assertFalse(exception_caught)
self.assertTrue(self.rawCallbackCalled)
self.assertTrue(self.dataCallbackCalled)
# Now reset the test variables and try again; this time after stopping
# the port agent. Should be errors
self.resetTestVars()
try:
self.stop_port_agent()
log.debug("Port agent stopped")
data = "this is another great big test"
pa_client.send(data)
time.sleep(1)
log.debug("Sending from simulator")
self._instrument_simulator.send(data)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % e)
time.sleep(5)
# Assert that the error_callback WAS called. The listener usually
# is seeing the error first, and that does not call the exception, so
# only assert that the error callback was called.
self.assertTrue(self.errorCallbackCalled)
def test_start_pa_client_lost_port_agent_rx(self):
"""
This test starts the port agent and then stops the port agent and
verifies that the error callback was called (because the listener
is the only one that will see the error, since there is no send
operation).
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
pa_client.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError)
try:
self.stop_port_agent()
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % e)
time.sleep(5)
# Assert that the error_callback was called. At this moment the listener
# is seeing the error first, and that does not call the exception, so
# don't test for that yet.
self.assertTrue(self.errorCallbackCalled)
@unittest.skip('Skip; this test does not work consistently.')
def test_start_pa_client_lost_port_agent_tx(self):
"""
This test starts the port agent and then starts the port agent client
in a special way that will not start the listener thread. This will
        guarantee that the send context is the one that sees the error.
"""
self.resetTestVars()
self.init_instrument_simulator()
self.startPortAgent()
pa_client = PortAgentClient(self.ipaddr, self.data_port, self.cmd_port)
# Give the port agent time to initialize
time.sleep(5)
        pa_client.init_comms(self.myGotData, self.myGotRaw, self.myGotListenerError, self.myGotError,
                             start_listener=False)
try:
self.stop_port_agent()
data = "this big ol' test should cause send context to fail"
pa_client.send(data)
time.sleep(1)
except InstrumentConnectionException as e:
log.error("Exception caught: %r" % e)
exception_caught = True
else:
exception_caught = False
time.sleep(5)
# Assert that the error_callback was called. For this test the listener
# should not be running, so the send context should see the error, and that
# should throw an exception. Assert that the callback WAS called and that
# an exception WAS thrown.
self.assertTrue(self.errorCallbackCalled)
self.assertTrue(exception_caught)
| bsd-2-clause | -7,409,158,328,854,062,000 | 33.931925 | 116 | 0.633627 | false |
ezietsman/msc-thesis | images/makeunflat2.py | 1 | 1059 | from pylab import *
import astronomy as ast
# to format the labels better
from matplotlib.ticker import FormatStrFormatter
fmt = FormatStrFormatter('%1.2g') # or whatever
X1 = load('ec2117ans_1_c.dat')
x1 = X1[:,0]
y1 = 10**(X1[:,2]/(-2.5))
y1 /= average(y1)
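# Column 2 presumably holds magnitudes; 10**(-mag/2.5) converts them to a
# relative intensity, which is then normalised to its mean.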
T0 = 2453964.3307097
P = 0.1545255
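# T0 and P are presumably the ephemeris zero point (a Julian date) and the
# orbital period in days; they fold the time axis into orbital phase below
# via (x1 - T0)/P.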
figure(figsize=(6,4))
subplots_adjust(hspace=0.6,left=0.16)
ax = subplot(211)
#plot(x1,y1,'.')
scatter((x1-T0)/P,y1,s=0.8,faceted=False)
xlabel('Orbital Phase')
ylabel('Intensity')
title('Original Lightcurve')
#ylim(min(y1)-0.0000005,max(y1)+0.0000005)
ax.yaxis.set_major_formatter(fmt)
ax = subplot(212)
x2,y2 = ast.signal.dft(x1,y1,0,7000,1)
plot(x2,y2,'k-')
xlabel('Frequency (cycles/day)')
ylabel('Amplitude')
#vlines(3560,0.000000025,0.00000003,color='k',linestyle='solid')
#vlines(950,0.000000025,0.00000003,color='k',linestyle='solid')
#text(3350,0.000000035,'DNO',fontsize=10)
#text(700,0.000000035,'lpDNO',fontsize=10)
xlim(0,7000)
ylim(0,0.004)
title('Periodogram')
#ax.yaxis.set_major_formatter(fmt)
savefig('unflattened.png')
show()
| mit | 3,372,714,419,916,034,000 | 18.981132 | 64 | 0.70255 | false |
WilJoey/tn_ckan | ckan/new_tests/lib/navl/test_validators.py | 1 | 9126 | # -*- coding: utf-8 -*-
'''Unit tests for ckan/lib/navl/validators.py.
'''
import copy
import nose.tools
import ckan.new_tests.factories as factories
def returns_None(function):
'''A decorator that asserts that the decorated function returns None.
:param function: the function to decorate
:type function: function
Usage:
@returns_None
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(*args, **kwargs):
original_args = copy.deepcopy(args)
original_kwargs = copy.deepcopy(kwargs)
result = function(*args, **kwargs)
assert result is None, (
'Should return None when called with args: {args} and '
'kwargs: {kwargs}'.format(args=original_args,
kwargs=original_kwargs))
return result
return call_and_assert
def raises_StopOnError(function):
'''A decorator that asserts that the decorated function raises
dictization_functions.StopOnError.
:param function: the function to decorate
:type function: function
Usage:
@raises_StopOnError
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(*args, **kwargs):
import ckan.lib.navl.dictization_functions as df
nose.tools.assert_raises(df.StopOnError, function, *args, **kwargs)
return call_and_assert
def does_not_modify_data_dict(validator):
'''A decorator that asserts that the decorated validator doesn't modify
its `data` dict param.
:param validator: the validator function to decorate
:type validator: function
Usage:
@does_not_modify_data_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
assert data == original_data, (
'Should not modify data dict when called with '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
def removes_key_from_data_dict(validator):
'''A decorator that asserts that the decorated validator removes its key
from the data dict.
:param validator: the validator function to decorate
:type validator: function
Usage:
@removes_key_from_data_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
assert key not in data, (
'Should remove key from data dict when called with: '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context} '.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
def does_not_modify_other_keys_in_data_dict(validator):
'''A decorator that asserts that the decorated validator doesn't add,
modify the value of, or remove any other keys from its ``data`` dict param.
The function *may* modify its own data dict key.
:param validator: the validator function to decorate
:type validator: function
Usage:
@does_not_modify_other_keys_in_data_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
# The validator function is allowed to modify its own key, so remove
# that key from both dicts for the purposes of the assertions below.
if key in data:
del data[key]
if key in original_data:
del original_data[key]
assert data.keys() == original_data.keys(), (
'Should not add or remove keys from data dict when called with '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
for key_ in data:
assert data[key_] == original_data[key_], (
'Should not modify other keys in data dict when called with '
'key: {key}, data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
def does_not_modify_errors_dict(validator):
'''A decorator that asserts that the decorated validator doesn't modify its
`errors` dict param.
:param validator: the validator function to decorate
:type validator: function
Usage:
@does_not_modify_errors_dict
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
'''
def call_and_assert(key, data, errors, context=None):
if context is None:
context = {}
original_data = copy.deepcopy(data)
original_errors = copy.deepcopy(errors)
original_context = copy.deepcopy(context)
result = validator(key, data, errors, context=context)
assert errors == original_errors, (
'Should not modify errors dict when called with key: {key}, '
'data: {data}, errors: {errors}, '
'context: {context}'.format(key=key, data=original_data,
errors=original_errors,
context=original_context))
return result
return call_and_assert
class TestValidators(object):
def test_ignore_missing_with_value_missing(self):
'''ignore_missing() should raise StopOnError if:
- data[key] is None, or
- data[key] is dictization_functions.missing, or
- key is not in data
'''
import ckan.lib.navl.dictization_functions as df
import ckan.lib.navl.validators as validators
for value in (None, df.missing, 'skip'):
# This is the key for the value that is going to be validated.
key = ('key to be validated',)
# The data to pass to the validator function for validation.
data = factories.validator_data_dict()
if value != 'skip':
data[key] = value
# The errors dict to pass to the validator function.
errors = factories.validator_errors_dict()
errors[key] = []
@does_not_modify_other_keys_in_data_dict
@does_not_modify_errors_dict
@removes_key_from_data_dict
@raises_StopOnError
def call_validator(*args, **kwargs):
return validators.ignore_missing(*args, **kwargs)
call_validator(key=key, data=data, errors=errors, context={})
def test_ignore_missing_with_a_value(self):
'''If data[key] is neither None or missing, ignore_missing() should do
nothing.
'''
import ckan.lib.navl.validators as validators
key = ('key to be validated',)
data = factories.validator_data_dict()
data[key] = 'value to be validated'
errors = factories.validator_errors_dict()
errors[key] = []
@returns_None
@does_not_modify_data_dict
@does_not_modify_errors_dict
def call_validator(*args, **kwargs):
return validators.ignore_missing(*args, **kwargs)
call_validator(key=key, data=data, errors=errors, context={})
| mit | 8,180,306,314,881,718,000 | 33.308271 | 79 | 0.595003 | false |
device42/warranty_check | starter.py | 1 | 6083 | #!/usr/bin/env python
import sys
from Files.shared import Config, Device42rest
from Files.warranty_cisco import Cisco
from Files.warranty_dell import Dell
from Files.warranty_hp import Hp
from Files.warranty_ibm_lenovo import IbmLenovo
from Files.warranty_meraki import Meraki
def get_hardware_by_vendor(name):
    # Get the hardware models so that we only target systems registered for this manufacturer
hardware_models = d42_rest.get_hardware_models()
models = []
if hardware_models:
for model in hardware_models['models']:
manufacturer = model.get('manufacturer')
if manufacturer and name not in manufacturer.lower():
continue
model_name = model.get('name')
if model_name and model_name not in models:
models.append(model_name)
return ','.join(models)
def get_vendor_api(name):
current_cfg = cfg.get_config(name)
api = None
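    # Build the vendor-specific API wrapper below, using the credentials and
    # endpoints read from that vendor's section of the config file.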
if vendor == 'cisco':
cisco_params = {
'url': current_cfg['url'],
'client_id': current_cfg['client_id'],
'client_secret': current_cfg['client_secret'],
'd42_rest': d42_rest
}
api = Cisco(cisco_params)
elif vendor == 'dell':
dell_params = {
'url': current_cfg['url'],
'client_id': current_cfg['client_id'],
'client_secret': current_cfg['client_secret'],
'd42_rest': d42_rest
}
api = Dell(dell_params)
elif vendor == 'hp':
hp_params = {
'url': current_cfg['url'],
'api_key': current_cfg['api_key'],
'api_secret': current_cfg['api_secret'],
'd42_rest': d42_rest
}
api = Hp(hp_params)
elif vendor == 'ibm' or vendor == 'lenovo':
ibm_lenovo_params = {
'url': current_cfg['url'],
'url2': current_cfg['url2'],
'd42_rest': d42_rest
}
api = IbmLenovo(vendor, ibm_lenovo_params)
elif vendor == "meraki":
meraki_params = {
'url': current_cfg['url'],
'api_key': current_cfg['api_key'],
'd42_rest': d42_rest
}
api = Meraki(meraki_params)
return api
def loader(name, api, d42):
    # Locate the devices involved, based on the hardware models found, paging through them by advancing the offset
offset = 0
previous_batch = None
while True:
serials = []
current_hardware_models = get_hardware_by_vendor(name)
current_devices_batch = d42.get_devices(offset, current_hardware_models)
        # If the previous batch is the same as the current one, we are finished
if previous_batch is not None:
if previous_batch == current_devices_batch:
print '\n[!] Finished'
break
previous_batch = current_devices_batch
if current_devices_batch and 'Devices' in current_devices_batch and len(current_devices_batch['Devices']) > 0:
items = [[x['device_id'], x['serial_no'], x['manufacturer']] for x in
current_devices_batch['Devices'] if x['serial_no'] and x['manufacturer']]
for item in items:
try:
d42_id, d42_serial, d42_vendor = item
if name in d42_vendor.lower():
print '[+] %s serial #: %s' % (name.title(), d42_serial)
serials.append(d42_serial)
except ValueError as e:
print '\n[!] Error in item: "%s", msg : "%s"' % (item, e)
inline_serials = ','.join(serials)
if len(serials) > 0:
result = vendor_api.run_warranty_check(inline_serials)
if result is not None:
api.process_result(result, purchases)
offset += 50
else:
print '\n[!] Finished'
break
if __name__ == '__main__':
# get settings from config file
cfg = Config()
d42_cfg = cfg.get_config('d42')
discover = cfg.get_config('discover')
# init
d42_params = {
'username': d42_cfg['username'],
'password': d42_cfg['password'],
'url': d42_cfg['url']
}
d42_rest = Device42rest(d42_params)
# get purchases data from Device42
orders = d42_rest.get_purchases()
purchases = {}
if orders and 'purchases' in orders:
for order in orders['purchases']:
if 'line_items' in order:
purchase_id = order.get('purchase_id')
order_no = order.get('order_no')
for line_item in order['line_items']:
line_no = line_item.get('line_no')
devices = line_item.get('devices')
contractid = line_item.get('line_notes')
# POs with no start and end dates will now be included and given a hasher key with date min and max
start = line_item.get('line_start_date')
end = line_item.get('line_end_date')
if start and end and devices:
for device in devices:
if 'serial_no' in device:
serial = device['serial_no']
hasher = serial + contractid + start + end
if hasher not in purchases:
purchases[hasher] = [purchase_id, order_no, line_no, contractid, start, end, discover['forcedupdate']]
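    # At this point purchases maps a "serial_no + contract id + line start/end date"
    # key to [purchase_id, order_no, line_no, contractid, start, end, forcedupdate],
    # which loader() later hands to each vendor's process_result().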
APPS_ROW = []
if discover['cisco']:
APPS_ROW.append('cisco')
if discover['dell']:
APPS_ROW.append('dell')
if discover['hp']:
APPS_ROW.append('hp')
if discover['ibm']:
APPS_ROW.append('ibm')
if discover['lenovo']:
APPS_ROW.append('lenovo')
if discover['meraki']:
APPS_ROW.append('meraki')
for vendor in APPS_ROW:
print '\n[+] %s section' % vendor
vendor_api = get_vendor_api(vendor)
loader(vendor, vendor_api, d42_rest)
sys.exit()
| mit | 7,046,890,341,926,458,000 | 32.240437 | 138 | 0.536577 | false |
Parallel-in-Time/pySDC | pySDC/playgrounds/Allen_Cahn/AllenCahn_contracting_circle_standard_integrators.py | 1 | 5930 | import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
from pySDC.implementations.problem_classes.AllenCahn_2D_FD import allencahn_fullyimplicit, allencahn_semiimplicit
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def setup_problem():
problem_params = dict()
problem_params['nu'] = 2
problem_params['nvars'] = (128, 128)
problem_params['eps'] = 0.04
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1E-07
problem_params['lin_tol'] = 1E-08
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
return problem_params
def run_implicit_Euler(t0, dt, Tend):
"""
    Routine to run the implicit Euler method for the Allen-Cahn problem
    Args:
        t0 (float): initial time
        dt (float): time step size
        Tend (float): end time for dumping
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
u_new = problem.solve_system(rhs=u, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_imex_Euler(t0, dt, Tend):
"""
    Routine to run the IMEX (semi-implicit) Euler method for the Allen-Cahn problem
    Args:
        t0 (float): initial time
        dt (float): time step size
        Tend (float): end time for dumping
"""
problem = allencahn_semiimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=imex_mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
f = problem.eval_f(u, t)
rhs = u + dt * f.expl
u_new = problem.solve_system(rhs=rhs, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_CrankNicholson(t0, dt, Tend):
"""
    Routine to run the Crank-Nicolson method for the Allen-Cahn problem
    Args:
        t0 (float): initial time
        dt (float): time step size
        Tend (float): end time for dumping
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0)/dt)
startt = time.time()
t = t0
for n in range(nsteps):
rhs = u + dt / 2 * problem.eval_f(u, t)
u_new = problem.solve_system(rhs=rhs, factor=dt / 2, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def compute_radius(u, dx, t, init_radius):
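    # The numerical radius is estimated from the area covered by the phase field:
    # c grid cells with u >= 0, each of area dx**2, give r = sqrt(c * dx**2 / pi)
    # = sqrt(c / pi) * dx.  The exact radius of the contracting circle follows
    # r(t) = sqrt(init_radius**2 - 2*t), clipped at zero once the circle has vanished.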
c = np.count_nonzero(u >= 0.0)
radius = np.sqrt(c / np.pi) * dx
exact_radius = np.sqrt(max(init_radius ** 2 - 2.0 * t, 0))
return radius, exact_radius
def plot_radius(xcoords, exact_radius, radii):
fig, ax = plt.subplots()
plt.plot(xcoords, exact_radius, color='k', linestyle='--', linewidth=1, label='exact')
for type, radius in radii.items():
plt.plot(xcoords, radius, linestyle='-', linewidth=2, label=type)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.set_ylabel('radius')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
fname = 'data/AC_contracting_circle_standard_integrators'
plt.savefig('{}.pdf'.format(fname), bbox_inches='tight')
# plt.show()
def main_radius(cwd=''):
"""
Main driver
Args:
cwd (str): current working directory (need this for testing)
"""
# setup parameters "in time"
t0 = 0.0
dt = 0.001
Tend = 0.032
radii = {}
_, radius, exact_radius = run_implicit_Euler(t0=t0, dt=dt, Tend=Tend)
radii['implicit-Euler'] = radius
_, radius, exact_radius = run_imex_Euler(t0=t0, dt=dt, Tend=Tend)
radii['imex-Euler'] = radius
_, radius, exact_radius = run_CrankNicholson(t0=t0, dt=dt, Tend=Tend)
radii['CrankNicholson'] = radius
xcoords = [t0 + i * dt for i in range(int((Tend - t0) / dt))]
plot_radius(xcoords, exact_radius, radii)
def main_error(cwd=''):
t0 = 0
Tend = 0.032
errors = {}
# err, _, _ = run_implicit_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['implicit-Euler'] = err
# err, _, _ = run_imex_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['imex-Euler'] = err
err, _, _ = run_CrankNicholson(t0=t0, dt=0.001/64, Tend=Tend)
errors['CrankNicholson'] = err
if __name__ == "__main__":
main_error()
# main_radius()
| bsd-2-clause | 7,824,124,446,833,600,000 | 25.008772 | 113 | 0.594266 | false |
mollie/mollie-api-python | mollie/api/objects/payment.py | 1 | 6068 | from .base import Base
class Payment(Base):
@classmethod
def get_resource_class(cls, client):
from ..resources.payments import Payments
return Payments(client)
STATUS_OPEN = "open"
STATUS_PENDING = "pending"
STATUS_CANCELED = "canceled"
STATUS_EXPIRED = "expired"
STATUS_FAILED = "failed"
STATUS_PAID = "paid"
STATUS_AUTHORIZED = "authorized"
SEQUENCETYPE_ONEOFF = "oneoff"
SEQUENCETYPE_FIRST = "first"
SEQUENCETYPE_RECURRING = "recurring"
# Documented properties
@property
def resource(self):
return self._get_property("resource")
@property
def id(self):
return self._get_property("id")
@property
def mode(self):
return self._get_property("mode")
@property
def created_at(self):
return self._get_property("createdAt")
@property
def status(self):
return self._get_property("status")
@property
def is_cancelable(self):
return self._get_property("isCancelable")
@property
def authorized_at(self):
return self._get_property("authorizedAt")
@property
def paid_at(self):
return self._get_property("paidAt")
@property
def canceled_at(self):
return self._get_property("canceledAt")
@property
def expires_at(self):
return self._get_property("expiresAt")
@property
def expired_at(self):
return self._get_property("expiredAt")
@property
def failed_at(self):
return self._get_property("failedAt")
@property
def amount(self):
return self._get_property("amount")
@property
def amount_refunded(self):
return self._get_property("amountRefunded")
@property
def amount_remaining(self):
return self._get_property("amountRemaining")
@property
def description(self):
return self._get_property("description")
@property
def redirect_url(self):
return self._get_property("redirectUrl")
@property
def webhook_url(self):
return self._get_property("webhookUrl")
@property
def method(self):
return self._get_property("method")
@property
def metadata(self):
return self._get_property("metadata")
@property
def locale(self):
return self._get_property("locale")
@property
def country_code(self):
return self._get_property("countryCode")
@property
def profile_id(self):
return self._get_property("profileId")
@property
def settlement_amount(self):
return self._get_property("settlementAmount")
@property
def settlement_id(self):
return self._get_property("settlementId")
@property
def customer_id(self):
return self._get_property("customerId")
@property
def sequence_type(self):
return self._get_property("sequenceType")
@property
def mandate_id(self):
return self._get_property("mandateId")
@property
def subscription_id(self):
return self._get_property("subscriptionId")
@property
def order_id(self):
return self._get_property("orderId")
@property
def application_fee(self):
return self._get_property("applicationFee")
@property
def details(self):
return self._get_property("details")
# documented _links
@property
def checkout_url(self):
return self._get_link("checkout")
@property
def refunds(self):
"""Return the refunds related to this payment."""
return self.client.payment_refunds.on(self).list()
@property
def chargebacks(self):
"""Return the chargebacks related to this payment."""
return self.client.payment_chargebacks.on(self).list()
@property
def captures(self):
"""Return the captures related to this payment"""
return self.client.captures.on(self).list()
@property
def settlement(self):
"""Return the settlement for this payment."""
return self.client.settlements.get(self.settlement_id)
@property
def mandate(self):
"""Return the mandate for this payment."""
return self.client.customer_mandates.with_parent_id(self.customer_id).get(self.mandate_id)
@property
def subscription(self):
"""Return the subscription for this payment."""
return self.client.customer_subscriptions.with_parent_id(self.customer_id).get(self.subscription_id)
@property
def customer(self):
"""Return the customer for this payment."""
return self.client.customers.get(self.customer_id)
@property
def order(self):
"""Return the order for this payment."""
from ..resources.orders import Order
url = self._get_link("order")
if url:
resp = self.client.orders.perform_api_call(self.client.orders.REST_READ, url)
return Order(resp, self.client)
# additional methods
def is_open(self):
return self._get_property("status") == self.STATUS_OPEN
def is_pending(self):
return self._get_property("status") == self.STATUS_PENDING
def is_canceled(self):
return self._get_property("status") == self.STATUS_CANCELED
def is_expired(self):
return self._get_property("status") == self.STATUS_EXPIRED
def is_failed(self):
return self._get_property("status") == self.STATUS_FAILED
def is_authorized(self):
return self._get_property("status") == self.STATUS_AUTHORIZED
def is_paid(self):
return self._get_property("paidAt") is not None
def has_refunds(self):
return self._get_link("refunds") is not None
def can_be_refunded(self):
return self._get_property("amountRemaining") is not None
def has_sequence_type_first(self):
return self._get_property("sequenceType") == self.SEQUENCETYPE_FIRST
def has_sequence_type_recurring(self):
return self._get_property("sequenceType") == self.SEQUENCETYPE_RECURRING
| bsd-2-clause | -5,234,986,768,338,279,000 | 24.603376 | 108 | 0.6353 | false |
andrecunha/idd3 | idd3/rules/universal/atomic_emitting_rulesets.py | 1 | 1614 | # -*- coding: utf-8 -*-
# IDD3 - Propositional Idea Density from Dependency Trees
# Copyright (C) 2014-2015 Andre Luiz Verucci da Cunha
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals, division
from idd3 import Ruleset
class AtomicEmittingRuleset(Ruleset):
"""A base ruleset for atomic relations that just emits the associated word
as a proposition."""
def extract(self, relations, index, context, engine, info={}):
engine.emit((relations[index].word,))
class NegRuleset(AtomicEmittingRuleset):
"""A ruleset that processes the 'neg' relation."""
rel = 'neg'
def extract(self, relations, index, context, engine, info={}):
engine.emit((relations[index].word,), 'M')
class DiscourseRuleset(AtomicEmittingRuleset):
"""A ruleset that processes the 'discourse' relation."""
rel = 'discourse'
def extract(self, relations, index, context, engine, info={}):
engine.emit((relations[index].word,), 'M')
| gpl-3.0 | -1,017,622,284,109,164,500 | 31.28 | 78 | 0.716853 | false |
langner/cclib | test/regression.py | 1 | 144398 | # This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2020, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""A regression framework for parsing and testing logfiles.
The intention here is to make it easy to add new datafiles as bugs
are fixed and to write specific tests in the form of test functions.
In short, the file called regressionfiles.txt contains a list of regression
logfiles, which is compared to the files found on disk. All these files
should be parsed correctly, and if there is an appropriately named function
defined, that function will be used as a test.
There is also a mechanism for running unit tests on old logfiles, which
have been moved here from the cclib repository when newer versions
became available. We still want those logfiles to parse and test correctly,
although sometimes special modification will be needed.
To run the doctest, run `python -m test.regression` from the top level
directory in the cclib repository.
Running all regressions can take anywhere from 10-20s to several minutes
depending on your hardware. To aid debugging, there are two ways to limit
which regressions to parse and test. You can limit the run to a specific
parser, for example:
python -m test.regression Gaussian
You can also limit a run to a single output file, using its relative path
inside the data directory, like so:
python -m test.regression Gaussian/Gaussian03/borane-opt.log
"""
import glob
import logging
import os
import sys
import traceback
import unittest
import numpy
from packaging.version import parse as parse_version
from packaging.version import Version
from cclib.parser.utils import convertor
from cclib.parser import ccData
from cclib.parser import ADF
from cclib.parser import DALTON
from cclib.parser import FChk
from cclib.parser import GAMESS
from cclib.parser import GAMESSUK
from cclib.parser import Gaussian
from cclib.parser import Jaguar
from cclib.parser import Molcas
from cclib.parser import Molpro
from cclib.parser import MOPAC
from cclib.parser import NWChem
from cclib.parser import ORCA
from cclib.parser import Psi3
from cclib.parser import Psi4
from cclib.parser import QChem
from cclib.parser import Turbomole
from cclib.io import ccopen, ccread, moldenwriter
# This assume that the cclib-data repository is located at a specific location
# within the cclib repository. It would be better to figure out a more natural
# way to import the relevant tests from cclib here.
test_dir = os.path.realpath(os.path.dirname(__file__)) + "/../../test"
# This is safer than sys.path.append, and isn't sys.path.insert(0, ...) so
# virtualenvs work properly. See https://stackoverflow.com/q/10095037.
sys.path.insert(1, os.path.abspath(test_dir))
from .test_data import all_modules
from .test_data import all_parsers
from .test_data import module_names
from .test_data import parser_names
from .test_data import get_program_dir
# We need this to point to files relative to this script.
__filedir__ = os.path.abspath(os.path.dirname(__file__))
__regression_dir__ = os.path.join(__filedir__, "../data/regression/")
# The following regression test functions were manually written, because they
# contain custom checks that were determined on a per-file basis. Care needs to be taken
# that the function name corresponds to the path of the logfile, with some characters
# changed according to normalisefilename().
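# For example, the logfile "ADF/ADF2004.01/Fe_ox3_final.out" is handled by
# testADF_ADF2004_01_Fe_ox3_final_out below, and "Gaussian/Gaussian03/borane-opt.log"
# by testGaussian_Gaussian03_borane_opt_log: path separators, dots and dashes all
# become underscores in the function names.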
# ADF #
def testADF_ADF2004_01_Fe_ox3_final_out(logfile):
"""Make sure HOMOS are correct."""
assert logfile.data.homos[0] == 59 and logfile.data.homos[1] == 54
assert logfile.data.metadata["legacy_package_version"] == "2004.01"
assert logfile.data.metadata["package_version"] == "2004.01+200410211341"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testADF_ADF2013_01_dvb_gopt_b_unconverged_adfout(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.metadata["legacy_package_version"] == "2013.01"
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testADF_ADF2013_01_stopiter_dvb_sp_adfout(logfile):
"""This logfile has not SCF test lines so we have no way to check what happens."""
# This is what we would have checked:
# len(logfile.data.scfvalues[0]) == 10
assert not hasattr(logfile.data, "scfvalues")
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
def testADF_ADF2013_01_stopiter_dvb_sp_b_adfout(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
# Why is this not 3?
assert len(logfile.data.scfvalues[0]) == 2
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
def testADF_ADF2013_01_stopiter_dvb_sp_c_adfout(logfile):
"""This logfile has not SCF test lines so we have no way to check what happens."""
# This is what we would have checked:
# len(logfile.data.scfvalues[0]) == 6
assert not hasattr(logfile.data, "scfvalues")
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
def testADF_ADF2013_01_stopiter_dvb_sp_d_adfout(logfile):
"""This logfile has not SCF test lines so we have no way to check what happens."""
# This is what we would have checked:
# len(logfile.data.scfvalues[0]) == 7
assert not hasattr(logfile.data, "scfvalues")
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
def testADF_ADF2013_01_stopiter_dvb_un_sp_adfout(logfile):
"""This logfile has not SCF test lines so we have no way to check what happens."""
# This is what we would have checked:
# len(logfile.data.scfvalues[0]) == 7
assert not hasattr(logfile.data, "scfvalues")
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
def testADF_ADF2013_01_stopiter_dvb_un_sp_c_adfout(logfile):
"""This logfile has not SCF test lines so we have no way to check what happens."""
# This is what we would have checked:
# len(logfile.data.scfvalues[0]) == 10
assert not hasattr(logfile.data, "scfvalues")
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
def testADF_ADF2013_01_stopiter_MoOCl4_sp_adfout(logfile):
"""This logfile has not SCF test lines so we have no way to check what happens."""
# This is what we would have checked:
# len(logfile.data.scfvalues[0]) == 11
assert not hasattr(logfile.data, "scfvalues")
assert logfile.data.metadata["package_version"] == "2013.01+201309012319"
def testADF_ADF2014_01_DMO_ORD_orig_out(logfile):
"""In lieu of a unit test, make sure the polarizability (and
potentially later the optical rotation) is properly parsed.
"""
assert hasattr(logfile.data, 'polarizabilities')
assert len(logfile.data.polarizabilities) == 1
assert logfile.data.polarizabilities[0].shape == (3, 3)
# isotropic polarizability
isotropic_calc = numpy.average(numpy.diag(logfile.data.polarizabilities[0]))
isotropic_ref = 51.3359
assert abs(isotropic_calc - isotropic_ref) < 1.0e-4
assert logfile.data.metadata["legacy_package_version"] == "2014"
assert logfile.data.metadata["package_version"] == "2014dev42059"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.metadata["package_version_date"] == "2014-06-11"
assert logfile.data.metadata["package_version_description"] == "development version"
def testADF_ADF2016_166_tddft_0_31_new_out(logfile):
"""This file led to StopIteration (#430)."""
assert logfile.data.metadata["legacy_package_version"] == "2016"
assert logfile.data.metadata["package_version"] == "2016dev53619"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.metadata["package_version_date"] == "2016-07-21"
assert "package_version_description" not in logfile.data.metadata
def testADF_ADF2016_fa2_adf_out(logfile):
"""This logfile, without symmetry, should get atombasis parsed."""
assert hasattr(logfile.data, "atombasis")
assert [b for ab in logfile.data.atombasis for b in ab] == list(range(logfile.data.nbasis))
assert logfile.data.metadata["legacy_package_version"] == "2016"
assert logfile.data.metadata["package_version"] == "2016dev50467"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.metadata["package_version_date"] == "2016-02-17"
assert logfile.data.metadata["package_version_description"] == "branches/AndrewAtkins/ADF-Shar"
# DALTON #
def testDALTON_DALTON_2013_dvb_td_normalprint_out(logfile):
r"""This original unit test prints a DFT-specific version of the excitation
eigenvectors, which we do not parse.
Here is an example of the general output (requiring `**RESPONSE/.PRINT 4`
for older versions of DALTON), followed by "PBHT MO Overlap Diagnostic"
which only appears for DFT calculations. Note that the reason we cannot
parse this for etsyms is it doesn't contain the necessary
coefficient. "K_IA" and "(r s) operator", which is $\kappa_{rs}$, the
coefficient for excitation from the r -> s MO in the response vector, is
not what most programs print; it is "(r s) scaled", which is $\kappa_{rs}
* \sqrt{S_{rr} - S_{ss}}$. Because this isn't available from the PBHT
output, we cannot parse it.
Eigenvector for state no. 1
Response orbital operator symmetry = 1
(only scaled elements abs greater than 10.00 % of max abs value)
Index(r,s) r s (r s) operator (s r) operator (r s) scaled (s r) scaled
---------- ----- ----- -------------- -------------- -------------- --------------
154 27(2) 28(2) 0.5645327267 0.0077924161 0.7983698385 0.0110201405
311 58(4) 59(4) -0.4223079545 0.0137981027 -0.5972336367 0.0195134639
...
PBHT MO Overlap Diagnostic
--------------------------
I A K_IA K_AI <|I|*|A|> <I^2*A^2> Weight Contrib
27 28 0.564533 0.007792 0.790146 0.644560 0.309960 0.244913
58 59 -0.422308 0.013798 0.784974 0.651925 0.190188 0.149293
In the future, if `aooverlaps` and `mocoeffs` are available, it may be
possible to calculate the necessary scaled coefficients for `etsecs`.
"""
assert hasattr(logfile.data, "etenergies")
assert not hasattr(logfile.data, "etsecs")
assert hasattr(logfile.data, "etsyms")
assert hasattr(logfile.data, "etoscs")
assert logfile.data.metadata["legacy_package_version"] == "2013.4"
assert logfile.data.metadata["package_version"] == "2013.4+7abef2ada27562fe5e02849d6caeaa67c961732f"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testDALTON_DALTON_2015_dalton_atombasis_out(logfile):
"""This logfile didn't parse due to the absence of a line in the basis
set section.
"""
assert hasattr(logfile.data, "nbasis")
assert logfile.data.nbasis == 37
assert hasattr(logfile.data, "atombasis")
assert logfile.data.metadata["legacy_package_version"] == "2015.0"
assert logfile.data.metadata["package_version"] == "2015.0+d34efb170c481236ad60c789dea90a4c857c6bab"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testDALTON_DALTON_2015_dalton_intgrl_out(logfile):
"""This logfile didn't parse due to the absence of a line in the basis
set section.
"""
assert hasattr(logfile.data, "nbasis")
assert logfile.data.nbasis == 4
assert hasattr(logfile.data, "atombasis")
assert logfile.data.metadata["package_version"] == "2015.0+d34efb170c481236ad60c789dea90a4c857c6bab"
def testDALTON_DALTON_2015_dvb_td_normalprint_out(logfile):
"""This original unit test prints a DFT-specific version of the excitation
eigenvectors, which we do not parse.
"""
assert hasattr(logfile.data, "etenergies")
assert not hasattr(logfile.data, "etsecs")
assert hasattr(logfile.data, "etsyms")
assert hasattr(logfile.data, "etoscs")
assert logfile.data.metadata["package_version"] == "2015.0+d34efb170c481236ad60c789dea90a4c857c6bab"
def testDALTON_DALTON_2015_stopiter_dalton_dft_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 8
assert logfile.data.metadata["package_version"] == "2015.0+d34efb170c481236ad60c789dea90a4c857c6bab"
def testDALTON_DALTON_2015_stopiter_dalton_hf_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 5
assert logfile.data.metadata["package_version"] == "2015.0+d34efb170c481236ad60c789dea90a4c857c6bab"
def testDALTON_DALTON_2016_huge_neg_polar_freq_out(logfile):
"""This is an example of a multiple frequency-dependent polarizability
calculation.
"""
assert hasattr(logfile.data, "polarizabilities")
assert len(logfile.data.polarizabilities) == 3
assert abs(logfile.data.polarizabilities[2][0, 0] - 183.6308) < 1.0e-5
assert logfile.data.metadata["legacy_package_version"] == "2016.2"
assert logfile.data.metadata["package_version"] == "2016.2+7db4647eac203e51aae7da3cbc289f55146b30e9"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testDALTON_DALTON_2016_huge_neg_polar_stat_out(logfile):
"""This logfile didn't parse due to lack of spacing between
polarizability tensor elements.
"""
assert hasattr(logfile.data, "polarizabilities")
assert len(logfile.data.polarizabilities) == 1
assert abs(logfile.data.polarizabilities[0][1, 1] + 7220.150408) < 1.0e-7
assert logfile.data.metadata["package_version"] == "2016.2+7db4647eac203e51aae7da3cbc289f55146b30e9"
def testDALTON_DALTON_2016_Trp_polar_response_diplnx_out(logfile):
"""Check that only the xx component of polarizability is defined and
all others are NaN even after parsing a previous file with full tensor.
"""
full_tens_path = os.path.join(__regression_dir__, "DALTON/DALTON-2015/Trp_polar_response.out")
DALTON(full_tens_path, loglevel=logging.ERROR).parse()
assert hasattr(logfile.data, "polarizabilities")
assert abs(logfile.data.polarizabilities[0][0, 0] - 95.11540019) < 1.0e-8
assert numpy.count_nonzero(numpy.isnan(logfile.data.polarizabilities)) == 8
assert logfile.data.metadata["package_version"] == "2016.2+7db4647eac203e51aae7da3cbc289f55146b30e9"
def testDALTON_DALTON_2018_dft_properties_nosym_H2O_cc_pVDZ_out(logfile):
"""The "simple" version string in newer development versions of DALTON wasn't
being parsed properly.
This file is in DALTON-2018, rather than DALTON-2019, because 2018.0 was
just released.
"""
assert logfile.data.metadata["legacy_package_version"] == "2019.alpha"
assert logfile.data.metadata["package_version"] == "2019.alpha"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testDALTON_DALTON_2018_tdhf_2000_out(logfile):
"""Ensure etsecs are being parsed from a TDHF calculation without symmetry and
a big print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 9
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), -0.9733558768]
assert logfile.data.metadata["legacy_package_version"] == "2019.alpha"
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testDALTON_DALTON_2018_tdhf_2000_sym_out(logfile):
"""Ensure etsecs are being parsed from a TDHF calculation with symmetry and a
big print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 3
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), 0.9733562358]
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
def testDALTON_DALTON_2018_tdhf_normal_out(logfile):
"""Ensure etsecs are being parsed from a TDHF calculation without symmetry and
a normal print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 9
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), -0.9733558768]
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
def testDALTON_DALTON_2018_tdhf_normal_sym_out(logfile):
"""Ensure etsecs are being parsed from a TDHF calculation with symmetry and a
normal print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 3
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), 0.9733562358]
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
def testDALTON_DALTON_2018_tdpbe_2000_out(logfile):
"""Ensure etsecs are being parsed from a TDDFT calculation without symmetry
and a big print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 9
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), 0.9992665559]
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
def testDALTON_DALTON_2018_tdpbe_2000_sym_out(logfile):
"""Ensure etsecs are being parsed from a TDDFT calculation with symmetry and a
big print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 3
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), 0.9992672154]
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
def testDALTON_DALTON_2018_tdpbe_normal_out(logfile):
"""Ensure etsecs are being parsed from a TDDFT calculation without symmetry
and a normal print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 9
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), 0.9992665559]
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
def testDALTON_DALTON_2018_tdpbe_normal_sym_out(logfile):
"""Ensure etsecs are being parsed from a TDDFT calculation with symmetry and a
normal print level.
"""
assert hasattr(logfile.data, "etsecs")
for attr in ("etenergies", "etsecs", "etsyms", "etoscs"):
assert len(getattr(logfile.data, attr)) == 3
assert logfile.data.etsecs[0][0] == [(1, 0), (2, 0), 0.9992672154]
assert logfile.data.metadata["package_version"] == "2019.alpha+25947a3d842ee2ebb42bff87a4dd64adbbd3ec5b"
# Firefly #
def testGAMESS_Firefly8_0_dvb_gopt_a_unconverged_out(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.metadata["legacy_package_version"] == "8.0.1"
assert logfile.data.metadata["package_version"] == "8.0.1+8540"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_Firefly8_0_h2o_log(logfile):
"""Check that molecular orbitals are parsed correctly (cclib/cclib#208)."""
assert logfile.data.mocoeffs[0][0][0] == -0.994216
assert logfile.data.metadata["legacy_package_version"] == "8.0.0"
assert logfile.data.metadata["package_version"] == "8.0.0+7651"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_Firefly8_0_stopiter_firefly_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 6
assert logfile.data.metadata["package_version"] == "8.0.1+8540"
def testGAMESS_Firefly8_1_benzene_am1_log(logfile):
"""Molecular orbitals were not parsed (cclib/cclib#228)."""
assert hasattr(logfile.data, 'mocoeffs')
assert logfile.data.metadata["legacy_package_version"] == "8.1.0"
assert logfile.data.metadata["package_version"] == "8.1.0+9035"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_Firefly8_1_naphtalene_t_0_out(logfile):
"""Molecular orbitals were not parsed (cclib/cclib#228)."""
assert hasattr(logfile.data, 'mocoeffs')
assert logfile.data.metadata["legacy_package_version"] == "8.1.1"
assert logfile.data.metadata["package_version"] == "8.1.1+9295"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_Firefly8_1_naphtalene_t_0_SP_out(logfile):
"""Molecular orbitals were not parsed (cclib/cclib#228)."""
assert hasattr(logfile.data, 'mocoeffs')
assert logfile.data.metadata["package_version"] == "8.1.1+9295"
# GAMESS #
def testGAMESS_GAMESS_US2008_N2_UMP2_out(logfile):
"""Check that the new format for GAMESS MP2 is parsed."""
assert hasattr(logfile.data, "mpenergies")
assert len(logfile.data.mpenergies) == 1
assert abs(logfile.data.mpenergies[0] + 2975.97) < 0.01
assert logfile.data.metadata["legacy_package_version"] == "2008R1"
assert logfile.data.metadata["package_version"] == "2008.r1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_GAMESS_US2008_N2_ROMP2_out(logfile):
"""Check that the new format for GAMESS MP2 is parsed."""
assert hasattr(logfile.data, "mpenergies")
assert len(logfile.data.mpenergies) == 1
assert abs(logfile.data.mpenergies[0] + 2975.97) < 0.01
assert logfile.data.metadata["package_version"] == "2008.r1"
def testGAMESS_GAMESS_US2009_open_shell_ccsd_test_log(logfile):
"""Parse ccenergies from open shell CCSD calculations."""
assert hasattr(logfile.data, "ccenergies")
assert len(logfile.data.ccenergies) == 1
assert abs(logfile.data.ccenergies[0] + 3501.50) < 0.01
assert logfile.data.metadata["legacy_package_version"] == "2009R3"
assert logfile.data.metadata["package_version"] == "2009.r3"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_GAMESS_US2009_paulo_h2o_mp2_out(logfile):
"""Check that the new format for GAMESS MP2 is parsed."""
assert hasattr(logfile.data, "mpenergies")
assert len(logfile.data.mpenergies) == 1
assert abs(logfile.data.mpenergies[0] + 2072.13) < 0.01
assert logfile.data.metadata["package_version"] == "2009.r3"
def testGAMESS_GAMESS_US2012_dvb_gopt_a_unconverged_out(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.metadata["legacy_package_version"] == "2012R2"
assert logfile.data.metadata["package_version"] == "2012.r2"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_GAMESS_US2012_stopiter_gamess_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 10
assert logfile.data.metadata["package_version"] == "2012.r1"
def testGAMESS_GAMESS_US2013_N_UHF_out(logfile):
"""An UHF job that has an LZ value analysis between the alpha and beta orbitals."""
assert len(logfile.data.moenergies) == 2
assert logfile.data.metadata["legacy_package_version"] == "2013R1"
assert logfile.data.metadata["package_version"] == "2013.r1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_GAMESS_US2014_CdtetraM1B3LYP_log(logfile):
"""This logfile had coefficients for only 80 molecular orbitals."""
assert len(logfile.data.mocoeffs) == 2
assert numpy.count_nonzero(logfile.data.mocoeffs[0][79-1:, :]) == 258
assert numpy.count_nonzero(logfile.data.mocoeffs[0][80-1: 0:]) == 0
assert logfile.data.mocoeffs[0].all() == logfile.data.mocoeffs[1].all()
assert logfile.data.metadata["legacy_package_version"] == "2014R1"
assert logfile.data.metadata["package_version"] == "2014.r1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_GAMESS_US2018_exam45_log(logfile):
"""This logfile has EOM-CC electronic transitions (not currently supported)."""
assert not hasattr(logfile.data, 'etenergies')
assert logfile.data.metadata["legacy_package_version"] == "2018R2"
assert logfile.data.metadata["package_version"] == "2018.r2"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_GAMESS_US2018_exam46_log(logfile):
"""
This logfile has >100 scf iterations, which used to cause
a parsing error.
"""
assert len(logfile.data.scfvalues[0]) == 113
assert logfile.data.metadata["legacy_package_version"] == "2018R3"
assert logfile.data.metadata["package_version"] == "2018.r3"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_WinGAMESS_dvb_td_trplet_2007_03_24_r1_out(logfile):
"""Do some basic checks for this old unit test that was failing.
The unit tests are not run automatically on this old unit logfile,
because we know the output has etsecs whose sum is way off.
So, perform a subset of the basic assertions for GenericTDTesttrp.
"""
number = 5
assert len(logfile.data.etenergies) == number
idx_lambdamax = [i for i, x in enumerate(logfile.data.etoscs) if x == max(logfile.data.etoscs)][0]
assert abs(logfile.data.etenergies[idx_lambdamax] - 24500) < 100
assert len(logfile.data.etoscs) == number
assert abs(max(logfile.data.etoscs) - 0.0) < 0.01
assert len(logfile.data.etsecs) == number
assert logfile.data.metadata["legacy_package_version"] == "2007R1"
assert logfile.data.metadata["package_version"] == "2007.r1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testnoparseGAMESS_WinGAMESS_H2O_def2SVPD_triplet_2019_06_30_R1_out(logfile):
"""Check if the molden writer can handle an unrestricted case
"""
    data = ccread(os.path.join(__filedir__, logfile))
writer = moldenwriter.MOLDEN(data)
    # Check the size of the [MO] section written out for both spins.
assert len(writer._mo_from_ccdata()) == (data.nbasis + 4) * (data.nmo * 2)
# check docc orbital
beta_idx = (data.nbasis + 4) * data.nmo
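    # Each MO block written to the molden file spans 4 header lines (Sym=, Ene=,
    # Spin=, Occup=) plus nbasis coefficient lines, so the beta orbitals start
    # right after the nmo alpha blocks.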
assert "Beta" in writer._mo_from_ccdata()[beta_idx + 2]
assert "Occup= 1.000000" in writer._mo_from_ccdata()[beta_idx + 3]
assert "0.989063" in writer._mo_from_ccdata()[beta_idx + 4]
# GAMESS-UK #
def testGAMESS_UK_GAMESS_UK8_0_dvb_gopt_hf_unconverged_out(logfile):
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.metadata["legacy_package_version"] == "8.0"
assert logfile.data.metadata["package_version"] == "8.0+6248"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGAMESS_UK_GAMESS_UK8_0_stopiter_gamessuk_dft_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 7
assert logfile.data.metadata["package_version"] == "8.0+6248"
def testGAMESS_UK_GAMESS_UK8_0_stopiter_gamessuk_hf_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 5
assert logfile.data.metadata["package_version"] == "8.0+6248"
# Gaussian #
def testGaussian_Gaussian98_C_bigmult_log(logfile):
"""
    This file failed first because it had a double digit multiplicity.
Then it failed because it had no alpha virtual orbitals.
"""
assert logfile.data.charge == -3
assert logfile.data.mult == 10
assert logfile.data.homos[0] == 8
assert logfile.data.homos[1] == -1 # No occupied beta orbitals
assert logfile.data.metadata["legacy_package_version"] == "98revisionA.11.3"
assert logfile.data.metadata["package_version"] == "1998+A.11.3"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian98_NIST_CCCBDB_1himidaz_m21b0_out(logfile):
"""A G3 computation is a sequence of jobs."""
# All steps deal with the same molecule, so we extract the coordinates
# from all steps.
assert len(logfile.data.atomcoords) == 10
# Different G3 steps do perturbation to different orders, and so
# we expect only the last MP2 energy to be extracted.
assert len(logfile.data.mpenergies) == 1
assert logfile.data.metadata["legacy_package_version"] == "98revisionA.7"
assert logfile.data.metadata["package_version"] == "1998+A.7"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian98_NIST_CCCBDB_1himidaz_m23b6_out(logfile):
"""A job that was killed before it ended, should have several basic attributes parsed."""
assert hasattr(logfile.data, 'charge')
assert hasattr(logfile.data, 'metadata')
assert hasattr(logfile.data, 'mult')
assert logfile.data.metadata["package_version"] == "1998+A.7"
def testGaussian_Gaussian98_test_Cu2_log(logfile):
"""An example of the number of basis set function changing."""
assert logfile.data.nbasis == 38
assert logfile.data.metadata["legacy_package_version"] == "98revisionA.11.4"
assert logfile.data.metadata["package_version"] == "1998+A.11.4"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian98_test_H2_log(logfile):
"""
The atomic charges from a natural population analysis were
not parsed correctly, and they should be zero for dihydrogen.
"""
assert logfile.data.atomcharges['natural'][0] == 0.0
assert logfile.data.atomcharges['natural'][1] == 0.0
assert logfile.data.metadata["package_version"] == "1998+A.11.4"
def testGaussian_Gaussian98_water_zmatrix_nosym_log(logfile):
"""This file is missing natom.
    This file had no atomcoords as it did not contain either an
    "Input orientation" or a "Standard orientation" section.
As a result it failed to parse. Fixed in r400.
"""
assert len(logfile.data.atomcoords) == 1
assert logfile.data.natom == 3
assert logfile.data.metadata["package_version"] == "1998+A.11.3"
def testGaussian_Gaussian03_AM1_SP_out(logfile):
"""Previously, caused scfvalue parsing to fail."""
assert len(logfile.data.scfvalues[0]) == 13
assert logfile.data.metadata["legacy_package_version"] == "03revisionE.01"
assert logfile.data.metadata["package_version"] == "2003+E.01"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian03_anthracene_log(logfile):
"""This file exposed a bug in extracting the vibsyms."""
assert len(logfile.data.vibsyms) == len(logfile.data.vibfreqs)
assert logfile.data.metadata["legacy_package_version"] == "03revisionC.02"
assert logfile.data.metadata["package_version"] == "2003+C.02"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian03_borane_opt_log(logfile):
"""An example of changing molecular orbital count."""
assert logfile.data.optstatus[-1] == logfile.data.OPT_DONE
assert logfile.data.nmo == 609
assert logfile.data.metadata["package_version"] == "2003+E.01"
def testGaussian_Gaussian03_chn1_log(logfile):
"""
This file failed to parse, due to the use of 'pop=regular'.
We have decided that mocoeffs should not be defined for such calculations.
"""
assert not hasattr(logfile.data, "mocoeffs")
assert logfile.data.metadata["legacy_package_version"] == "03revisionB.04"
assert logfile.data.metadata["package_version"] == "2003+B.04"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian03_cyclopropenyl_rhf_g03_cut_log(logfile):
"""
Not using symmetry at all (option nosymm) means standard orientation
is not printed. In this case inputcoords are copied by the parser,
which up till now stored the last coordinates.
"""
assert len(logfile.data.atomcoords) == len(logfile.data.geovalues)
assert logfile.data.metadata["package_version"] == "2003+C.02"
def testGaussian_Gaussian03_DCV4T_C60_log(logfile):
"""This is a test for a very large Gaussian file with > 99 atoms.
The log file is too big, so we are just including the start.
Previously, parsing failed in the pseudopotential section.
"""
assert len(logfile.data.coreelectrons) == 102
assert logfile.data.coreelectrons[101] == 2
assert logfile.data.metadata["legacy_package_version"] == "03revisionD.02"
assert logfile.data.metadata["package_version"] == "2003+D.02"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian03_dvb_gopt_symmfollow_log(logfile):
"""Non-standard treatment of symmetry.
In this case the Standard orientation is also printed non-standard,
which caused only the first coordinates to be read previously.
"""
assert len(logfile.data.atomcoords) == len(logfile.data.geovalues)
assert logfile.data.metadata["legacy_package_version"] == "03revisionC.01"
assert logfile.data.metadata["package_version"] == "2003+C.01"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian03_mendes_out(logfile):
"""Previously, failed to extract coreelectrons."""
centers = [9, 10, 11, 27]
for i, x in enumerate(logfile.data.coreelectrons):
if i in centers:
assert x == 10
else:
assert x == 0
assert logfile.data.metadata["package_version"] == "2003+C.02"
def testGaussian_Gaussian03_Mo4OSibdt2_opt_log(logfile):
"""
This file had no atomcoords as it did not contain any
"Input orientation" sections, only "Standard orientation".
"""
assert logfile.data.optstatus[-1] == logfile.data.OPT_DONE
assert hasattr(logfile.data, "atomcoords")
assert logfile.data.metadata["package_version"] == "2003+C.02"
def testGaussian_Gaussian03_orbgs_log(logfile):
"""Check that the pseudopotential is being parsed correctly."""
assert hasattr(logfile.data, "coreelectrons"), "Missing coreelectrons"
assert logfile.data.coreelectrons[0] == 28
assert logfile.data.coreelectrons[15] == 10
assert logfile.data.coreelectrons[20] == 10
assert logfile.data.coreelectrons[23] == 10
assert logfile.data.metadata["package_version"] == "2003+C.02"
def testGaussian_Gaussian09_100_g09(logfile):
"""Check that the final system is the one parsed (cclib/cclib#243)."""
assert logfile.data.natom == 54
assert logfile.data.homos == [104]
assert logfile.data.metadata["legacy_package_version"] == "09revisionB.01"
assert logfile.data.metadata["package_version"] == "2009+B.01"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian09_25DMF_HRANH_log(logfile):
"""Check that the anharmonicities are being parsed correctly."""
assert hasattr(logfile.data, "vibanharms"), "Missing vibanharms"
anharms = logfile.data.vibanharms
N = len(logfile.data.vibfreqs)
assert 39 == N == anharms.shape[0] == anharms.shape[1]
assert abs(anharms[0][0] + 43.341) < 0.01
assert abs(anharms[N-1][N-1] + 36.481) < 0.01
assert logfile.data.metadata["package_version"] == "2009+B.01"
def testGaussian_Gaussian09_2D_PES_all_converged_log(logfile):
"""Check that optstatus has no UNCOVERGED values."""
assert ccData.OPT_UNCONVERGED not in logfile.data.optstatus
assert logfile.data.metadata["legacy_package_version"] == "09revisionD.01"
assert logfile.data.metadata["package_version"] == "2009+D.01"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
# The energies printed in the scan summary are misformated.
assert numpy.all(numpy.isnan(logfile.data.scanenergies))
def testGaussian_Gaussian09_2D_PES_one_unconverged_log(logfile):
"""Check that optstatus contains UNCOVERGED values."""
assert ccData.OPT_UNCONVERGED in logfile.data.optstatus
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_534_out(logfile):
"""Previously, caused etenergies parsing to fail."""
assert logfile.data.etsyms[0] == "Singlet-?Sym"
assert abs(logfile.data.etenergies[0] - 20920.55328) < 1.0
assert logfile.data.metadata["legacy_package_version"] == "09revisionA.02"
assert logfile.data.metadata["package_version"] == "2009+A.02"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian09_BSL_opt_freq_DFT_out(logfile):
"""Failed for converting to CJSON when moments weren't parsed for
Gaussian.
"""
assert hasattr(logfile.data, 'moments')
# dipole Y
assert logfile.data.moments[1][1] == 0.5009
# hexadecapole ZZZZ
assert logfile.data.moments[4][-1] == -77.9600
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_dvb_gopt_unconverged_log(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.optstatus[-1] == logfile.data.OPT_UNCONVERGED
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_dvb_lowdin_log(logfile):
"""Check if both Mulliken and Lowdin charges are parsed."""
assert "mulliken" in logfile.data.atomcharges
assert "lowdin" in logfile.data.atomcharges
assert logfile.data.metadata["package_version"] == "2009+A.02"
def testGaussian_Gaussian09_Dahlgren_TS_log(logfile):
"""Failed to parse ccenergies for a variety of reasons"""
assert hasattr(logfile.data, "ccenergies")
assert abs(logfile.data.ccenergies[0] - (-11819.96506609)) < 0.001
assert logfile.data.metadata["package_version"] == "2009+A.02"
def testGaussian_Gaussian09_irc_point_log(logfile):
"""Failed to parse vibfreqs except for 10, 11"""
assert hasattr(logfile.data, "vibfreqs")
assert len(logfile.data.vibfreqs) == 11
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_issue_460_log(logfile):
"""Lots of malformed lines when parsing for scfvalues:
RMSDP=3.79D-04 MaxDP=4.02D-02 OVMax= 4.31D-02
RMSDP=1.43D-06 MaxDP=5.44D-04 DE=-6.21D-07 OVMax= 5.76D-04
RMSDP=2.06D-05 MaxDP=3.84D-03 DE= 4.82D-04 O E= -2574.14897924075 Delta-E= 0.000439804468 Rises=F Damp=F
RMSDP=8.64D-09 MaxDP=2.65D-06 DE=-1.67D-10 OVMax= 3. E= -2574.14837678675 Delta-E= -0.000000179038 Rises=F Damp=F
RMSDP= E= -2574.14931865182 Delta-E= -0.000000019540 Rises=F Damp=F
RMSDP=9.34D- E= -2574.14837612206 Delta-E= -0.000000620705 Rises=F Damp=F
RMSDP=7.18D-05 Max E= -2574.14797761904 Delta-E= -0.000000000397 Rises=F Damp=F
RMSDP=1.85D-06 MaxD E= -2574.14770506975 Delta-E= -0.042173156160 Rises=F Damp=F
RMSDP=1.69D-06 MaxDP= E= -2574.14801776548 Delta-E= 0.000023521317 Rises=F Damp=F
RMSDP=3.80D-08 MaxDP=1 E= -2574.14856570920 Delta-E= -0.000002960194 Rises=F Damp=F
RMSDP=4.47D-09 MaxDP=1.40 E= -2574.14915435699 Delta-E= -0.000255709558 Rises=F Damp=F
RMSDP=5.54D-08 MaxDP=1.55D-05 DE=-2.55D-0 E= -2574.14854319757 Delta-E= -0.000929740010 Rises=F Damp=F
RMSDP=7.20D-09 MaxDP=1.75D-06 DE=- (Enter /QFsoft/applic/GAUSSIAN/g09d.01_pgi11.9-ISTANBUL/g09/l703.exe)
RMSDP=5.24D-09 MaxDP=1.47D-06 DE=-1.82D-11 OVMax= 2.15 (Enter /QFsoft/applic/GAUSSIAN/g09d.01_pgi11.9-ISTANBUL/g09/l703.exe)
RMSDP=1.71D-04 MaxDP=1.54D-02 Iteration 2 A^-1*A deviation from unit magnitude is 1.11D-15 for 266.
"""
assert hasattr(logfile.data, 'scfvalues')
assert logfile.data.scfvalues[0][0, 0] == 3.37e-03
assert numpy.isnan(logfile.data.scfvalues[0][0, 2])
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_OPT_td_g09_out(logfile):
"""Couldn't find etrotats as G09 has different output than G03."""
assert len(logfile.data.etrotats) == 10
assert logfile.data.etrotats[0] == -0.4568
assert logfile.data.metadata["package_version"] == "2009+A.02"
def testGaussian_Gaussian09_OPT_td_out(logfile):
"""Working fine - adding to ensure that CD is parsed correctly."""
assert len(logfile.data.etrotats) == 10
assert logfile.data.etrotats[0] == -0.4568
assert logfile.data.metadata["package_version"] == "2003+B.05"
def testGaussian_Gaussian09_OPT_oniom_log(logfile):
"""AO basis extraction broke with ONIOM"""
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_oniom_IR_intensity_log(logfile):
"""Problem parsing IR intensity from mode 192"""
assert hasattr(logfile.data, 'vibirs')
assert len(logfile.data.vibirs) == 216
assert logfile.data.metadata["package_version"] == "2009+C.01"
def testGaussian_Gaussian09_Ru2bpyen2_H2_freq3_log(logfile):
"""Here atomnos wans't added to the gaussian parser before."""
assert len(logfile.data.atomnos) == 69
assert logfile.data.metadata["package_version"] == "2009+A.02"
def testGaussian_Gaussian09_benzene_HPfreq_log(logfile):
"""Check that higher precision vib displacements obtained with freq=hpmodes) are parsed correctly."""
assert abs(logfile.data.vibdisps[0,0,2] - (-0.04497)) < 0.00001
assert logfile.data.metadata["package_version"] == "2009+C.01"
def testGaussian_Gaussian09_benzene_freq_log(logfile):
"""Check that default precision vib displacements are parsed correctly."""
assert abs(logfile.data.vibdisps[0,0,2] - (-0.04)) < 0.00001
assert logfile.data.metadata["package_version"] == "2009+C.01"
def testGaussian_Gaussian09_relaxed_PES_testH2_log(logfile):
"""Check that all optimizations converge in a single step."""
atomcoords = logfile.data.atomcoords
optstatus = logfile.data.optstatus
assert len(optstatus) == len(atomcoords)
assert all(s == ccData.OPT_DONE + ccData.OPT_NEW for s in optstatus)
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_relaxed_PES_testCO2_log(logfile):
"""A relaxed PES scan with some uncoverged and some converged runs."""
atomcoords = logfile.data.atomcoords
optstatus = logfile.data.optstatus
assert len(optstatus) == len(atomcoords)
new_points = numpy.where(optstatus & ccData.OPT_NEW)[0]
# The first new point is just the beginning of the scan.
assert new_points[0] == 0
# The next two new points are at the end of unconverged runs.
assert optstatus[new_points[1]-1] == ccData.OPT_UNCONVERGED
assert all(optstatus[i] == ccData.OPT_UNKNOWN for i in range(new_points[0]+1, new_points[1]-1))
assert optstatus[new_points[2]-1] == ccData.OPT_UNCONVERGED
assert all(optstatus[i] == ccData.OPT_UNKNOWN for i in range(new_points[1]+1, new_points[2]-1))
# The next new point is after a convergence.
assert optstatus[new_points[3]-1] == ccData.OPT_DONE
assert all(optstatus[i] == ccData.OPT_UNKNOWN for i in range(new_points[2]+1, new_points[3]-1))
    # All subsequent points are both new and converged, since they seem
# to have converged in a single step.
assert all(s == ccData.OPT_DONE + ccData.OPT_NEW for s in optstatus[new_points[3]:])
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_stopiter_gaussian_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 4
assert logfile.data.metadata["package_version"] == "2009+D.01"
def testGaussian_Gaussian09_benzene_excited_states_optimization_issue889_log(logfile):
"""Check that only converged geometry excited states properties are reported."""
assert logfile.data.etdips.shape == (20,3)
assert len(logfile.data.etenergies) == 20
assert logfile.data.etmagdips.shape == (20,3)
assert len(logfile.data.etoscs) == 20
assert len(logfile.data.etrotats) == 20
assert len(logfile.data.etsecs) == 20
assert logfile.data.etveldips.shape == (20,3)
def testGaussian_Gaussian16_naturalspinorbitals_parsing_log(logfile):
"""A UHF calculation with natural spin orbitals."""
assert isinstance(logfile.data.nsocoeffs, list)
assert isinstance(logfile.data.nsocoeffs[0], numpy.ndarray)
assert isinstance(logfile.data.nsocoeffs[1], numpy.ndarray)
assert isinstance(logfile.data.nsooccnos, list)
assert isinstance(logfile.data.nsooccnos[0], list)
assert isinstance(logfile.data.nsooccnos[1], list)
assert isinstance(logfile.data.aonames,list)
assert isinstance(logfile.data.atombasis,list)
assert numpy.shape(logfile.data.nsocoeffs) == (2,logfile.data.nmo,logfile.data.nmo)
assert len(logfile.data.nsooccnos[0]) == logfile.data.nmo
assert len(logfile.data.nsooccnos[1]) == logfile.data.nmo
assert len(logfile.data.aonames) == logfile.data.nbasis
assert len(numpy.ravel(logfile.data.atombasis)) == logfile.data.nbasis
assert logfile.data.nsooccnos[0][14] == 0.00506
assert logfile.data.nsooccnos[1][14] == 0.00318
assert logfile.data.nsocoeffs[0][14,12] == 0.00618
assert logfile.data.nsocoeffs[1][14,9] == 0.79289
assert logfile.data.aonames[41] == 'O2_9D 0'
assert logfile.data.atombasis[1][0] == 23
assert logfile.data.metadata["legacy_package_version"] == "16revisionA.03"
assert logfile.data.metadata["package_version"] == "2016+A.03"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testGaussian_Gaussian16_issue851_log(logfile):
"""Surface scan from cclib/cclib#851 where attributes were not lists."""
assert isinstance(logfile.data.scannames, list)
assert isinstance(logfile.data.scanparm, list)
assert isinstance(logfile.data.scanenergies, list)
def testGaussian_Gaussian16_issue962_log(logfile):
"""For issue 962, this shouldn't have scftargets but should parse fully"""
assert not hasattr(logfile.data, "scftargets")
# Jaguar #
# It would be good to have an unconverged geometry optimization so that
# we can test that optdone is set properly.
#def testJaguarX.X_dvb_gopt_unconverged:
# assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
def testJaguar_Jaguar8_3_stopiter_jaguar_dft_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 4
assert logfile.data.metadata["legacy_package_version"] == "8.3"
assert logfile.data.metadata["package_version"] == "8.3+13"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testJaguar_Jaguar8_3_stopiter_jaguar_hf_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 3
assert logfile.data.metadata["package_version"] == "8.3+13"
# Molcas #
def testMolcas_Molcas18_test_standard_000_out(logfile):
"""Don't support parsing MOs for multiple symmetry species."""
assert not hasattr(logfile.data, "moenergies")
assert not hasattr(logfile.data, "mocoeffs")
assert logfile.data.metadata["legacy_package_version"] == "18.09"
assert logfile.data.metadata["package_version"] == "18.09+52-ge15dc38.81d3fb3dc6a5c5df6b3791ef1ef3790f"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testMolcas_Molcas18_test_standard_001_out(logfile):
"""This logfile has two calculations, and we currently only want to parse the first."""
assert logfile.data.natom == 8
# There are also four symmetry species, and orbital count should cover all of them.
assert logfile.data.nbasis == 30
assert logfile.data.nmo == 30
assert logfile.data.metadata["package_version"] == "18.09+52-ge15dc38.81d3fb3dc6a5c5df6b3791ef1ef3790f"
def testMolcas_Molcas18_test_standard_003_out(logfile):
"""This logfile has extra charged monopoles (not part of the molecule)."""
assert logfile.data.charge == 0
assert logfile.data.metadata["package_version"] == "18.09+52-ge15dc38.81d3fb3dc6a5c5df6b3791ef1ef3790f"
def testMolcas_Molcas18_test_standard_005_out(logfile):
"""Final geometry in optimization has fewer atoms due to symmetry, and so is ignored."""
assert len(logfile.data.atomcoords) == 2
assert logfile.data.metadata["package_version"] == "18.09+52-ge15dc38.81d3fb3dc6a5c5df6b3791ef1ef3790f"
def testMolcas_Molcas18_test_stevenv_001_out(logfile):
"""Don't support parsing MOs for RAS (active space)."""
assert not hasattr(logfile.data, "moenergies")
assert not hasattr(logfile.data, "mocoeffs")
assert logfile.data.metadata["package_version"] == "18.09+52-ge15dc38.81d3fb3dc6a5c5df6b3791ef1ef3790f"
def testMolcas_Molcas18_test_stevenv_desym_out(logfile):
"""This logfile has iterations interrupted by a Fermi aufbau procedure."""
assert len(logfile.data.scfvalues) == 1
assert len(logfile.data.scfvalues[0]) == 26
assert logfile.data.metadata["package_version"] == "18.09+52-ge15dc38.81d3fb3dc6a5c5df6b3791ef1ef3790f"
# Molpro #
def testMolpro_Molpro2008_ch2o_molpro_casscf_out(logfile):
"""A CASSCF job with symmetry and natural orbitals."""
# The last two atoms are equivalent, so the last ends up having no
    # functions assigned. This is not obvious, because the functions are
# distributed between the last two atoms in the block where gbasis
# is parsed, but it seems all are assigned to the penultimate atom later.
assert logfile.data.atombasis[-1] == []
assert len(logfile.data.aonames) == logfile.data.nbasis
    # The MO coefficients are printed in several blocks, each corresponding
# to one irrep, so make sure we have reconstructed the coefficients correctly.
assert len(logfile.data.moenergies) == 1
assert logfile.data.moenergies[0].shape == (logfile.data.nmo, )
assert len(logfile.data.mocoeffs) == 1
assert logfile.data.mocoeffs[0].shape == (logfile.data.nmo, logfile.data.nbasis)
# These coefficients should be zero due to symmetry.
assert logfile.data.mocoeffs[0][-2][0] == 0.0
assert logfile.data.mocoeffs[0][0][-2] == 0.0
assert isinstance(logfile.data.nocoeffs, numpy.ndarray)
assert isinstance(logfile.data.nooccnos, numpy.ndarray)
assert logfile.data.nocoeffs.shape == logfile.data.mocoeffs[0].shape
assert len(logfile.data.nooccnos) == logfile.data.nmo
assert logfile.data.nooccnos[27] == 1.95640
assert logfile.data.metadata["legacy_package_version"] == "2012.1"
assert logfile.data.metadata["package_version"] == "2012.1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testMolpro_Molpro2012_CHONHSH_HF_STO_3G_out(logfile):
"""Formatting of the basis function is slightly different than expected."""
assert len(logfile.data.gbasis) == 7
assert len(logfile.data.gbasis[0]) == 3 # C
assert len(logfile.data.gbasis[1]) == 3 # N
assert len(logfile.data.gbasis[2]) == 3 # O
assert len(logfile.data.gbasis[3]) == 5 # S
assert len(logfile.data.gbasis[4]) == 1 # H
assert len(logfile.data.gbasis[5]) == 1 # H
assert len(logfile.data.gbasis[6]) == 1 # H
assert logfile.data.metadata["legacy_package_version"] == "2012.1"
assert logfile.data.metadata["package_version"] == "2012.1.23+f8cfea266908527a8826bdcd5983aaf62e47d3bf"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testMolpro_Molpro2012_dvb_gopt_unconverged_out(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.metadata["legacy_package_version"] == "2012.1"
assert logfile.data.metadata["package_version"] == "2012.1.12+e112a8ab93d81616c1987a1f1ef3707d874b6803"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testMolpro_Molpro2012_stopiter_molpro_dft_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 6
assert logfile.data.metadata["legacy_package_version"] == "2012.1"
assert logfile.data.metadata["package_version"] == "2012.1+c18f7d37f9f045f75d4f3096db241dde02ddca0a"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testMolpro_Molpro2012_stopiter_molpro_hf_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 6
assert logfile.data.metadata["package_version"] == "2012.1+c18f7d37f9f045f75d4f3096db241dde02ddca0a"
# MOPAC #
def testMOPAC_MOPAC2016_9S3_uuu_Cs_cation_freq_PM7_out(logfile):
"""There was a syntax error in the frequency parsing."""
assert hasattr(logfile.data, 'vibfreqs')
assert logfile.data.metadata["legacy_package_version"] == "2016"
assert logfile.data.metadata["package_version"] == "16.175"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
# NWChem #
def testNWChem_NWChem6_0_dvb_gopt_hf_unconverged_out(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.metadata["legacy_package_version"] == "6.0"
assert logfile.data.metadata["package_version"] == "6.0"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testNWChem_NWChem6_0_dvb_sp_hf_moments_only_quadrupole_out(logfile):
"""Quadrupole moments are printed/parsed, but not lower moments (no shape)."""
assert hasattr(logfile.data, 'moments') and len(logfile.data.moments) == 3
assert len(logfile.data.moments[0]) == 3
assert not logfile.data.moments[1].shape
assert len(logfile.data.moments[2]) == 6
assert logfile.data.metadata["package_version"] == "6.0"
def testNWChem_NWChem6_0_dvb_sp_hf_moments_only_octupole_out(logfile):
"""Quadrupole moments are printed/parsed, but not lower moments (no shape)."""
assert hasattr(logfile.data, 'moments') and len(logfile.data.moments) == 4
assert len(logfile.data.moments[0]) == 3
assert not logfile.data.moments[1].shape
assert not logfile.data.moments[2].shape
assert len(logfile.data.moments[3]) == 10
assert logfile.data.metadata["package_version"] == "6.0"
def testNWChem_NWChem6_0_hydrogen_atom_ROHF_cc_pVDZ_out(logfile):
"""A lone hydrogen atom is a common edge case; it has no beta
electrons.
"""
assert logfile.data.charge == 0
assert logfile.data.natom == 1
assert logfile.data.nbasis == 5
assert logfile.data.nmo == 5
assert len(logfile.data.moenergies) == 1
assert logfile.data.moenergies[0].shape == (5,)
assert logfile.data.homos.shape == (2,)
assert logfile.data.homos[0] == 0
assert logfile.data.homos[1] == -1
assert logfile.data.metadata["package_version"] == "6.0"
def testNWChem_NWChem6_0_hydrogen_atom_UHF_cc_pVDZ_out(logfile):
"""A lone hydrogen atom is a common edge case; it has no beta
electrons.
    Additionally, this calculation has no title, which caused some
issues with skip_lines().
"""
assert logfile.data.charge == 0
assert logfile.data.natom == 1
assert logfile.data.nbasis == 5
assert logfile.data.nmo == 5
assert len(logfile.data.moenergies) == 2
assert logfile.data.moenergies[0].shape == (5,)
assert logfile.data.moenergies[1].shape == (5,)
assert logfile.data.homos.shape == (2,)
assert logfile.data.homos[0] == 0
assert logfile.data.homos[1] == -1
assert logfile.data.metadata["package_version"] == "6.0"
def testNWChem_NWChem6_5_stopiter_nwchem_dft_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 3
assert logfile.data.metadata["legacy_package_version"] == "6.5"
assert logfile.data.metadata["package_version"] == "6.5+26243"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testNWChem_NWChem6_5_stopiter_nwchem_hf_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 2
assert logfile.data.metadata["package_version"] == "6.5+26243"
def testNWChem_NWChem6_8_526_out(logfile):
"""If `print low` is present in the input, SCF iterations are not
printed.
"""
assert not hasattr(logfile.data, "scftargets")
assert not hasattr(logfile.data, "scfvalues")
assert logfile.data.metadata["legacy_package_version"] == "6.8.1"
assert logfile.data.metadata["package_version"] == "6.8.1+g08bf49b"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
# ORCA #
def testORCA_ORCA2_8_co_cosmo_out(logfile):
"""This is related to bug 3184890.
The scfenergies were not being parsed correctly for this geometry
optimization run, for two reasons.
First, the printing of SCF total energies is different inside
geometry optimization steps than for single point calculations,
which also affects unit tests.
    Second, this logfile uses a setting that causes an SCF run to
terminate prematurely when a set maximum number of cycles is reached.
In this case, the last energy reported should probably be used,
and the number of values in scfenergies preserved.
"""
assert hasattr(logfile.data, "scfenergies") and len(logfile.data.scfenergies) == 4
assert logfile.data.metadata["legacy_package_version"] == "2.8"
assert logfile.data.metadata["package_version"] == "2.8+2287"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA2_9_job_out(logfile):
"""First output file and request to parse atomic spin densities.
    Make sure that the sum of such densities is one in this case (or reasonably close),
but remember that this attribute is a dictionary, so we must iterate.
"""
assert all([abs(sum(v)-1.0) < 0.0001 for k, v in logfile.data.atomspins.items()])
assert logfile.data.metadata["legacy_package_version"] == "2.9.0"
assert logfile.data.metadata["package_version"] == "2.9.0"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA2_9_qmspeedtest_hf_out(logfile):
"""Check precision of SCF energies (cclib/cclib#210)."""
energy = logfile.data.scfenergies[-1]
expected = -17542.5188694
assert abs(energy - expected) < 10**-6
assert logfile.data.metadata["legacy_package_version"] == "2.9.1"
assert logfile.data.metadata["package_version"] == "2.9.1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA3_0_chelpg_out(logfile):
"""ORCA file with chelpg charges"""
assert 'chelpg' in logfile.data.atomcharges
charges = logfile.data.atomcharges['chelpg']
assert len(charges) == 9
assert charges[0] == 0.363939
assert charges[1] == 0.025695
def testORCA_ORCA3_0_dvb_gopt_unconverged_out(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
assert logfile.data.metadata["legacy_package_version"] == "3.0.1"
assert logfile.data.metadata["package_version"] == "3.0.1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA3_0_polar_rhf_cg_out(logfile):
"""Alternative CP-SCF solver for the polarizability wasn't being detected."""
assert hasattr(logfile.data, 'polarizabilities')
assert logfile.data.metadata["legacy_package_version"] == "3.0.3"
assert logfile.data.metadata["package_version"] == "3.0.3"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA3_0_polar_rhf_diis_out(logfile):
"""Alternative CP-SCF solver for the polarizability wasn't being detected."""
assert hasattr(logfile.data, 'polarizabilities')
assert logfile.data.metadata["package_version"] == "3.0.3"
def testORCA_ORCA3_0_stopiter_orca_scf_compact_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 1
assert logfile.data.metadata["package_version"] == "3.0.1"
def testORCA_ORCA3_0_stopiter_orca_scf_large_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert len(logfile.data.scfvalues[0]) == 9
assert logfile.data.metadata["package_version"] == "2.9.1"
def testORCA_ORCA4_0_1_ttt_td_out(logfile):
"""RPA is slightly different from TDA, see #373."""
assert hasattr(logfile.data, 'etsyms')
assert len(logfile.data.etsecs) == 24
assert len(logfile.data.etsecs[0]) == 1
assert numpy.isnan(logfile.data.etsecs[0][0][2])
assert len(logfile.data.etrotats) == 24
assert logfile.data.etrotats[13] == -0.03974
assert logfile.data.metadata["legacy_package_version"] == "4.0.0"
assert logfile.data.metadata["package_version"] == "4.0.0"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA4_0_hydrogen_fluoride_numfreq_out(logfile):
"""Frequencies from linear molecules weren't parsed correctly (#426)."""
numpy.testing.assert_equal(logfile.data.vibfreqs, [4473.96])
def testORCA_ORCA4_0_hydrogen_fluoride_usesym_anfreq_out(logfile):
"""Frequencies from linear molecules weren't parsed correctly (#426)."""
numpy.testing.assert_equal(logfile.data.vibfreqs, [4473.89])
def testORCA_ORCA4_0_invalid_literal_for_float_out(logfile):
"""MO coefficients are glued together, see #629."""
assert hasattr(logfile.data, 'mocoeffs')
assert logfile.data.mocoeffs[0].shape == (logfile.data.nmo, logfile.data.nbasis)
# Test the coefficients from this line where things are glued together:
# 15C 6s -154.480939-111.069870-171.460819-79.052025241.536860-92.159399
assert logfile.data.mocoeffs[0][102][378] == -154.480939
assert logfile.data.mocoeffs[0][103][378] == -111.069870
assert logfile.data.mocoeffs[0][104][378] == -171.460819
assert logfile.data.mocoeffs[0][105][378] == -79.052025
assert logfile.data.mocoeffs[0][106][378] == 241.536860
assert logfile.data.mocoeffs[0][107][378] == -92.159399
assert logfile.data.metadata["legacy_package_version"] == "4.0.1.2"
assert logfile.data.metadata["package_version"] == "4.0.1.2"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
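# Illustrative sketch (an assumption, not ORCA- or cclib-specific code): the
# glued run quoted above can be split by relying on the six decimal places
# each coefficient carries in that line; the helper name is hypothetical.
def _split_glued_coefficients_sketch(block):
    import re
    return [float(x) for x in re.findall(r"-?\d+\.\d{6}", block)]
# e.g. _split_glued_coefficients_sketch("-79.052025241.536860")
# gives [-79.052025, 241.53686].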
def testORCA_ORCA4_0_IrCl6_sp_out(logfile):
"""Tests ECP and weird SCF printing."""
assert hasattr(logfile.data, 'scfvalues')
assert len(logfile.data.scfvalues) == 1
vals_first = [0.000000000000, 28.31276975, 0.71923638]
vals_last = [0.000037800796, 0.00412549, 0.00014041]
numpy.testing.assert_almost_equal(logfile.data.scfvalues[0][0], vals_first)
numpy.testing.assert_almost_equal(logfile.data.scfvalues[0][-1], vals_last)
def testORCA_ORCA4_0_comment_or_blank_line_out(logfile):
"""Coordinates with blank lines or comments weren't parsed correctly (#747)."""
assert hasattr(logfile.data,"atomcoords")
assert logfile.data.atomcoords.shape == (1, 8, 3)
assert logfile.data.metadata["legacy_package_version"] == "4.0.1.2"
assert logfile.data.metadata["package_version"] == "4.0.1.2"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA4_1_725_out(logfile):
"""This file uses embedding potentials, which requires `>` after atom names in
the input file and that confuses different parts of the parser.
In #725 we decided to not include these potentials in the parsed results.
"""
assert logfile.data.natom == 7
numpy.testing.assert_equal(logfile.data.atomnos, numpy.array([20, 17, 17, 17, 17, 17, 17], dtype=int))
assert len(logfile.data.atomcharges["mulliken"]) == 7
assert len(logfile.data.atomcharges["lowdin"]) == 7
assert logfile.data.metadata["legacy_package_version"] == "4.1.x"
assert logfile.data.metadata["package_version"] == "4.1dev+13440"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testORCA_ORCA4_1_orca_from_issue_736_out(logfile):
"""ORCA file with no whitespace between SCF iteration columns."""
assert len(logfile.data.scfvalues) == 23
# The first iteration in the problematic block:
# ITER Energy Delta-E Max-DP RMS-DP [F,P] Damp
# *** Starting incremental Fock matrix formation ***
# 0 -257.0554667435 0.000000000000537.42184135 4.76025534 0.4401076 0.8500
assert abs(logfile.data.scfvalues[14][0][1] - 537) < 1.0, logfile.data.scfvalues[14][0]
def testORCA_ORCA4_1_porphine_out(logfile):
"""ORCA optimization with multiple TD-DFT gradients and absorption spectra."""
assert len(logfile.data.etenergies) == 1
def testORCA_ORCA4_1_single_atom_freq_out(logfile):
"""ORCA frequency with single atom."""
assert len(logfile.data.vibdisps) == 0
assert len(logfile.data.vibfreqs) == 0
assert len(logfile.data.vibirs) == 0
# These values are different from what ORCA prints as the total enthalpy,
# because for single atoms that includes a spurious correction. We build the
# enthalpy ourselves from electronic and translational energies (see #817 for details).
numpy.testing.assert_almost_equal(logfile.data.enthalpy, -460.14376, 5)
numpy.testing.assert_almost_equal(logfile.data.entropy, 6.056e-5, 8)
numpy.testing.assert_almost_equal(logfile.data.freeenergy, -460.16182, 6)
def testORCA_ORCA4_2_947_out(logfile):
"""A constrained geometry optimization which prints the extra line
WARNING: THERE ARE 5 CONSTRAINED CARTESIAN COORDINATES
just before the gradient.
"""
assert len(logfile.data.atomcoords) == 7
assert len(logfile.data.grads) == 6
def testORCA_ORCA4_2_MP2_gradient_out(logfile):
"""ORCA numerical frequency calculation with gradients."""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert hasattr(logfile.data, 'grads')
assert logfile.data.grads.shape == (1, 3, 3)
# atom 2, y-coordinate.
idx = (0, 1, 1)
assert logfile.data.grads[idx] == -0.00040549
def testORCA_ORCA4_2_long_input_out(logfile):
"""Long ORCA input file (#804)."""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert hasattr(logfile.data, 'atomcoords')
assert logfile.data.atomcoords.shape == (100, 12, 3)
def testORCA_ORCA4_2_water_dlpno_ccsd_out(logfile):
"""DLPNO-CCSD files have extra lines between E(0) and E(TOT) than normal CCSD
outputs:
----------------------
COUPLED CLUSTER ENERGY
----------------------
E(0) ... -74.963574242
E(CORR)(strong-pairs) ... -0.049905771
E(CORR)(weak-pairs) ... 0.000000000
E(CORR)(corrected) ... -0.049905771
E(TOT) ... -75.013480013
Singles Norm <S|S>**1/2 ... 0.013957180
T1 diagnostic ... 0.004934608
"""
assert hasattr(logfile.data, 'ccenergies')
# PSI 3 #
def testPsi3_Psi3_4_water_psi3_log(logfile):
"""An RHF for water with D orbitals and C2v symmetry.
Here we can check that the D orbitals are considered by checking atombasis and nbasis.
"""
assert logfile.data.nbasis == 25
assert [len(ab) for ab in logfile.data.atombasis] == [15, 5, 5]
assert logfile.data.metadata["legacy_package_version"] == "3.4"
assert logfile.data.metadata["package_version"] == "3.4alpha"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
# PSI 4 #
def testPsi4_Psi4_beta5_dvb_gopt_hf_unconverged_out(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert logfile.data.metadata["legacy_package_version"] == "beta5"
assert logfile.data.metadata["package_version"] == "0!0.beta5"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
def testPsi4_Psi4_beta5_sample_cc54_0_01_0_1_0_1_out(logfile):
"""TODO"""
assert logfile.data.metadata["legacy_package_version"] == "beta2+"
assert logfile.data.metadata["package_version"] == "0!0.beta2.dev+fa5960b375b8ca2a5e4000a48cb95e7f218c579a"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testPsi4_Psi4_beta5_stopiter_psi_dft_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert logfile.data.metadata["package_version"] == "0!0.beta5"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert len(logfile.data.scfvalues[0]) == 7
def testPsi4_Psi4_beta5_stopiter_psi_hf_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert logfile.data.metadata["package_version"] == "0!0.beta5"
assert len(logfile.data.scfvalues[0]) == 6
def testPsi4_Psi4_0_5_sample_scf5_out(logfile):
assert logfile.data.metadata["legacy_package_version"] == "0.5"
assert logfile.data.metadata["package_version"] == "1!0.5.dev+master-dbe9080"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testPsi4_Psi4_0_5_water_fdgrad_out(logfile):
"""Ensure that finite difference gradients are parsed."""
assert logfile.data.metadata["legacy_package_version"] == "1.2a1.dev429"
assert logfile.data.metadata["package_version"] == "1!1.2a1.dev429+fixsym-7838fc1-dirty"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert hasattr(logfile.data, 'grads')
assert logfile.data.grads.shape == (1, 3, 3)
assert abs(logfile.data.grads[0, 0, 2] - 0.05498126903657) < 1.0e-12
# In C2v symmetry, there are 5 unique displacements for the
# nuclear gradient, and this is at the MP2 level.
assert logfile.data.mpenergies.shape == (5, 1)
def testPsi4_Psi4_1_2_ch4_hf_opt_freq_out(logfile):
"""Ensure that molecular orbitals and normal modes are parsed in Psi4 1.2"""
assert logfile.data.metadata["legacy_package_version"] == "1.2.1"
assert logfile.data.metadata["package_version"] == "1!1.2.1.dev+HEAD-406f4de"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert hasattr(logfile.data, 'mocoeffs')
assert hasattr(logfile.data, 'vibdisps')
assert hasattr(logfile.data, 'vibfreqs')
# Q-Chem #
def testQChem_QChem4_2_CH3___Na__RS_out(logfile):
"""An unrestricted fragment job with BSSE correction.
Contains only the Roothaan step energies for the CP correction.
The fragment SCF sections are printed.
This is to ensure only the supersystem is parsed.
"""
assert logfile.data.metadata["legacy_package_version"] == "4.2.2"
assert logfile.data.metadata["package_version"] == "4.2.2"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.charge == 1
assert logfile.data.mult == 2
assert len(logfile.data.moenergies) == 2
assert len(logfile.data.atomcoords[0]) == 5
assert len(logfile.data.atomnos) == 5
# Fragments: A, B, RS_CP(A), RS_CP(B), Full
assert len(logfile.data.scfenergies) == 1
scfenergy = convertor(-201.9388745658, "hartree", "eV")
assert abs(logfile.data.scfenergies[0] - scfenergy) < 1.0e-10
assert logfile.data.nbasis == logfile.data.nmo == 40
assert len(logfile.data.moenergies[0]) == 40
assert len(logfile.data.moenergies[1]) == 40
assert type(logfile.data.moenergies) == type([])
assert type(logfile.data.moenergies[0]) == type(numpy.array([]))
assert type(logfile.data.moenergies[1]) == type(numpy.array([]))
def testQChem_QChem4_2_CH3___Na__RS_SCF_out(logfile):
"""An unrestricted fragment job with BSSE correction.
Contains both the Roothaan step and full SCF energies for the CP correction.
The fragment SCF sections are printed.
    This is to ensure only the supersystem is parsed.
"""
assert logfile.data.metadata["legacy_package_version"] == "4.1.0.1"
assert logfile.data.metadata["package_version"] == "4.1.0.1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.charge == 1
assert logfile.data.mult == 2
assert len(logfile.data.moenergies) == 2
assert len(logfile.data.atomcoords[0]) == 5
assert len(logfile.data.atomnos) == 5
# Fragments: A, B, RS_CP(A), RS_CP(B), SCF_CP(A), SCF_CP(B), Full
assert len(logfile.data.scfenergies) == 1
scfenergy = convertor(-201.9396979324, "hartree", "eV")
assert abs(logfile.data.scfenergies[0] - scfenergy) < 1.0e-10
assert logfile.data.nbasis == logfile.data.nmo == 40
assert len(logfile.data.moenergies[0]) == 40
assert len(logfile.data.moenergies[1]) == 40
assert type(logfile.data.moenergies) == type([])
assert type(logfile.data.moenergies[0]) == type(numpy.array([]))
assert type(logfile.data.moenergies[1]) == type(numpy.array([]))
def testQChem_QChem4_2_CH4___Na__out(logfile):
"""A restricted fragment job with no BSSE correction.
The fragment SCF sections are printed.
This is to ensure only the supersystem is parsed.
"""
assert logfile.data.metadata["legacy_package_version"] == "4.2.0"
assert logfile.data.metadata["package_version"] == "4.2.0"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.charge == 1
assert logfile.data.mult == 1
assert len(logfile.data.moenergies) == 1
assert len(logfile.data.atomcoords[0]) == 6
assert len(logfile.data.atomnos) == 6
# Fragments: A, B, Full
assert len(logfile.data.scfenergies) == 1
scfenergy = convertor(-202.6119443654, "hartree", "eV")
assert abs(logfile.data.scfenergies[0] - scfenergy) < 1.0e-10
assert logfile.data.nbasis == logfile.data.nmo == 42
assert len(logfile.data.moenergies[0]) == 42
assert type(logfile.data.moenergies) == type([])
assert type(logfile.data.moenergies[0]) == type(numpy.array([]))
def testQChem_QChem4_2_CH3___Na__RS_SCF_noprint_out(logfile):
"""An unrestricted fragment job with BSSE correction.
Contains both the Roothaan step and full SCF energies for the CP correction.
The fragment SCF sections are not printed.
This is to ensure only the supersystem is parsed.
"""
assert logfile.data.metadata["legacy_package_version"] == "4.3.0"
assert logfile.data.metadata["package_version"] == "4.3.0"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.charge == 1
assert logfile.data.mult == 2
assert len(logfile.data.moenergies) == 2
assert len(logfile.data.atomcoords[0]) == 5
assert len(logfile.data.atomnos) == 5
assert len(logfile.data.scfenergies) == 1
scfenergy = convertor(-201.9396979324, "hartree", "eV")
assert abs(logfile.data.scfenergies[0] - scfenergy) < 1.0e-10
assert logfile.data.nbasis == logfile.data.nmo == 40
assert len(logfile.data.moenergies[0]) == 40
assert len(logfile.data.moenergies[1]) == 40
assert type(logfile.data.moenergies) == type([])
assert type(logfile.data.moenergies[0]) == type(numpy.array([]))
assert type(logfile.data.moenergies[1]) == type(numpy.array([]))
def testQChem_QChem4_2_CH3___Na__RS_noprint_out(logfile):
"""An unrestricted fragment job with BSSE correction.
Contains only the Roothaan step energies for the CP correction.
The fragment SCF sections are not printed.
This is to ensure only the supersystem is parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert logfile.data.charge == 1
assert logfile.data.mult == 2
assert len(logfile.data.moenergies) == 2
assert len(logfile.data.atomcoords[0]) == 5
assert len(logfile.data.atomnos) == 5
assert len(logfile.data.scfenergies) == 1
scfenergy = convertor(-201.9388582085, "hartree", "eV")
assert abs(logfile.data.scfenergies[0] - scfenergy) < 1.0e-10
assert logfile.data.nbasis == logfile.data.nmo == 40
assert len(logfile.data.moenergies[0]) == 40
assert len(logfile.data.moenergies[1]) == 40
assert type(logfile.data.moenergies) == type([])
assert type(logfile.data.moenergies[0]) == type(numpy.array([]))
assert type(logfile.data.moenergies[1]) == type(numpy.array([]))
def testQChem_QChem4_2_CH4___Na__noprint_out(logfile):
"""A restricted fragment job with no BSSE correction.
The fragment SCF sections are not printed.
This is to ensure only the supersystem is parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert logfile.data.charge == 1
assert logfile.data.mult == 1
assert len(logfile.data.moenergies) == 1
assert len(logfile.data.atomcoords[0]) == 6
assert len(logfile.data.atomnos) == 6
assert len(logfile.data.scfenergies) == 1
scfenergy = convertor(-202.6119443654, "hartree", "eV")
assert abs(logfile.data.scfenergies[0] - scfenergy) < 1.0e-10
assert logfile.data.nbasis == logfile.data.nmo == 42
assert len(logfile.data.moenergies[0]) == 42
assert type(logfile.data.moenergies) == type([])
assert type(logfile.data.moenergies[0]) == type(numpy.array([]))
def testQChem_QChem4_2_CO2_out(logfile):
"""A job containing a specific number of orbitals requested for
printing.
"""
assert logfile.data.metadata["package_version"] == "4.2.2"
nbasis = 45
nmo = 45
nalpha = 11
assert logfile.data.nbasis == nbasis
assert logfile.data.nmo == nmo
assert len(logfile.data.mocoeffs) == 1
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[0][0, 0] == -0.0001434
assert logfile.data.mocoeffs[0][nalpha + 5 - 1, nbasis - 1] == -0.0000661
assert len(logfile.data.moenergies) == 1
assert len(logfile.data.moenergies[0]) == nmo
def testQChem_QChem4_2_CO2_cation_UHF_out(logfile):
"""A job containing a specific number of orbitals requested for
printing."""
assert logfile.data.metadata["package_version"] == "4.2.2"
nbasis = 45
nmo = 45
nalpha = 11
nbeta = 10
assert logfile.data.nbasis == nbasis
assert logfile.data.nmo == nmo
assert len(logfile.data.mocoeffs) == 2
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[1].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[0][0, 0] == -0.0001549
assert logfile.data.mocoeffs[0][nalpha + 5 - 1, nbasis - 1] == -0.0000985
assert logfile.data.mocoeffs[1][0, 0] == -0.0001612
assert logfile.data.mocoeffs[1][nbeta + 5 - 1, nbasis - 1] == -0.0027710
assert len(logfile.data.moenergies) == 2
assert len(logfile.data.moenergies[0]) == nmo
assert len(logfile.data.moenergies[1]) == nmo
def testQChem_QChem4_2_CO2_cation_ROHF_bigprint_allvirt_out(logfile):
"""A job containing a specific number of orbitals requested for
printing."""
assert logfile.data.metadata["package_version"] == "4.2.2"
nbasis = 45
nmo = 45
nalpha = 11
nbeta = 10
assert logfile.data.nbasis == nbasis
assert logfile.data.nmo == nmo
assert len(logfile.data.mocoeffs) == 2
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[1].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[0][0, 0] == -0.0001543
assert logfile.data.mocoeffs[0][nalpha + 5 - 3, nbasis - 1] == -0.0132848
assert logfile.data.mocoeffs[1][2, 0] == 0.9927881
assert logfile.data.mocoeffs[1][nbeta + 5 - 1, nbasis - 1] == 0.0018019
assert len(logfile.data.moenergies) == 2
assert len(logfile.data.moenergies[0]) == nmo
assert len(logfile.data.moenergies[1]) == nmo
def testQChem_QChem4_2_CO2_linear_dependence_printall_out(logfile):
"""A job with linear dependency and all MOs printed."""
assert logfile.data.metadata["package_version"] == "4.2.2"
nbasis = 138
nmo = 106
assert logfile.data.nbasis == nbasis
assert logfile.data.nmo == nmo
assert len(logfile.data.mocoeffs) == 1
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[0].T[59, 15] == -0.28758
assert logfile.data.mocoeffs[0].T[59, 16] == -0.00000
def testQChem_QChem4_2_CO2_linear_dependence_printall_final_out(logfile):
"""A job with linear dependency and all MOs printed.
The increased precision is due to the presence of `scf_final_print
= 3` giving a separate block with more decimal places.
"""
assert logfile.data.metadata["package_version"] == "4.2.2"
nbasis = 138
nmo = 106
assert logfile.data.nbasis == nbasis
assert logfile.data.nmo == nmo
assert len(logfile.data.mocoeffs) == 1
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[0].T[59, 15] == -0.2875844
# Even though all MO coefficients are printed in the less precise
# block, they aren't parsed.
# assert logfile.data.mocoeffs[0].T[59, 16] == -0.00000
assert numpy.isnan(logfile.data.mocoeffs[0].T[59, 16])
def testQChem_QChem4_2_CO2_linear_dependence_printdefault_out(logfile):
"""A job with linear dependency and the default number of MOs printed
(all occupieds and 5 virtuals).
"""
assert logfile.data.metadata["package_version"] == "4.2.2"
nbasis = 138
nmo = 106
assert logfile.data.nbasis == nbasis
assert logfile.data.nmo == nmo
assert len(logfile.data.mocoeffs) == 1
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[0].T[59, 15] == -0.28758
assert numpy.isnan(logfile.data.mocoeffs[0].T[59, 16])
def testQChem_QChem4_2_dvb_gopt_unconverged_out(logfile):
"""An unconverged geometry optimization to test for empty optdone (see #103 for details)."""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert hasattr(logfile.data, 'optdone') and not logfile.data.optdone
def testQChem_QChem4_2_dvb_sp_multipole_10_out(logfile):
"""Multipole moments up to the 10-th order.
Since this example has various formats for the moment ranks, we can test
the parser by making sure the first moment (pure X) is as expected.
"""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert hasattr(logfile.data, 'moments') and len(logfile.data.moments) == 11
tol = 1.0e-6
assert logfile.data.moments[1][0] < tol
assert abs(logfile.data.moments[2][0] - -50.9647) < tol
assert abs(logfile.data.moments[3][0] - 0.0007) < tol
assert abs(logfile.data.moments[4][0] - -1811.1540) < tol
assert abs(logfile.data.moments[5][0] - 0.0159) < tol
assert abs(logfile.data.moments[6][0] - -57575.0744) < tol
assert abs(logfile.data.moments[7][0] - 0.3915) < tol
assert numpy.isnan(logfile.data.moments[8][0])
assert abs(logfile.data.moments[9][0] - 10.1638) < tol
assert numpy.isnan(logfile.data.moments[10][0])
def testQChem_QChem4_2_MoOCl4_sp_noprint_builtin_mixed_all_Cl_out(logfile):
"""ECP on all Cl atoms, but iprint is off, so coreelectrons must be
guessed.
"""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert logfile.data.charge == -2
assert logfile.data.mult == 1
assert hasattr(logfile.data, 'coreelectrons')
coreelectrons = numpy.array([0, 0, 10, 10, 10, 10], dtype=int)
assert numpy.all(coreelectrons == logfile.data.coreelectrons)
def testQChem_QChem4_2_MoOCl4_sp_noprint_builtin_mixed_both_out(logfile):
"""ECP on Mo and all Cl atoms, but iprint is off, so coreelectrons
can't be guessed.
Uses `ecp = gen`.
"""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert logfile.data.charge == -2
assert logfile.data.mult == 1
assert not hasattr(logfile.data, 'coreelectrons')
def testQChem_QChem4_2_MoOCl4_sp_noprint_builtin_mixed_single_Mo_out(logfile):
"""ECP on Mo, but iprint is off, so coreelectrons must be guessed."""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert logfile.data.charge == -2
assert logfile.data.mult == 1
assert hasattr(logfile.data, 'coreelectrons')
coreelectrons = numpy.array([28, 0, 0, 0, 0, 0], dtype=int)
assert numpy.all(coreelectrons == logfile.data.coreelectrons)
def testQChem_QChem4_2_MoOCl4_sp_noprint_builtin_out(logfile):
"""ECP on Mo and all Cl atoms, but iprint is off, so coreelectrons
can't be guessed.
Uses `ecp = <builtin>`.
"""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert logfile.data.charge == -2
assert logfile.data.mult == 1
assert not hasattr(logfile.data, 'coreelectrons')
def testQChem_QChem4_2_MoOCl4_sp_noprint_user_Mo_builtin_all_Cl_out(logfile):
"""ECP on Mo and all Cl atoms, but iprint is off; the coreelectrons
count is given for Mo, and Cl can be guessed.
"""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert logfile.data.charge == -2
assert logfile.data.mult == 1
assert hasattr(logfile.data, 'coreelectrons')
coreelectrons = numpy.array([28, 0, 10, 10, 10, 10], dtype=int)
assert numpy.all(coreelectrons == logfile.data.coreelectrons)
def testQChem_QChem4_2_MoOCl4_sp_print_builtin_mixed_single_Mo_single_Cl_out(logfile):
"""ECP on Mo and all Cl atoms; iprint is on, so coreelectrons can be
calculated.
This was intended to only have an ECP on a single Cl, but Q-Chem
silently puts it on all.
"""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert logfile.data.charge == -2
assert logfile.data.mult == 1
assert hasattr(logfile.data, 'coreelectrons')
coreelectrons = numpy.array([28, 0, 10, 10, 10, 10], dtype=int)
assert numpy.all(coreelectrons == logfile.data.coreelectrons)
def testQChem_QChem4_2_print_frgm_false_opt_out(logfile):
"""Fragment calculation: geometry optimization.
Fragment sections are not printed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert logfile.data.charge == -1
assert logfile.data.mult == 1
assert len(logfile.data.scfenergies) == 11
assert len(logfile.data.grads) == 11
def testQChem_QChem4_2_print_frgm_true_opt_out(logfile):
"""Fragment calculation: geometry optimization.
Fragment sections are printed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert logfile.data.charge == -1
assert logfile.data.mult == 1
assert len(logfile.data.scfenergies) == 11
assert len(logfile.data.grads) == 11
def testQChem_QChem4_2_print_frgm_false_sp_out(logfile):
"""Fragment calculation: single point energy.
Fragment sections are not printed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert logfile.data.charge == -1
assert logfile.data.mult == 1
assert len(logfile.data.scfenergies) == 1
def testQChem_QChem4_2_print_frgm_true_sp_out(logfile):
"""Fragment calculation: single point energy.
Fragment sections are printed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert logfile.data.charge == -1
assert logfile.data.mult == 1
assert len(logfile.data.scfenergies) == 1
def testQChem_QChem4_2_print_frgm_true_sp_ccsdt_out(logfile):
"""Fragment calculation: single point energy, CCSD(T).
Fragment sections are printed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert len(logfile.data.mpenergies[0]) == 1
assert len(logfile.data.ccenergies) == 1
def testQChem_QChem4_2_qchem_tddft_rpa_out(logfile):
"""An RPA/TD-DFT job.
Here Q-Chem prints both the TDA and RPA results. These differ somewhat, since
TDA allows only X vectors (occupied-virtual transitions) whereas RPA also
allows Y vectors (virtual-occupied deexcitations), and the formatting in these
two cases is subtly different (see cclib/cclib#154 for details).
Currently cclib will store the second set of transitions (RPA), but this
could change in the future if we support multistep jobs.
"""
assert logfile.data.metadata["package_version"] == "4.2.0"
assert len(logfile.data.etsecs) == 10
assert len(logfile.data.etsecs[0]) == 13
# Check a few vectors manually, since we know the output. X vectors are transitions
    # from occupied to virtual orbitals, whereas Y vectors go the other way around, so cclib
# should be switching the indices. Here is the corresponding fragment in the logfile:
# Excited state 1: excitation energy (eV) = 3.1318
# Total energy for state 1: -382.185270280389
# Multiplicity: Triplet
# Trans. Mom.: 0.0000 X 0.0000 Y 0.0000 Z
# Strength : 0.0000
# X: D( 12) --> V( 13) amplitude = 0.0162
# X: D( 28) --> V( 5) amplitude = 0.1039
# Y: D( 28) --> V( 5) amplitude = 0.0605
assert logfile.data.etsecs[0][0] == [(11, 0), (47, 0), 0.0162]
assert logfile.data.etsecs[0][1] == [(27, 0), (39, 0), 0.1039]
assert logfile.data.etsecs[0][2] == [(39, 0), (27, 0), 0.0605]
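# Illustrative sketch (hypothetical helper, not cclib's API): how the Q-Chem
# "D( i ) --> V( a )" labels map onto the 0-based MO index pairs asserted
# above, with Y (deexcitation) amplitudes reversing the direction. nocc is the
# number of occupied orbitals in the relevant spin channel; 35 is inferred
# from the indices asserted above for this logfile.
def _tddft_transition_sketch(kind, d, v, amplitude, nocc=35, spin=0):
    occ = (d - 1, spin)            # D(28) -> MO index 27
    virt = (nocc + v - 1, spin)    # V(5)  -> MO index 39 when nocc == 35
    if kind == "X":                # excitation: occupied -> virtual
        return [occ, virt, amplitude]
    return [virt, occ, amplitude]  # Y: indices switched, as in etsecs[0][2]
# e.g. _tddft_transition_sketch("X", 28, 5, 0.1039) == [(27, 0), (39, 0), 0.1039]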
def testQChem_QChem4_2_read_molecule_out(logfile):
"""A two-calculation output with the charge/multiplicity not specified
in the user section."""
assert logfile.data.metadata["package_version"] == "4.3.0"
# These correspond to the second calculation.
assert logfile.data.charge == 1
assert logfile.data.mult == 2
assert len(logfile.data.moenergies) == 2
# However, we currently take data from both, since they aren't
# exactly fragment calculations.
assert len(logfile.data.scfenergies) == 2
def testQChem_QChem4_2_stopiter_qchem_out(logfile):
"""Check to ensure that an incomplete SCF is handled correctly."""
assert logfile.data.metadata["legacy_package_version"] == "4.0.0.1"
assert logfile.data.metadata["package_version"] == "4.0.0.1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert len(logfile.data.scfvalues[0]) == 7
def testQChem_QChem4_3_R_propylene_oxide_force_ccsd_out(logfile):
"""Check to see that the CCSD gradient (not the HF gradient) is being
parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert hasattr(logfile.data, 'grads')
assert logfile.data.grads.shape == (1, logfile.data.natom, 3)
# atom 9, y-coordinate.
idx = (0, 8, 1)
assert logfile.data.grads[idx] == 0.00584973
def testQChem_QChem4_3_R_propylene_oxide_force_hf_numerical_energies_out(logfile):
"""Check to see that the HF numerical gradient (from energies) is
being parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
# This isn't implemented yet.
assert not hasattr(logfile.data, "grads")
def testQChem_QChem4_3_R_propylene_oxide_force_mp2_out(logfile):
"""Check to see that the MP2 gradient (not the HF gradient) is
being parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert hasattr(logfile.data, 'grads')
assert logfile.data.grads.shape == (1, logfile.data.natom, 3)
# atom 9, y-coordinate.
idx = (0, 8, 1)
assert logfile.data.grads[idx] == 0.00436177
def testQChem_QChem4_3_R_propylene_oxide_force_rimp2_out(logfile):
"""Check to see that the RI-MP2 gradient (not the HF gradient) is
being parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
assert hasattr(logfile.data, 'grads')
assert logfile.data.grads.shape == (1, logfile.data.natom, 3)
# atom 9, y-coordinate.
idx = (0, 8, 1)
assert logfile.data.grads[idx] == 0.00436172
def testQChem_QChem4_3_R_propylene_oxide_freq_ccsd_out(logfile):
"""Check to see that the CCSD (numerical) Hessian is being parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
# The gradient of the initial geometry in a Hessian calculated
# from finite difference of gradients should be the same as in a
# force calculation.
assert hasattr(logfile.data, 'grads')
ngrads = 1 + 6*logfile.data.natom
assert logfile.data.grads.shape == (ngrads, logfile.data.natom, 3)
# atom 9, y-coordinate.
idx = (0, 8, 1)
assert logfile.data.grads[idx] == 0.00584973
assert hasattr(logfile.data, 'hessian')
assert logfile.data.hessian.shape == (3*logfile.data.natom, 3*logfile.data.natom)
# atom 4, x-coordinate.
idx = (9, 9)
assert logfile.data.hessian[idx] == 0.3561243
def testQChem_QChem4_3_R_propylene_oxide_freq_hf_numerical_gradients_out(logfile):
"""Check to see that the HF Hessian (from gradients) is being parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
# This isn't implemented yet.
assert not hasattr(logfile.data, "freq")
def testQChem_QChem4_3_R_propylene_oxide_freq_mp2_out(logfile):
"""Check to see that the MP2 (numerical) Hessian is being parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
# The gradient of the initial geometry in a Hessian calculated
# from finite difference of gradients should be the same as in a
# force calculation.
assert hasattr(logfile.data, 'grads')
ngrads = 1 + 6*logfile.data.natom
assert logfile.data.grads.shape == (ngrads, logfile.data.natom, 3)
# atom 9, y-coordinate.
idx = (0, 8, 1)
assert logfile.data.grads[idx] == 0.00436177
assert hasattr(logfile.data, 'hessian')
assert logfile.data.hessian.shape == (3*logfile.data.natom, 3*logfile.data.natom)
# atom 4, x-coordinate.
idx = (9, 9)
assert logfile.data.hessian[idx] == 0.3520255
def testQChem_QChem4_3_R_propylene_oxide_freq_rimp2_out(logfile):
"""Check to see that the RI-MP2 (numerical) Hessian is being parsed.
"""
assert logfile.data.metadata["package_version"] == "4.3.0"
# The gradient of the initial geometry in a Hessian calculated
# from finite difference of gradients should be the same as in a
# force calculation.
assert hasattr(logfile.data, 'grads')
ngrads = 1 + 6*logfile.data.natom
assert logfile.data.grads.shape == (ngrads, logfile.data.natom, 3)
# atom 9, y-coordinate.
idx = (0, 8, 1)
# Well, not quite in this case...
assert logfile.data.grads[idx] == 0.00436167
assert hasattr(logfile.data, 'hessian')
assert logfile.data.hessian.shape == (3*logfile.data.natom, 3*logfile.data.natom)
# atom 4, x-coordinate.
idx = (9, 9)
assert logfile.data.hessian[idx] == 0.3520538
def testQChem_QChem4_4_full_2_out(logfile):
"""The polarizability section may not be parsed due to something
appearing just beforehand from a frequency-type calculation.
"""
assert logfile.data.metadata["legacy_package_version"] == "4.4.2"
assert logfile.data.metadata["package_version"] == "4.4.2"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert hasattr(logfile.data, 'polarizabilities')
def testQChem_QChem4_4_srtlg_out(logfile):
"""Some lines in the MO coefficients require fixed-width parsing. See
#349 and #381.
"""
assert logfile.data.metadata["legacy_package_version"] == "4.4.0"
assert logfile.data.metadata["package_version"] == "4.4.0"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
# There is a linear dependence problem.
nbasis, nmo = 1129, 1115
assert len(logfile.data.mocoeffs) == 2
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[1].shape == (nmo, nbasis)
index_ao = 151 - 1
indices_mo = [index_mo - 1 for index_mo in (493, 494, 495, 496, 497, 498)]
# line 306371:
# 151 C 7 s -54.24935 -36.37903-102.67529 32.37428-150.40380-103.24478
ref = numpy.asarray([-54.24935, -36.37903, -102.67529, 32.37428, -150.40380, -103.24478])
res = logfile.data.mocoeffs[1][indices_mo, index_ao]
numpy.testing.assert_allclose(ref, res, atol=1.0e-5, rtol=0.0)
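# Illustrative sketch of the fixed-width split described above, assuming
# 10-character coefficient fields as in the quoted line; this is not the
# parser's actual code, just a minimal demonstration of the idea.
def _split_fixed_width_sketch(field_block, width=10):
    return [float(field_block[i:i + width]) for i in range(0, len(field_block), width)]
# e.g. _split_fixed_width_sketch(" -54.24935 -36.37903-102.67529")
# gives [-54.24935, -36.37903, -102.67529].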
def testQChem_QChem4_4_Trp_polar_ideriv0_out(logfile):
"""Ensure that the polarizability section is being parsed, but don't
compare to reference results as 2nd-order finite difference can have
large errors.
"""
assert logfile.data.metadata["package_version"] == "4.4.2"
assert hasattr(logfile.data, 'polarizabilities')
def testQChem_QChem4_4_top_out(logfile):
"""This job has fewer MOs (7) than would normally be printed (15)."""
assert logfile.data.metadata["package_version"] == "4.4.2"
nbasis = 7
nmo = 7
assert logfile.data.nbasis == nbasis
assert logfile.data.nmo == nmo
assert len(logfile.data.mocoeffs) == 1
assert logfile.data.mocoeffs[0].shape == (nmo, nbasis)
assert logfile.data.mocoeffs[0].T[6, 5] == 0.8115082
def testQChem_QChem5_0_438_out(logfile):
"""This job has an ECP on Pt, replacing 60 of 78 electrons, and was
showing the charge as 60.
"""
assert logfile.data.metadata["legacy_package_version"] == "5.0.0"
assert logfile.data.metadata["package_version"] == "5.0.0"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.charge == 0
assert logfile.data.coreelectrons[0] == 60
def testQChem_QChem5_0_argon_out(logfile):
"""This job has unit specifications at the end of 'Total energy for
state' lines.
"""
assert logfile.data.metadata["legacy_package_version"] == "5.0.1"
assert logfile.data.metadata["package_version"] == "5.0.1"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
nroots = 12
assert len(logfile.data.etenergies) == nroots
state_0_energy = -526.6323968555
state_1_energy = -526.14663738
assert logfile.data.scfenergies[0] == convertor(state_0_energy, 'hartree', 'eV')
assert abs(logfile.data.etenergies[0] - convertor(state_1_energy - state_0_energy, 'hartree', 'wavenumber')) < 1.0e-1
def testQChem_QChem5_0_Si_out(logfile):
"""
This job includes MOs as a test for this version. The first MO coefficient is checked to ensure they were parsed.
"""
assert logfile.data.metadata["legacy_package_version"] == "5.0.2"
assert logfile.data.metadata["package_version"] == "5.0.2"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.mocoeffs[0][0,0] == 1.00042
def testQChem_QChem5_1_old_final_print_1_out(logfile):
"""This job has was run from a development version."""
assert logfile.data.metadata["legacy_package_version"] == "5.1.0"
assert logfile.data.metadata["package_version"] == "5.1.0dev+branches_libresponse-27553"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
def testQChem_QChem5_3_ccman2_soc_cisd_out(logfile):
"""This file has its atomcoords in bohr, which need to be converted."""
convfac = 0.5291772109
assert logfile.data.atomcoords[0, 0, 2] == -0.24685 * convfac
assert logfile.data.atomcoords[0, 1, 2] == 1.72795 * convfac
# Turbomole
def testTurbomole_Turbomole7_2_dvb_gopt_b3_lyp_Gaussian__(logfile):
assert logfile.data.metadata["legacy_package_version"] == "7.2"
assert logfile.data.metadata["package_version"] == "7.2.r21471"
assert isinstance(
parse_version(logfile.data.metadata["package_version"]), Version
)
assert logfile.data.natom == 20
# These regression tests are for logfiles that are not to be parsed
# for some reason, and the function should start with 'testnoparse'.
def testnoparseADF_ADF2004_01_mo_sp_adfout(filename):
"""This is an ADF file that has a different number of AO functions
and SFO functions. Currently nbasis parses the SFO count. This will
be discussed and resolved in the future (see issue #170), and this
placeholder can be used to get rid of the error in the meantime.
"""
pass
def testnoparseGaussian_Gaussian09_coeffs_log(filename):
"""This is a test for a Gaussian file with more than 999 basis functions.
The log file is too big, so we are just including a section. Before
parsing, we set some attributes of the parser so that it all goes smoothly.
"""
parser = Gaussian(os.path.join(__filedir__, filename), loglevel=logging.ERROR)
parser.nmo = 5
parser.nbasis = 1128
data = parser.parse()
assert data.mocoeffs[0].shape == (5, 1128)
assert data.aonames[-1] == "Ga71_19D-2"
assert data.aonames[0] == "Mn1_1S"
def flatten(seq):
"""Converts a list of lists [of lists] to a single flattened list.
Taken from the web.
"""
res = []
for item in seq:
if (isinstance(item, (tuple, list))):
res.extend(flatten(item))
else:
res.append(item)
return res
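# Usage sketch for flatten (values are illustrative only, not taken from any
# logfile): flatten([1, [2, (3, 4)], [[5]]]) returns [1, 2, 3, 4, 5], since both
# lists and tuples are expanded recursively.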
def normalisefilename(filename):
"""Replace all non-alphanumeric symbols by underscores.
>>> from . import regression
>>> for x in [ "Gaussian/Gaussian03/Mo4OSibdt2-opt.log" ]:
... print(regression.normalisefilename(x))
...
Gaussian_Gaussian03_Mo4OSibdt2_opt_log
"""
ans = []
for y in filename:
x = y.lower()
if (x >= 'a' and x <= 'z') or (x >= '0' and x <= '9'):
ans.append(y)
else:
ans.append("_")
return "".join(ans)
# When a unit test is removed or replaced by a newer version, we normally want
# the old logfile to become a regression, namely to run the unit test as part of
# the regression suite. To this end, add the logfile path to the dictionary
# below along with the appropriate unit test class to use, and the appropriate
# regression test function will be created automatically. If modifications
# are necessary due to developments in the unit test class, tweak it here
# and provide the modified version of the test class.
# Although there is probably a cleaner way to do this, making the unit class test names
# global makes reading the dictionary of old unit tests much easier, especially it
# will contain some classes defined here.
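# Illustrative sketch of the mapping (the entry below is hypothetical, not one of
# the files actually listed): adding "ORCA/ORCA9.9/dvb_sp.out": OrcaSPTest to
# old_unittests would auto-generate a regression function named
# testORCA_ORCA9_9_dvb_sp_out, via make_regression_from_old_unittest() and
# normalisefilename() defined further down.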
for m, module in all_modules.items():
for name in dir(module):
if name[-4:] == "Test":
globals()[name] = getattr(module, name)
class ADFGeoOptTest_noscfvalues(ADFGeoOptTest):
@unittest.skip('Cannot parse scfvalues from this file.')
def testgeovalues_scfvalues(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse scfvalues from this file.')
def testscftargetdim(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse scfvalues from this file.')
def testscfvaluetype(self):
"""SCF cycles were not printed here."""
class ADFSPTest_noscfvalues(ADFSPTest):
@unittest.skip('Cannot parse scfvalues from this file.')
def testscftargetdim(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse scfvalues from this file.')
def testscfvaluetype(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse aooverlaps from this file.')
def testaooverlaps(self):
"""AO overlaps were not printed here."""
class ADFSPTest_nosyms(ADFSPTest, GenericSPTest):
foverlap00 = 1.00000
foverlap11 = 0.99999
foverlap22 = 0.99999
@unittest.skip('Symmetry labels were not printed here')
def testsymlabels(self):
"""Symmetry labels were not printed here."""
class ADFSPTest_nosyms_noscfvalues(ADFSPTest_nosyms):
@unittest.skip('Cannot parse scfvalues from this file.')
def testscftargetdim(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse scfvalues from this file.')
def testscfvaluetype(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse aooverlaps from this file.')
def testaooverlaps(self):
"""AO overlaps were not printed here."""
class ADFSPTest_nosyms_valence(ADFSPTest_nosyms):
def testlengthmoenergies(self):
"""Only valence orbital energies were printed here."""
self.assertEqual(len(self.data.moenergies[0]), 45)
self.assertEqual(self.data.moenergies[0][0], 99999.0)
class ADFSPTest_nosyms_valence_noscfvalues(ADFSPTest_nosyms_valence):
@unittest.skip('Cannot parse scfvalues from this file.')
def testscftargetdim(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse scfvalues from this file.')
def testscfvaluetype(self):
"""SCF cycles were not printed here."""
@unittest.skip('Cannot parse aooverlaps from this file.')
def testaooverlaps(self):
"""AO overlaps were not printed here."""
# DALTON #
class DALTONBigBasisTest_aug_cc_pCVQZ(GenericBigBasisTest):
contractions = { 6: 29 }
spherical = True
class DALTONSPTest_nosyms_nolabels(GenericSPTest):
@unittest.skip('Symmetry labels were not printed here.')
def testsymlabels(self):
"""Are all the symmetry labels either Ag/u or Bg/u?"""
class DALTONTDTest_noetsecs(DALTONTDTest):
@unittest.skip("etsecs cannot be parsed from this file")
def testsecs(self):
pass
@unittest.skip("etsecs cannot be parsed from this file")
def testsecs_transition(self):
pass
# GAMESS #
class GAMESSUSSPunTest_charge0(GenericSPunTest):
def testcharge_and_mult(self):
"""The charge in the input was wrong."""
self.assertEqual(self.data.charge, 0)
@unittest.skip('HOMOs were incorrect due to charge being wrong')
def testhomos(self):
"""HOMOs were incorrect due to charge being wrong."""
class GAMESSUSIRTest_ts(GenericIRimgTest):
@unittest.skip('This is a transition state with different intensities')
def testirintens(self):
"""This is a transition state with different intensities."""
class GAMESSUSCISTest_dets(GenericCISTest):
nstates = 10
@unittest.skip('This gives unexpected coefficients, also for current unit tests.')
def testetsecsvalues(self):
"""This gives unexpected coefficients, also for current unit tests."""
class GAMESSSPTest_noaooverlaps(GenericSPTest):
@unittest.skip('Cannot parse aooverlaps from this file.')
def testaooverlaps(self):
"""aooverlaps were not printed here."""
# Gaussian #
class GaussianSPunTest_nomosyms(GaussianSPunTest):
@unittest.skip('Cannot parse mosyms from this file.')
def testmosyms(self):
"""mosyms were not printed here."""
class GaussianSPunTest_nonaturalorbitals(GaussianCISTest):
@unittest.skip('Cannot parse natural orbitals from this file.')
def testnocoeffs(self):
"""natural orbitals were not printed here."""
@unittest.skip('Cannot parse natural orbital occupation numbers from this file.')
def testnooccnos(self):
"""natural orbital occupation numbers were not printed here."""
class GaussianPolarTest(ReferencePolarTest):
"""Customized static polarizability unittest, meant for calculations
with symmetry enabled.
"""
# Reference values are from Q-Chem 4.2/trithiolane_freq.out, since
# with symmetry enabled Q-Chem reorients molecules similarly to
# Gaussian.
isotropic = 66.0955766
principal_components = [46.71020322, 75.50778705, 76.06873953]
# Make the thresholds looser because these test jobs use symmetry,
# and the polarizability is orientation dependent.
isotropic_delta = 2.0
principal_components_delta = 0.7
# Jaguar #
class JaguarSPTest_6_31gss(JaguarSPTest):
"""AO counts and some values are different in 6-31G** compared to STO-3G."""
nbasisdict = {1: 5, 6: 15}
b3lyp_energy = -10530
overlap01 = 0.22
def testmetadata_basis_set(self):
"""This calculation did not use STO-3G for the basis set."""
self.assertEqual(self.data.metadata["basis_set"].lower(), "6-31g**")
class JaguarSPTest_6_31gss_nomosyms(JaguarSPTest_6_31gss):
@unittest.skip('Cannot parse mosyms from this file.')
def testsymlabels(self):
"""mosyms were not printed here."""
class JaguarSPunTest_nomosyms(JaguarSPunTest):
@unittest.skip('Cannot parse mosyms from this file.')
def testmosyms(self):
"""mosyms were not printed here."""
class JaguarSPunTest_nmo_all(JaguarSPunTest):
def testmoenergies(self):
"""Some tests printed all MO energies apparently."""
self.assertEqual(len(self.data.moenergies[0]), self.data.nmo)
class JaguarSPunTest_nmo_all_nomosyms(JaguarSPunTest_nmo_all):
@unittest.skip('Cannot parse mosyms from this file.')
def testmosyms(self):
"""mosyms were not printed here."""
class JaguarGeoOptTest_nmo45(GenericGeoOptTest):
def testlengthmoenergies(self):
"""Without special options, Jaguar only print Homo+10 orbital energies."""
self.assertEqual(len(self.data.moenergies[0]), 45)
class JaguarSPTest_nmo45(GenericSPTest):
def testlengthmoenergies(self):
"""Without special options, Jaguar only print Homo+10 orbital energies."""
self.assertEqual(len(self.data.moenergies[0]), 45)
@unittest.skip('Cannot parse mos from this file.')
def testfornoormo(self):
"""mos were not printed here."""
@unittest.skip('Cannot parse scftargets from this file.')
def testscftargets(self):
"""scftargets were not parsed correctly here."""
@unittest.skip('Cannot parse atomcharges from this file.')
def testatomcharges(self):
"""atomcharges were not parsed correctly here."""
@unittest.skip('Cannot parse atombasis from this file.')
def testatombasis(self):
"""atombasis was not parsed correctly here."""
class JaguarSPunTest_nmo45(GenericSPunTest):
def testlengthmoenergies(self):
"""Without special options, Jaguar only print Homo+10 orbital energies."""
self.assertEqual(len(self.data.moenergies[0]), 45)
class JaguarGeoOptTest_nmo45(GenericGeoOptTest):
def testlengthmoenergies(self):
"""Without special options, Jaguar only print Homo+10 orbital energies."""
self.assertEqual(len(self.data.moenergies[0]), 45)
class JaguarGeoOptTest_nmo45_nogeo(JaguarGeoOptTest_nmo45):
@unittest.skip('Cannot parse geotargets from this file.')
def testgeotargets(self):
"""geotargets were not printed here."""
@unittest.skip('Cannot parse geovalues from this file.')
def testgeovalues_atomcoords(self):
"""geovalues were not printed here."""
@unittest.skip('Cannot parse geovalues from this file.')
def testgeovalues_scfvalues(self):
"""geovalues were not printed here."""
@unittest.skip('Cannot parse optdone from this file.')
def testoptdone(self):
"""optdone does not exist for this file."""
class JaguarGeoOptTest_6_31gss(GenericGeoOptTest):
nbasisdict = {1: 5, 6: 15}
b3lyp_energy = -10530
class MolcasBigBasisTest_nogbasis(MolcasBigBasisTest):
@unittest.skip('gbasis was not printed in this output file')
def testgbasis(self):
"""gbasis was not parsed for this file"""
@unittest.skip('gbasis was not printed in this output file')
def testnames(self):
"""gbasis was not parsed for this file"""
@unittest.skip('gbasis was not printed in this output file')
def testprimitives(self):
"""gbasis was not parsed for this file"""
@unittest.skip('gbasis was not printed in this output file')
def testsizeofbasis(self):
"""gbasis was not parsed for this file"""
# Molpro #
class MolproBigBasisTest_cart(MolproBigBasisTest):
spherical = False
# ORCA #
class OrcaSPTest_3_21g(OrcaSPTest, GenericSPTest):
nbasisdict = {1: 2, 6: 9}
b3lyp_energy = -10460
overlap01 = 0.19
molecularmass = 130190
@unittest.skip('This calculation has no symmetry.')
def testsymlabels(self):
"""This calculation has no symmetry."""
class OrcaGeoOptTest_3_21g(OrcaGeoOptTest):
nbasisdict = {1: 2, 6: 9}
b3lyp_energy = -10460
class OrcaSPunTest_charge0(GenericSPunTest):
def testcharge_and_mult(self):
"""The charge in the input was wrong."""
self.assertEqual(self.data.charge, 0)
@unittest.skip('HOMOs were incorrect due to charge being wrong.')
def testhomos(self):
"""HOMOs were incorrect due to charge being wrong."""
def testorbitals(self):
"""Closed-shell calculation run as open-shell."""
self.assertTrue(self.data.closed_shell)
class OrcaTDDFTTest_error(OrcaTDDFTTest):
def testoscs(self):
"""These values used to be less accurate, probably due to wrong coordinates."""
self.assertEqual(len(self.data.etoscs), self.number)
self.assertAlmostEqual(max(self.data.etoscs), 1.0, delta=0.2)
class OrcaIRTest_old_coordsOK(OrcaIRTest):
enthalpy_places = -1
entropy_places = 2
freeenergy_places = -1
class OrcaIRTest_old(OrcaIRTest):
enthalpy_places = -1
entropy_places = 2
freeenergy_places = -1
@unittest.skip('These values were wrong due to wrong input coordinates.')
def testfreqval(self):
"""These values were wrong due to wrong input coordinates."""
@unittest.skip('These values were wrong due to wrong input coordinates.')
def testirintens(self):
"""These values were wrong due to wrong input coordinates."""
# PSI3 #
class Psi3SPTest(GenericSPTest):
"""Customized restricted single point HF/KS unittest"""
# The final energy is also a bit higher here, I think due to the fact
# that a SALC calculation is done instead of a full LCAO.
b3lyp_energy = -10300
@unittest.skip('atommasses not implemented yet')
def testatommasses(self):
pass
@unittest.skip('Psi3 did not print partial atomic charges')
def testatomcharges(self):
pass
@unittest.skip('MO coefficients are printed separately for each SALC')
def testfornoormo(self):
pass
@unittest.skip('MO coefficients are printed separately for each SALC')
def testdimmocoeffs(self):
pass
# PSI4 #
class PsiSPTest_noatommasses(PsiSPTest):
@unittest.skip('atommasses were not printed in this file.')
def testatommasses(self):
"""These values are not present in this output file."""
old_unittests = {
"ADF/ADF2004.01/MoOCl4-sp.adfout": ADFCoreTest,
"ADF/ADF2004.01/dvb_gopt.adfout": ADFGeoOptTest_noscfvalues,
"ADF/ADF2004.01/dvb_gopt_b.adfout": ADFGeoOptTest,
"ADF/ADF2004.01/dvb_sp.adfout": ADFSPTest_noscfvalues,
"ADF/ADF2004.01/dvb_sp_b.adfout": ADFSPTest_noscfvalues,
"ADF/ADF2004.01/dvb_sp_c.adfout": ADFSPTest_nosyms_valence_noscfvalues,
"ADF/ADF2004.01/dvb_sp_d.adfout": ADFSPTest_nosyms_noscfvalues,
"ADF/ADF2004.01/dvb_un_sp.adfout": GenericSPunTest,
"ADF/ADF2004.01/dvb_un_sp_c.adfout": GenericSPunTest,
"ADF/ADF2004.01/dvb_ir.adfout": GenericIRTest,
"ADF/ADF2006.01/dvb_gopt.adfout": ADFGeoOptTest_noscfvalues,
"ADF/ADF2013.01/dvb_gopt_b_fullscf.adfout": ADFGeoOptTest,
"ADF/ADF2014.01/dvb_gopt_b_fullscf.out": ADFGeoOptTest,
"DALTON/DALTON-2013/C_bigbasis.aug-cc-pCVQZ.out": DALTONBigBasisTest_aug_cc_pCVQZ,
"DALTON/DALTON-2013/b3lyp_energy_dvb_sp_nosym.out": DALTONSPTest_nosyms_nolabels,
"DALTON/DALTON-2013/dvb_sp_hf_nosym.out": GenericSPTest,
"DALTON/DALTON-2013/dvb_td_normalprint.out": DALTONTDTest_noetsecs,
"DALTON/DALTON-2013/sp_b3lyp_dvb.out": GenericSPTest,
"DALTON/DALTON-2015/dvb_td_normalprint.out": DALTONTDTest_noetsecs,
"DALTON/DALTON-2015/trithiolane_polar_abalnr.out": GaussianPolarTest,
"DALTON/DALTON-2015/trithiolane_polar_response.out": GaussianPolarTest,
"DALTON/DALTON-2015/trithiolane_polar_static.out": GaussianPolarTest,
"DALTON/DALTON-2015/Trp_polar_response.out": ReferencePolarTest,
"DALTON/DALTON-2015/Trp_polar_static.out": ReferencePolarTest,
"GAMESS/GAMESS-US2005/water_ccd_2005.06.27.r3.out": GenericCCTest,
"GAMESS/GAMESS-US2005/water_ccsd_2005.06.27.r3.out": GenericCCTest,
"GAMESS/GAMESS-US2005/water_ccsd(t)_2005.06.27.r3.out": GenericCCTest,
"GAMESS/GAMESS-US2005/water_cis_dets_2005.06.27.r3.out": GAMESSUSCISTest_dets,
"GAMESS/GAMESS-US2005/water_cis_saps_2005.06.27.r3.out": GenericCISTest,
"GAMESS/GAMESS-US2005/MoOCl4-sp_2005.06.27.r3.out": GenericCoreTest,
"GAMESS/GAMESS-US2005/water_mp2_2005.06.27.r3.out": GenericMP2Test,
"GAMESS/GAMESS-US2006/C_bigbasis_2006.02.22.r3.out": GenericBigBasisTest,
"GAMESS/GAMESS-US2006/dvb_gopt_a_2006.02.22.r2.out": GenericGeoOptTest,
"GAMESS/GAMESS-US2006/dvb_sp_2006.02.22.r2.out": GenericSPTest,
"GAMESS/GAMESS-US2006/dvb_un_sp_2006.02.22.r2.out": GenericSPunTest,
"GAMESS/GAMESS-US2006/dvb_ir.2006.02.22.r2.out": GenericIRTest,
"GAMESS/GAMESS-US2006/nh3_ts_ir.2006.2.22.r2.out": GAMESSUSIRTest_ts,
"GAMESS/GAMESS-US2010/dvb_gopt.log": GenericGeoOptTest,
"GAMESS/GAMESS-US2010/dvb_sp.log": GAMESSSPTest_noaooverlaps,
"GAMESS/GAMESS-US2010/dvb_sp_un.log": GAMESSUSSPunTest_charge0,
"GAMESS/GAMESS-US2010/dvb_td.log": GAMESSUSTDDFTTest,
"GAMESS/GAMESS-US2010/dvb_ir.log": GenericIRTest,
"GAMESS/GAMESS-US2014/Trp_polar_freq.out": ReferencePolarTest,
"GAMESS/GAMESS-US2014/trithiolane_polar_freq.out": GaussianPolarTest,
"GAMESS/GAMESS-US2014/trithiolane_polar_tdhf.out": GenericPolarTest,
"GAMESS/GAMESS-US2014/C_bigbasis.out" : GenericBigBasisTest,
"GAMESS/GAMESS-US2014/dvb_gopt_a.out" : GenericGeoOptTest,
"GAMESS/GAMESS-US2014/dvb_ir.out" : GamessIRTest,
"GAMESS/GAMESS-US2014/dvb_sp.out" : GenericBasisTest,
"GAMESS/GAMESS-US2014/dvb_sp.out" : GenericSPTest,
"GAMESS/GAMESS-US2014/dvb_td.out" : GAMESSUSTDDFTTest,
"GAMESS/GAMESS-US2014/dvb_td_trplet.out" : GenericTDDFTtrpTest,
"GAMESS/GAMESS-US2014/dvb_un_sp.out" : GenericSPunTest,
"GAMESS/GAMESS-US2014/MoOCl4-sp.out" : GenericCoreTest,
"GAMESS/GAMESS-US2014/nh3_ts_ir.out" : GenericIRimgTest,
"GAMESS/GAMESS-US2014/water_ccd.out" : GenericCCTest,
"GAMESS/GAMESS-US2014/water_ccsd.out" : GenericCCTest,
"GAMESS/GAMESS-US2014/water_ccsd(t).out" : GenericCCTest,
"GAMESS/GAMESS-US2014/water_cis_saps.out" : GAMESSCISTest,
"GAMESS/GAMESS-US2014/water_mp2.out" : GenericMP2Test,
"GAMESS/PCGAMESS/C_bigbasis.out": GenericBigBasisTest,
"GAMESS/PCGAMESS/dvb_gopt_b.out": GenericGeoOptTest,
"GAMESS/PCGAMESS/dvb_ir.out": FireflyIRTest,
"GAMESS/PCGAMESS/dvb_raman.out": GenericRamanTest,
"GAMESS/PCGAMESS/dvb_sp.out": GenericSPTest,
"GAMESS/PCGAMESS/dvb_td.out": GenericTDTest,
"GAMESS/PCGAMESS/dvb_td_trplet.out": GenericTDDFTtrpTest,
"GAMESS/PCGAMESS/dvb_un_sp.out": GenericSPunTest,
"GAMESS/PCGAMESS/water_mp2.out": GenericMP2Test,
"GAMESS/PCGAMESS/water_mp3.out": GenericMP3Test,
"GAMESS/PCGAMESS/water_mp4.out": GenericMP4SDQTest,
"GAMESS/PCGAMESS/water_mp4_sdtq.out": GenericMP4SDTQTest,
"GAMESS/WinGAMESS/dvb_td_2007.03.24.r1.out": GAMESSUSTDDFTTest,
"Gaussian/Gaussian03/CO_TD_delta.log": GenericTDunTest,
"Gaussian/Gaussian03/C_bigbasis.out": GaussianBigBasisTest,
"Gaussian/Gaussian03/dvb_gopt.out": GenericGeoOptTest,
"Gaussian/Gaussian03/dvb_ir.out": GaussianIRTest,
"Gaussian/Gaussian03/dvb_raman.out": GaussianRamanTest,
"Gaussian/Gaussian03/dvb_sp.out": GaussianSPTest,
"Gaussian/Gaussian03/dvb_sp_basis.log": GenericBasisTest,
"Gaussian/Gaussian03/dvb_sp_basis_b.log": GenericBasisTest,
"Gaussian/Gaussian03/dvb_td.out": GaussianTDDFTTest,
"Gaussian/Gaussian03/dvb_un_sp.out": GaussianSPunTest_nomosyms,
"Gaussian/Gaussian03/dvb_un_sp_b.log": GaussianSPunTest,
"Gaussian/Gaussian03/Mo4OCl4-sp.log": GenericCoreTest,
"Gaussian/Gaussian03/water_ccd.log": GenericCCTest,
"Gaussian/Gaussian03/water_ccsd(t).log": GenericCCTest,
"Gaussian/Gaussian03/water_ccsd.log": GenericCCTest,
"Gaussian/Gaussian03/water_cis.log": GaussianSPunTest_nonaturalorbitals,
"Gaussian/Gaussian03/water_cisd.log": GaussianSPunTest_nonaturalorbitals,
"Gaussian/Gaussian03/water_mp2.log": GaussianMP2Test,
"Gaussian/Gaussian03/water_mp3.log": GaussianMP3Test,
"Gaussian/Gaussian03/water_mp4.log": GaussianMP4SDTQTest,
"Gaussian/Gaussian03/water_mp4sdq.log": GaussianMP4SDQTest,
"Gaussian/Gaussian03/water_mp5.log": GenericMP5Test,
"Gaussian/Gaussian09/dvb_gopt_revA.02.out": GenericGeoOptTest,
"Gaussian/Gaussian09/dvb_ir_revA.02.out": GaussianIRTest,
"Gaussian/Gaussian09/dvb_raman_revA.02.out": GaussianRamanTest,
"Gaussian/Gaussian09/dvb_scan_revA.02.log": GaussianRelaxedScanTest,
"Gaussian/Gaussian09/dvb_sp_basis_b_gfprint.log": GenericBasisTest,
"Gaussian/Gaussian09/dvb_sp_basis_gfinput.log": GenericBasisTest,
"Gaussian/Gaussian09/dvb_sp_revA.02.out": GaussianSPTest,
"Gaussian/Gaussian09/dvb_td_revA.02.out": GaussianTDDFTTest,
"Gaussian/Gaussian09/dvb_un_sp_revA.02.log": GaussianSPunTest_nomosyms,
"Gaussian/Gaussian09/dvb_un_sp_b_revA.02.log": GaussianSPunTest,
"Gaussian/Gaussian09/trithiolane_polar.log": GaussianPolarTest,
"Jaguar/Jaguar4.2/dvb_gopt.out": JaguarGeoOptTest_nmo45,
"Jaguar/Jaguar4.2/dvb_gopt_b.out": GenericGeoOptTest,
"Jaguar/Jaguar4.2/dvb_sp.out": JaguarSPTest_nmo45,
"Jaguar/Jaguar4.2/dvb_sp_b.out": JaguarSPTest_nmo45,
"Jaguar/Jaguar4.2/dvb_un_sp.out": JaguarSPunTest_nmo_all_nomosyms,
"Jaguar/Jaguar4.2/dvb_ir.out": JaguarIRTest,
"Jaguar/Jaguar6.0/dvb_gopt.out": JaguarGeoOptTest_6_31gss,
"Jaguar/Jaguar6.0/dvb_sp.out": JaguarSPTest_6_31gss_nomosyms,
"Jaguar/Jaguar6.0/dvb_un_sp.out" : JaguarSPunTest_nmo_all_nomosyms,
"Jaguar/Jaguar6.5/dvb_gopt.out": JaguarGeoOptTest_nmo45,
"Jaguar/Jaguar6.5/dvb_sp.out": JaguarSPTest_nmo45,
"Jaguar/Jaguar6.5/dvb_un_sp.out": JaguarSPunTest_nomosyms,
"Jaguar/Jaguar6.5/dvb_ir.out": JaguarIRTest,
"Molcas/Molcas8.0/dvb_sp.out": MolcasSPTest,
"Molcas/Molcas8.0/dvb_sp_un.out": GenericSPunTest,
"Molcas/Molcas8.0/C_bigbasis.out": MolcasBigBasisTest_nogbasis,
"Molpro/Molpro2006/C_bigbasis_cart.out": MolproBigBasisTest_cart,
"Molpro/Molpro2012/trithiolane_polar.out": GenericPolarTest,
"NWChem/NWChem6.6/trithiolane_polar.out": GaussianPolarTest,
"ORCA/ORCA2.6/dvb_gopt.out": OrcaGeoOptTest_3_21g,
"ORCA/ORCA2.6/dvb_sp.out": OrcaSPTest_3_21g,
"ORCA/ORCA2.6/dvb_td.out": OrcaTDDFTTest_error,
"ORCA/ORCA2.6/dvb_ir.out": OrcaIRTest_old_coordsOK,
"ORCA/ORCA2.8/dvb_gopt.out": OrcaGeoOptTest,
"ORCA/ORCA2.8/dvb_sp.out": GenericBasisTest,
"ORCA/ORCA2.8/dvb_sp.out": OrcaSPTest,
"ORCA/ORCA2.8/dvb_sp_un.out": OrcaSPunTest_charge0,
"ORCA/ORCA2.8/dvb_td.out": OrcaTDDFTTest,
"ORCA/ORCA2.8/dvb_ir.out": OrcaIRTest_old,
"ORCA/ORCA2.9/dvb_gopt.out": OrcaGeoOptTest,
"ORCA/ORCA2.9/dvb_ir.out": OrcaIRTest,
"ORCA/ORCA2.9/dvb_raman.out": GenericRamanTest,
"ORCA/ORCA2.9/dvb_scan.out": OrcaRelaxedScanTest,
"ORCA/ORCA2.9/dvb_sp.out": GenericBasisTest,
"ORCA/ORCA2.9/dvb_sp.out": OrcaSPTest,
"ORCA/ORCA2.9/dvb_sp_un.out": GenericSPunTest,
"ORCA/ORCA2.9/dvb_td.out": OrcaTDDFTTest,
"ORCA/ORCA3.0/dvb_bomd.out": GenericBOMDTest,
"ORCA/ORCA3.0/dvb_gopt.out": OrcaGeoOptTest,
"ORCA/ORCA3.0/dvb_ir.out": OrcaIRTest,
"ORCA/ORCA3.0/dvb_raman.out": GenericRamanTest,
"ORCA/ORCA3.0/dvb_scan.out": OrcaRelaxedScanTest,
"ORCA/ORCA3.0/dvb_sp_un.out": GenericSPunTest,
"ORCA/ORCA3.0/dvb_sp.out": GenericBasisTest,
"ORCA/ORCA3.0/dvb_sp.out": OrcaSPTest,
"ORCA/ORCA3.0/dvb_td.out": OrcaTDDFTTest,
"ORCA/ORCA3.0/Trp_polar.out": ReferencePolarTest,
"ORCA/ORCA3.0/trithiolane_polar.out": GaussianPolarTest,
"ORCA/ORCA4.0/dvb_sp.out": GenericBasisTest,
"ORCA/ORCA4.0/dvb_gopt.out": OrcaGeoOptTest,
"ORCA/ORCA4.0/Trp_polar.out": ReferencePolarTest,
"ORCA/ORCA4.0/dvb_sp.out": OrcaSPTest,
"ORCA/ORCA4.0/dvb_sp_un.out": GenericSPunTest,
"ORCA/ORCA4.0/dvb_td.out": OrcaTDDFTTest,
"ORCA/ORCA4.0/dvb_rocis.out": OrcaROCIS40Test,
"ORCA/ORCA4.0/dvb_ir.out": GenericIRTest,
"ORCA/ORCA4.0/dvb_raman.out": OrcaRamanTest,
"Psi3/Psi3.4/dvb_sp_hf.out": Psi3SPTest,
"Psi4/Psi4-1.0/C_bigbasis.out": Psi4BigBasisTest,
"Psi4/Psi4-1.0/dvb_gopt_rhf.out": Psi4GeoOptTest,
"Psi4/Psi4-1.0/dvb_gopt_rks.out": Psi4GeoOptTest,
"Psi4/Psi4-1.0/dvb_ir_rhf.out": GenericIRTest,
"Psi4/Psi4-1.0/dvb_sp_rhf.out": PsiSPTest_noatommasses,
"Psi4/Psi4-1.0/dvb_sp_rks.out": PsiSPTest_noatommasses,
"Psi4/Psi4-1.0/dvb_sp_rohf.out": GenericROSPTest,
"Psi4/Psi4-1.0/dvb_sp_uhf.out": GenericSPunTest,
"Psi4/Psi4-1.0/dvb_sp_uks.out": GenericSPunTest,
"Psi4/Psi4-1.0/water_ccsd(t).out": GenericCCTest,
"Psi4/Psi4-1.0/water_ccsd.out": GenericCCTest,
"Psi4/Psi4-1.0/water_mp2.out": GenericMP2Test,
"Psi4/Psi4-beta5/C_bigbasis.out": GenericBigBasisTest,
"Psi4/Psi4-beta5/dvb_gopt_hf.out": Psi4GeoOptTest,
"Psi4/Psi4-beta5/dvb_sp_hf.out": GenericBasisTest,
"Psi4/Psi4-beta5/dvb_sp_hf.out": PsiSPTest_noatommasses,
"Psi4/Psi4-beta5/dvb_sp_ks.out": GenericBasisTest,
"Psi4/Psi4-beta5/dvb_sp_ks.out": PsiSPTest_noatommasses,
"Psi4/Psi4-beta5/water_ccsd.out": GenericCCTest,
"Psi4/Psi4-beta5/water_mp2.out": GenericMP2Test,
"QChem/QChem4.2/Trp_freq.out": ReferencePolarTest,
"QChem/QChem4.2/trithiolane_polar.out": GaussianPolarTest,
"QChem/QChem4.2/trithiolane_freq.out": GaussianPolarTest,
"QChem/QChem4.4/Trp_polar_ideriv1.out": ReferencePolarTest,
"QChem/QChem4.4/Trp_polar_response.out": ReferencePolarTest,
}
def make_regression_from_old_unittest(test_class):
"""Return a regression test function from an old unit test logfile."""
def old_unit_test(logfile):
test_class.logfile = logfile
test_class.data = logfile.data
devnull = open(os.devnull, 'w')
return unittest.TextTestRunner(stream=devnull).run(unittest.makeSuite(test_class))
return old_unit_test
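# Minimal usage sketch (assumes a logfile object whose .data attribute is already
# populated, as done in test_regressions below):
#   func = make_regression_from_old_unittest(GenericSPTest)
#   res = func(logfile)
# res is a unittest result object whose .failures and .errors are inspected later.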
def test_regressions(which=[], opt_traceback=True, regdir=__regression_dir__, loglevel=logging.ERROR):
# Build a list of regression files that can be found. If there is a directory
# on the third level, then treat all files within it as one job.
try:
filenames = {}
for p in parser_names:
filenames[p] = []
pdir = os.path.join(regdir, get_program_dir(p))
for version in os.scandir(pdir):
if version.is_file():
continue
for job in os.listdir(version.path):
path = os.path.join(version.path, job)
if os.path.isdir(path):
filenames[p].append(os.path.join(path, "*"))
else:
filenames[p].append(path)
except OSError as e:
print(e)
print("\nERROR: At least one program direcory is missing.")
print("Run 'git pull' or regression_download.sh in cclib to update.")
sys.exit(1)
# This file should contain the paths to all regression test files we have gathered
# over the years. It is not really necessary, since we can discover them on the disk,
# but we keep it as a legacy and a way to track the regression tests.
regfile = open(os.path.join(regdir, "regressionfiles.txt"), "r")
regfilenames = [os.sep.join(x.strip().split("/")) for x in regfile.readlines()]
regfile.close()
# We will want to print a warning if you haven't downloaded all of the regression
# test files, or when, vice versa, not all of the regression test files found on disk
# are included in filenames. However, gather that data here and print the warnings
# at the end so that we test all available files and the messages are displayed
# prominently at the end.
missing_on_disk = []
missing_in_list = []
for fn in regfilenames:
if not os.path.exists(os.path.join(regdir, fn)):
missing_on_disk.append(fn)
for fn in glob.glob(os.path.join(regdir, '*', '*', '*')):
fn = fn.replace(regdir, '').strip('/')
if fn not in regfilenames:
missing_in_list.append(fn)
# Create the regression test functions from logfiles that were old unittests.
for path, test_class in old_unittests.items():
funcname = "test" + normalisefilename(path)
func = make_regression_from_old_unittest(test_class)
globals()[funcname] = func
# Gather orphaned tests - functions starting with 'test' and not corresponding
# to any regression file name.
orphaned_tests = []
for pn in parser_names:
prefix = "test%s_%s" % (pn, pn)
tests = [fn for fn in globals() if fn[:len(prefix)] == prefix]
normalized = [normalisefilename(fn.replace(__regression_dir__, '')) for fn in filenames[pn]]
orphaned = [t for t in tests if t[4:] not in normalized]
orphaned_tests.extend(orphaned)
# Assume that if a string is not a parser name it'll be a relative
# path to a specific logfile.
# TODO: filter out things that are not parsers or files, and maybe
# raise an error in that case as well.
which_parsers = [w for w in which if w in parser_names]
which_filenames = [w for w in which if w not in which_parsers]
failures = errors = total = 0
for pn in parser_names:
parser_class = eval(pn)
# Continue to next iteration if we are limiting the regression and the current
# name was not explicitly chosen (that is, passed as an argument).
if which_parsers and pn not in which_parsers:
continue
parser_total = 0
current_filenames = filenames[pn]
current_filenames.sort()
for fname in current_filenames:
relative_path = fname[len(regdir):]
if which_filenames and relative_path not in which_filenames:
continue
parser_total += 1
if parser_total == 1:
print("Are the %s files ccopened and parsed correctly?" % pn)
total += 1
print(" %s ..." % fname, end=" ")
# Check if there is a test (needs to be an appropriately named function).
# If not, there can also be a test that does not assume the file is
# correctly parsed (for fragments, for example), and these tests need
# to be additionally prefixed with 'testnoparse'.
test_this = test_noparse = False
fname_norm = normalisefilename(fname.replace(__regression_dir__, ''))
funcname = "test" + fname_norm
test_this = funcname in globals()
funcname_noparse = "testnoparse" + fname_norm
test_noparse = not test_this and funcname_noparse in globals()
if not test_noparse:
datatype = parser_class.datatype if hasattr(parser_class, 'datatype') else ccData
job_filenames = glob.glob(fname)
try:
if len(job_filenames) == 1:
logfile = ccopen(job_filenames[0], datatype=datatype, loglevel=loglevel)
else:
logfile = ccopen(job_filenames, datatype=datatype, loglevel=loglevel)
except Exception as e:
errors += 1
print("ccopen error: ", e)
if opt_traceback:
print(traceback.format_exc())
else:
if type(logfile) == parser_class:
try:
logfile.data = logfile.parse()
except KeyboardInterrupt:
sys.exit(1)
except Exception as e:
print("parse error:", e)
errors += 1
if opt_traceback:
print(traceback.format_exc())
else:
if test_this:
try:
res = eval(funcname)(logfile)
if res and len(res.failures) > 0:
failures += len(res.failures)
print("%i test(s) failed" % len(res.failures))
if opt_traceback:
for f in res.failures:
print("Failure for", f[0])
print(f[1])
continue
elif res and len(res.errors) > 0:
errors += len(res.errors)
print("{:d} test(s) had errors".format(len(res.errors)))
if opt_traceback:
for f in res.errors:
print("Error for", f[0])
print(f[1])
continue
except AssertionError:
print("test failed")
failures += 1
if opt_traceback:
print(traceback.format_exc())
else:
print("parsed and tested")
else:
print("parsed")
else:
print("ccopen failed")
failures += 1
else:
try:
eval(funcname_noparse)(fname)
except AssertionError:
print("test failed")
failures += 1
except:
print("parse error")
errors += 1
if opt_traceback:
print(traceback.format_exc())
else:
print("test passed")
if parser_total:
print()
print("Total: %d Failed: %d Errors: %d" % (total, failures, errors))
if not opt_traceback and failures + errors > 0:
print("\nFor more information on failures/errors, add --traceback as an argument.")
# Show these warnings at the end, so that they're easy to notice. Notice that the lists
# were populated at the beginning of this function.
if len(missing_on_disk) > 0:
print("\nWARNING: You are missing %d regression file(s)." % len(missing_on_disk))
print("Run regression_download.sh in the ../data directory to update.")
print("Missing files:")
print("\n".join(missing_on_disk))
if len(missing_in_list) > 0:
print("\nWARNING: The list in 'regressionfiles.txt' is missing %d file(s)." % len(missing_in_list))
print("Add these files paths to the list and commit the change.")
print("Missing files:")
print("\n".join(missing_in_list))
if len(orphaned_tests) > 0:
print("\nWARNING: There are %d orphaned regression test functions." % len(orphaned_tests))
print("Please make sure these function names correspond to regression files:")
print("\n".join(orphaned_tests))
if failures + errors > 0:
sys.exit(1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--traceback", action="store_true")
parser.add_argument("--debug", action="store_true")
parser.add_argument(
"parser_or_module",
nargs="*",
help="Limit the test to the packages/parsers passed as arguments. "
"No arguments implies all parsers."
)
args = parser.parse_args()
loglevel = logging.DEBUG if args.debug else logging.ERROR
test_regressions(args.parser_or_module, args.traceback, loglevel=loglevel)
| bsd-3-clause | 8,734,415,167,729,531,000 | 38.859649 | 128 | 0.660556 | false |
sfl-drupal/drupalizer | drush.py | 1 | 6916 | # coding: utf-8
#
# Copyright (C) 2016 Savoir-faire Linux Inc. (<www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from fabric.api import task, roles, env
from fabric.contrib.console import confirm
from fabric.colors import red, green
from fabric.utils import abort
from datetime import datetime
import helpers as h
import core as c
from git import isGitDirty
@task(alias='make')
@roles('local')
def make(action='install'):
"""
Build the platform by running the Makefile specified in the local_vars.py configuration file.
"""
if env.get('always_use_pty', True):
if (isGitDirty()):
if (not confirm(red('There are warnings on the status of your repositories. '
'Do you want to continue and reset all changes to the remote repositories\' states?'), default=False)):
abort('Aborting "drush {}" since there might be a risk of losing local data.'.format(action))
drush_opts = "--prepare-install " if action != 'update' else ''
# Update profile codebase
if env.site_profile and env.site_profile != '':
drush_opts += "--contrib-destination=profiles/{} ".format(env.site_profile)
h.update_profile()
if not env.get('always_use_pty', True):
drush_opts += "--translations=" + env.site_languages + " "
elif confirm(red('Say [Y] to {} the site at {} with the specified translation(s): {}. If you say [n] '
'the site will be installed in English only'.format(action, env.site_root, env.site_languages))):
drush_opts += "--translations=" + env.site_languages + " "
if env.get('always_use_pty', True):
drush_opts += " --working-copy --no-gitinfofile"
if not h.fab_exists('local', env.site_root):
h.fab_run('local', "mkdir {}".format(env.site_root))
with h.fab_cd('local', env.site_root):
h.fab_run('local', 'drush make {} {} -y'.format(drush_opts, env.makefile))
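# Rough invocation sketch (assuming this module is loaded from the project's
# fabfile; the exact task namespace may differ): `fab make` builds a fresh
# platform with --prepare-install, while `fab make:action=update` re-runs the
# makefile without that flag.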
@task
@roles('local')
def aliases():
"""
Copy conf/aliases.drushrc.php into the site environment.
"""
role = 'local'
drush_aliases = env.site_drush_aliases
workspace = env.workspace
if not h.fab_exists(role, drush_aliases):
h.fab_run(role, 'mkdir {}'.format(drush_aliases))
with h.fab_cd(role, drush_aliases):
# Create aliases
if h.fab_exists(role, '{}/aliases.drushrc.php'.format(drush_aliases)):
h.fab_run(role, 'rm {}/aliases.drushrc.php'.format(drush_aliases))
h.fab_run(role, 'cp {}/conf/aliases.drushrc.php .'.format(workspace))
print(green('Drush aliases have been copied to {} directory.'.format(drush_aliases)))
@task
@roles('docker')
def updatedb():
"""
Run the available database updates. Similar to drush updatedb.
"""
role = 'docker'
with h.fab_cd(role, env.docker_site_root):
h.fab_run(role, 'drush updatedb -y')
h.hook_execute(env.hook_post_update, role)
@task
@roles('docker')
def site_install():
"""
Run the site installation procedure.
"""
role = 'docker'
site_root = env.docker_site_root
apache = env.apache_user
profile = env.site_profile
db_user = env.site_db_user
db_pass = env.site_db_pass
db_host = env.site_db_host
db_name = env.site_db_name
site_name = env.site_name
site_admin_name = env.site_admin_user
site_admin_pass = env.site_admin_pass
site_subdir = env.site_subdir
# First, create the database if necessary
h.init_db('docker')
with h.fab_cd(role, site_root):
locale = '--locale="fr"' if env.locale else ''
h.fab_run(role, 'sudo -u {} drush site-install {} {} --db-url=mysql://{}:{}@{}/{} --site-name="{}" '
'--account-name={} --account-pass={} --sites-subdir={} -y'.format(apache, profile, locale,
db_user, db_pass,
db_host, db_name, site_name,
site_admin_name,
site_admin_pass,
site_subdir))
print(green('Site installed successfully!'))
# Import db_dump if it exists.
if 'db_dump' in env and env.db_dump is not False:
c.db_import(env.db_dump, role)
h.hook_execute(env.hook_post_install, role)
@task
@roles('docker')
def archive_dump(role='docker'):
"""
Archive the platform for release or deployment.
:param role Default 'role' where to run the task
"""
with h.fab_cd(role, env.docker_site_root):
platform = '{}-{}.tar.gz'.format(env.project_name, datetime.now().strftime('%Y%m%d_%H%M%S'))
h.fab_run(role, 'rm -f {}/build/*.tar.gz'.format(env.docker_workspace))
print(green('All tar.gz archives found in {}/build have been deleted.'.format(env.docker_workspace)))
h.fab_run(
role,
'drush archive-dump --destination={}/build/{} --tags="sflinux {}" --generatorversion="2.x" '
'--generator="Drupalizer::fab drush.archive_dump" --tar-options="--exclude=.git"'
''.format(env.docker_workspace, platform, env.project_name)
)
@task
@roles('docker')
def gen_doc(role='docker'):
"""
Generate README file
:param role Default 'role' where to run the task
"""
if h.fab_exists(role, '{}/README.adoc'.format(env.docker_workspace)):
h.fab_run(role, 'asciidoctor -d book -b html5 -o {}/README.html {}/README.adoc'.
format(env.docker_workspace, env.docker_workspace))
print(green('README.html generated in {}'.format(env.docker_workspace)))
if h.fab_exists(role, '{}/CHANGELOG.adoc'.format(env.docker_workspace)):
h.fab_run(role, 'asciidoctor -d book -b html5 -o {}/CHANGELOG.html {}/CHANGELOG.adoc'.
format(env.docker_workspace, env.docker_workspace))
print(green('CHANGELOG.html generated in {}'.format(env.docker_workspace)))
| gpl-3.0 | -5,361,245,927,559,476,000 | 37 | 123 | 0.596877 | false |
floyd-fuh/afl-crash-analyzer | utilities/Logger.py | 1 | 2025 | #!/usr/bin/env python2.7
'''
AFL crash analyzer, crash triage for the American Fuzzy Lop fuzzer
Copyright (C) 2015 floyd
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Created on Apr 13, 2015
@author: floyd, http://floyd.ch, @floyd_ch
'''
import sys
class Logger():
#TODO: use curses, use colors, etc.
#min 0, max 10 (only used up to 6 atm)
debug_level = 6
#This is the setting if you want the dots to be printed so you see the program is busy
busy_inform = debug_level <= 3 and False
@staticmethod
def setDebug(level):
Logger.debug_level = level
@staticmethod
def error(*text):
print "[-] Error: "+str(" ".join(str(i) for i in text))
@staticmethod
def warning(*text):
print " [-] Warning: "+str(" ".join(str(i) for i in text))
@staticmethod
def fatal(*text):
print "[-] Fatal Error: "+str(" ".join(str(i) for i in text))
exit()
@staticmethod
def info(*text):
print "[+] "+str(" ".join(str(i) for i in text))
@staticmethod
def debug(*text, **kwargs):
level = 2
if "debug_level" in kwargs:
level = kwargs["debug_level"]
if level <= Logger.debug_level:
print " ["+"+"*level+"] "+str(" ".join(str(i) for i in text))
@staticmethod
def busy():
if Logger.busy_inform:
sys.stdout.write(".")
sys.stdout.flush() | gpl-3.0 | -3,715,117,632,580,918,000 | 35.178571 | 90 | 0.628642 | false |
Bitergia/allura | Allura/allura/ext/admin/admin_main.py | 1 | 36370 | import logging
from collections import defaultdict
from datetime import datetime
import pkg_resources
from pylons import c, g, request
from paste.deploy.converters import asbool
from tg import expose, redirect, flash, validate, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from webob import exc
from bson import ObjectId
from allura.app import Application, DefaultAdminController, SitemapEntry
from allura.lib import helpers as h
from allura import version
from allura import model as M
from allura.lib.security import has_access, require_access
from allura.lib.widgets import form_fields as ffw
from allura.lib import exceptions as forge_exc
from allura.lib import plugin
from allura.controllers import BaseController
from allura.lib.decorators import require_post
from . import widgets as aw
from allura.lib.widgets.project_list import ProjectScreenshots
log = logging.getLogger(__name__)
class W:
markdown_editor = ffw.MarkdownEdit()
label_edit = ffw.LabelEdit()
mount_delete = ffw.Lightbox(name='mount_delete',trigger='a.mount_delete')
admin_modal = ffw.Lightbox(name='admin_modal',trigger='a.admin_modal')
install_modal = ffw.Lightbox(name='install_modal',trigger='a.install_trig')
explain_export_modal = ffw.Lightbox(name='explain_export',trigger='#why_export')
group_card = aw.GroupCard()
permission_card = aw.PermissionCard()
group_settings = aw.GroupSettings()
new_group_settings = aw.NewGroupSettings()
screenshot_admin = aw.ScreenshotAdmin()
screenshot_list = ProjectScreenshots()
metadata_admin = aw.MetadataAdmin()
audit = aw.AuditLog()
page_list=ffw.PageList()
class AdminApp(Application):
'''This is the admin app. It is pretty much required for
a functioning allura project.
'''
__version__ = version.__version__
installable=False
_installable_tools = None
tool_label = 'admin'
icons={
24:'images/admin_24.png',
32:'images/admin_32.png',
48:'images/admin_48.png'
}
def __init__(self, project, config):
Application.__init__(self, project, config)
self.root = ProjectAdminController()
self.admin = AdminAppAdminController(self)
self.templates = pkg_resources.resource_filename('allura.ext.admin', 'templates')
self.sitemap = [ SitemapEntry('Admin','.')]
def is_visible_to(self, user):
'''Whether the user can view the app.'''
return has_access(c.project, 'create')(user=user)
@staticmethod
def installable_tools_for(project):
cls = AdminApp
if cls._installable_tools is None:
tools = [dict(name=k, app=v) for k,v in g.entry_points['tool'].iteritems()]
tools.sort(key=lambda t:(t['app'].status_int(), t['app'].ordinal))
cls._installable_tools = [ t for t in tools if t['app'].installable ]
return [ t for t in cls._installable_tools
if t['app'].status in project.allowed_tool_status ]
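# Shape of the returned value, for orientation (tool names here are hypothetical):
# [{'name': 'wiki', 'app': ForgeWikiApp}, {'name': 'tickets', 'app': ForgeTrackerApp}, ...]
# i.e. one dict per installable entry point whose status is allowed for this project.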
def main_menu(self):
'''Apps should provide their entries to be added to the main nav
:return: a list of :class:`SitemapEntries <allura.app.SitemapEntry>`
'''
return [SitemapEntry('Admin', '.')]
@h.exceptionless([], log)
def sidebar_menu(self):
links = []
admin_url = c.project.url()+'admin/'
if c.project.is_nbhd_project:
links.append(SitemapEntry('Add Project', c.project.url()+'add_project', ui_icon=g.icons['plus']))
nbhd_admin_url = c.project.neighborhood.url()+'_admin/'
links = links + [
SitemapEntry('Neighborhood'),
SitemapEntry('Overview', nbhd_admin_url+'overview'),
SitemapEntry('Awards', nbhd_admin_url+'accolades')]
else:
links += [SitemapEntry('Metadata', admin_url+'overview'),]
if c.project.neighborhood.name != "Users":
links += [
SitemapEntry('Screenshots', admin_url+'screenshots'),
SitemapEntry('Categorization', admin_url+'trove')
]
links.append(SitemapEntry('Tools', admin_url+'tools'))
if c.project.is_root and has_access(c.project, 'admin')():
links.append(SitemapEntry('User Permissions', admin_url+'groups/'))
if not c.project.is_root and has_access(c.project, 'admin')():
links.append(SitemapEntry('Permissions', admin_url+'permissions/'))
if len(c.project.neighborhood_invitations):
links.append(SitemapEntry('Invitation(s)', admin_url+'invitations'))
links.append(SitemapEntry('Audit Trail', admin_url+ 'audit/'))
if c.project.is_nbhd_project:
links.append(SitemapEntry('Statistics', nbhd_admin_url+ 'stats/'))
links.append(None)
links.append(SitemapEntry('Help', nbhd_admin_url+ 'help/'))
return links
def admin_menu(self):
return []
def install(self, project):
pass
class ProjectAdminController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def __init__(self):
self.permissions = PermissionsController()
self.groups = GroupsController()
self.audit = AuditController()
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_admin.html')
def index(self, **kw):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_invitations.html')
def invitations(self):
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_overview.html')
def overview(self, **kw):
c.markdown_editor = W.markdown_editor
c.metadata_admin = W.metadata_admin
c.explain_export_modal = W.explain_export_modal
show_export_control = asbool(config.get('show_export_control', False))
allow_project_delete = asbool(config.get('allow_project_delete', True))
explain_export_text = '''The purpose of this section is to determine whether your project is subject to the provisions of the
US Export Administration Regulations. You should consult section 734.4 and Supplement 2 to Part 734 for information on such items
and the calculation of U.S. controlled content.
<a href="http://www.bis.doc.gov/encryption/default.htm" target="_blank">http://www.bis.doc.gov/encryption/default.htm</a>'''
if 'us_export_contact' in config:
explain_export_text += ' If you have additional questions, please contact <a href="mailto:{contact}">{contact}</a>.'.format(
contact=config['us_export_contact']
)
return dict(show_export_control=show_export_control,
allow_project_delete=allow_project_delete,
explain_export_text=explain_export_text)
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_screenshots.html')
def screenshots(self, **kw):
c.screenshot_admin = W.screenshot_admin
c.screenshot_list = W.screenshot_list
return dict()
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_trove.html')
def trove(self):
c.label_edit = W.label_edit
base_troves = M.TroveCategory.query.find(dict(trove_parent_id=0)).sort('fullname').all()
topic_trove = M.TroveCategory.query.get(trove_parent_id=0,shortname='topic')
license_trove = M.TroveCategory.query.get(trove_parent_id=0,shortname='license')
return dict(base_troves=base_troves,license_trove=license_trove,topic_trove=topic_trove)
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_tools.html')
def tools(self, **kw):
c.markdown_editor = W.markdown_editor
c.label_edit = W.label_edit
c.mount_delete = W.mount_delete
c.admin_modal = W.admin_modal
c.install_modal = W.install_modal
mounts = c.project.ordered_mounts()
return dict(
mounts=mounts,
installable_tools=AdminApp.installable_tools_for(c.project),
roles=M.ProjectRole.query.find(dict(project_id=c.project.root_project._id)).sort('_id').all(),
categories=M.ProjectCategory.query.find(dict(parent_id=None)).sort('label').all())
@expose()
@require_post()
def update_labels(self, labels=None, labels_old=None, **kw):
require_access(c.project, 'admin')
c.project.labels = labels.split(',')
M.AuditLog.log('updated labels')
redirect('trove')
@without_trailing_slash
@expose()
def clone(self,
repo_type=None, source_url=None,
mount_point=None, mount_label=None,
**kw):
require_access(c.project, 'admin')
if repo_type is None:
return (
'<form method="get">'
'<input name="repo_type" value="Git">'
'<input name="source_url">'
'<input type="submit">'
'</form>')
ep = None  # make sure the name exists even if no matching entry point is found
for ep in pkg_resources.iter_entry_points('allura', repo_type):
break
if ep is None or source_url is None:
raise exc.HTTPNotFound
h.log_action(log, 'install tool').info(
'clone repo from %s', source_url,
meta=dict(tool_type=repo_type, mount_point=mount_point, mount_label=mount_label))
c.project.install_app(
repo_type,
mount_point=mount_point,
mount_label=mount_label,
init_from_url=source_url)
M.AuditLog.log('Create repo as clone')
redirect('tools')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def groups(self, **kw):
return dict()
@expose()
def _lookup(self, name, *remainder):
app = c.project.app_instance(name)
if app is None:
raise exc.HTTPNotFound, name
return app.admin, remainder
@expose()
@require_post()
@validate(W.metadata_admin, error_handler=overview)
def update(self, name=None,
short_description=None,
summary='',
icon=None,
category=None,
external_homepage='',
support_page='',
support_page_url='',
removal='',
moved_to_url='',
export_controlled=False,
export_control_type=None,
tracking_id='',
**kw):
require_access(c.project, 'update')
if removal != c.project.removal:
M.AuditLog.log('change project removal status to %s', removal)
h.log_action(log, 'change project removal status').info('')
c.project.removal = removal
c.project.removal_changed_date = datetime.utcnow()
if 'delete_icon' in kw:
M.ProjectFile.query.remove(dict(project_id=c.project._id, category='icon'))
M.AuditLog.log('remove project icon')
h.log_action(log, 'remove project icon').info('')
g.post_event('project_updated')
redirect('overview')
elif 'delete' in kw:
allow_project_delete = asbool(config.get('allow_project_delete', True))
if allow_project_delete or not c.project.is_root:
M.AuditLog.log('delete project')
h.log_action(log, 'delete project').info('')
plugin.ProjectRegistrationProvider.get().delete_project(c.project, c.user)
redirect('overview')
elif 'undelete' in kw:
h.log_action(log, 'undelete project').info('')
M.AuditLog.log('undelete project')
plugin.ProjectRegistrationProvider.get().undelete_project(c.project, c.user)
redirect('overview')
if name != c.project.name:
h.log_action(log, 'change project name').info('')
M.AuditLog.log('change project name to %s', name)
c.project.name = name
if short_description != c.project.short_description:
h.log_action(log, 'change project short description').info('')
M.AuditLog.log('change short description to %s', short_description)
c.project.short_description = short_description
if summary != c.project.summary:
h.log_action(log, 'change project summary').info('')
M.AuditLog.log('change summary to %s', summary)
c.project.summary = summary
category = category and ObjectId(category) or None
if category != c.project.category_id:
h.log_action(log, 'change project category').info('')
M.AuditLog.log('change category to %s', category)
c.project.category_id = category
if external_homepage != c.project.external_homepage:
h.log_action(log, 'change external home page').info('')
M.AuditLog.log('change external home page to %s', external_homepage)
c.project.external_homepage = external_homepage
if support_page != c.project.support_page:
h.log_action(log, 'change project support page').info('')
M.AuditLog.log('change project support page to %s', support_page)
c.project.support_page = support_page
if support_page_url != c.project.support_page_url:
h.log_action(log, 'change project support page url').info('')
M.AuditLog.log('change project support page url to %s', support_page_url)
c.project.support_page_url = support_page_url
if moved_to_url != c.project.moved_to_url:
h.log_action(log, 'change project moved to url').info('')
M.AuditLog.log('change project moved to url to %s', moved_to_url)
c.project.moved_to_url = moved_to_url
if export_controlled != c.project.export_controlled:
h.log_action(log, 'change project export controlled status').info('')
M.AuditLog.log('change project export controlled status to %s', export_controlled)
c.project.export_controlled = not not export_controlled
if not export_controlled:
export_control_type = None
if export_control_type != c.project.export_control_type:
h.log_action(log, 'change project export control type').info('')
M.AuditLog.log('change project export control type to %s', export_control_type)
c.project.export_control_type = export_control_type
if tracking_id != c.project.tracking_id:
h.log_action(log, 'change project tracking ID').info('')
M.AuditLog.log('change project tracking ID to %s', tracking_id)
c.project.tracking_id = tracking_id
if icon is not None and icon != '':
if c.project.icon:
M.ProjectFile.remove(dict(project_id=c.project._id, category='icon'))
M.AuditLog.log('update project icon')
M.ProjectFile.save_image(
icon.filename, icon.file, content_type=icon.type,
square=True, thumbnail_size=(48,48),
thumbnail_meta=dict(project_id=c.project._id,category='icon'))
g.post_event('project_updated')
redirect('overview')
def _add_trove(self, type, new_trove):
current_troves = getattr(c.project,'trove_%s'%type)
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(new_trove))
error_msg = None
if type in ['license','audience','developmentstatus','language'] and len(current_troves) >= 6:
error_msg = 'You may not have more than 6 of this category.'
elif type in ['topic'] and len(current_troves) >= 3:
error_msg = 'You may not have more than 3 of this category.'
elif trove_obj is not None:
if trove_obj._id not in current_troves:
current_troves.append(trove_obj._id)
g.post_event('project_updated')
else:
error_msg = 'This category has already been assigned to the project.'
return (trove_obj, error_msg)
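# Sketch of the contract (the id is illustrative only): _add_trove('topic', '99')
# either appends the matching TroveCategory to c.project.trove_topic and returns
# (trove_obj, None), or leaves the project untouched and returns (trove_obj,
# '<error message>') when the per-category limit is reached or the trove is
# already assigned.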
@expose('json:')
@require_post()
def add_trove_js(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
return dict(trove_full_path = trove_obj.fullpath, trove_cat_id = trove_obj.trove_cat_id, error_msg=error_msg)
@expose()
@require_post()
def add_trove(self, type, new_trove, **kw):
require_access(c.project, 'update')
trove_obj, error_msg = self._add_trove(type, new_trove)
M.AuditLog.log('add trove %s: %s', type, trove_obj.fullpath)
if error_msg:
flash(error_msg,'error')
redirect('trove')
@expose()
@require_post()
def delete_trove(self, type, trove, **kw):
require_access(c.project, 'update')
trove_obj = M.TroveCategory.query.get(trove_cat_id=int(trove))
current_troves = getattr(c.project,'trove_%s'%type)
if trove_obj is not None and trove_obj._id in current_troves:
M.AuditLog.log('remove trove %s: %s', type, trove_obj.fullpath)
current_troves.remove(trove_obj._id)
g.post_event('project_updated')
redirect('trove')
@expose()
@require_post()
@validate(W.screenshot_admin)
def add_screenshot(self, screenshot=None, caption=None, **kw):
require_access(c.project, 'update')
if len(c.project.get_screenshots()) >= 6:
flash('You may not have more than 6 screenshots per project.','error')
elif screenshot is not None and screenshot != '':
M.AuditLog.log('add screenshot')
M.ProjectFile.save_image(
screenshot.filename, screenshot.file, content_type=screenshot.type,
save_original=True,
original_meta=dict(project_id=c.project._id,category='screenshot',caption=caption),
square=True, thumbnail_size=(150,150),
thumbnail_meta=dict(project_id=c.project._id,category='screenshot_thumb'))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def delete_screenshot(self, id=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.AuditLog.log('remove screenshot')
M.ProjectFile.query.remove(dict(project_id=c.project._id, _id=ObjectId(id)))
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def edit_screenshot(self, id=None, caption=None, **kw):
require_access(c.project, 'update')
if id is not None and id != '':
M.ProjectFile.query.get(project_id=c.project._id, _id=ObjectId(id)).caption=caption
g.post_event('project_updated')
redirect('screenshots')
@expose()
@require_post()
def join_neighborhood(self, nid):
require_access(c.project, 'admin')
if not nid:
n = M.Neighborhood.query.get(name='Projects')
c.project.neighborhood_id = n._id
flash('Joined %s' % n.name)
redirect(c.project.url() + 'admin/')
nid = ObjectId(str(nid))
if nid not in c.project.neighborhood_invitations:
flash('No invitation to that neighborhood', 'error')
redirect('.')
c.project.neighborhood_id = nid
n = M.Neighborhood.query.get(_id=nid)
flash('Joined %s' % n.name)
redirect('invitations')
@h.vardec
@expose()
@require_post()
def update_mount_order(self, subs=None, tools=None, **kw):
if subs:
for sp in subs:
p = M.Project.query.get(shortname=sp['shortname'],
neighborhood_id=c.project.neighborhood_id)
p.ordinal = int(sp['ordinal'])
if tools:
for p in tools:
c.project.app_config(p['mount_point']).options.ordinal = int(p['ordinal'])
redirect('tools')
@h.vardec
@expose()
@require_post()
def update_mounts(self, subproject=None, tool=None, new=None, **kw):
if subproject is None: subproject = []
if tool is None: tool = []
for sp in subproject:
p = M.Project.query.get(shortname=sp['shortname'],
neighborhood_id=c.project.neighborhood_id)
if sp.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('delete subproject %s', sp['shortname'])
h.log_action(log, 'delete subproject').info(
'delete subproject %s', sp['shortname'],
meta=dict(name=sp['shortname']))
p.removal = 'deleted'
plugin.ProjectRegistrationProvider.get().delete_project(p, c.user)
elif not new:
M.AuditLog.log('update subproject %s', sp['shortname'])
p.name = sp['name']
p.ordinal = int(sp['ordinal'])
for p in tool:
if p.get('delete'):
require_access(c.project, 'admin')
M.AuditLog.log('uninstall tool %s', p['mount_point'])
h.log_action(log, 'uninstall tool').info(
'uninstall tool %s', p['mount_point'],
meta=dict(mount_point=p['mount_point']))
c.project.uninstall_app(p['mount_point'])
elif not new:
M.AuditLog.log('update tool %s', p['mount_point'])
options = c.project.app_config(p['mount_point']).options
options.mount_label = p['mount_label']
options.ordinal = int(p['ordinal'])
try:
if new and new.get('install'):
ep_name = new.get('ep_name', None)
if not ep_name:
require_access(c.project, 'create')
mount_point = new['mount_point'].lower() or h.nonce()
M.AuditLog.log('create subproject %s', mount_point)
h.log_action(log, 'create subproject').info(
'create subproject %s', mount_point,
meta=dict(mount_point=mount_point,name=new['mount_label']))
sp = c.project.new_subproject(mount_point)
sp.name = new['mount_label']
sp.ordinal = int(new['ordinal'])
else:
require_access(c.project, 'admin')
mount_point = new['mount_point'].lower() or ep_name.lower()
M.AuditLog.log('install tool %s', mount_point)
h.log_action(log, 'install tool').info(
'install tool %s', mount_point,
meta=dict(tool_type=ep_name, mount_point=mount_point, mount_label=new['mount_label']))
c.project.install_app(ep_name, mount_point, mount_label=new['mount_label'], ordinal=new['ordinal'])
except forge_exc.ForgeError, exc:
flash('%s: %s' % (exc.__class__.__name__, exc.args[0]),
'error')
g.post_event('project_updated')
redirect('tools')
class PermissionsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_permissions.html')
def index(self, **kw):
c.card = W.permission_card
return dict(permissions=self._index_permissions())
@without_trailing_slash
@expose()
@h.vardec
@require_post()
def update(self, card=None, **kw):
permissions = self._index_permissions()
old_permissions = dict(permissions)
for args in card:
perm = args['id']
new_group_ids = args.get('new', [])
group_ids = args.get('value', [])
if isinstance(new_group_ids, basestring):
new_group_ids = [ new_group_ids ]
if isinstance(group_ids, basestring):
group_ids = [ group_ids ]
# make sure the admin group has the admin permission
if perm == 'admin':
if c.project.is_root:
pid = c.project._id
else:
pid = c.project.parent_id
admin_group_id = str(M.ProjectRole.query.get(project_id=pid, name='Admin')._id)
if admin_group_id not in group_ids + new_group_ids:
flash('You cannot remove the admin group from the admin permission.','warning')
group_ids.append(admin_group_id)
permissions[perm] = []
role_ids = map(ObjectId, group_ids + new_group_ids)
permissions[perm] = role_ids
c.project.acl = []
for perm, role_ids in permissions.iteritems():
role_names = lambda ids: ','.join(sorted(
pr.name for pr in M.ProjectRole.query.find(dict(_id={'$in':ids}))))
old_role_ids = old_permissions.get(perm, [])
if old_role_ids != role_ids:
M.AuditLog.log('updated "%s" permissions: "%s" => "%s"',
perm,role_names(old_role_ids), role_names(role_ids))
c.project.acl += [M.ACE.allow(rid, perm) for rid in role_ids]
g.post_event('project_updated')
redirect('.')
def _index_permissions(self):
permissions = dict(
(p,[]) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
class GroupsController(BaseController):
def _check_security(self):
require_access(c.project, 'admin')
def _index_permissions(self):
permissions = dict(
(p,[]) for p in c.project.permissions)
for ace in c.project.acl:
if ace.access == M.ACE.ALLOW:
permissions[ace.permission].append(ace.role_id)
return permissions
def _map_group_permissions(self):
roles = c.project.named_roles
permissions=self._index_permissions()
permissions_by_role = dict()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
for role in roles+[auth_role, anon_role]:
permissions_by_role[str(role._id)] = []
for perm in permissions:
perm_info = dict(has="no", text="Does not have permission %s" % perm, name=perm)
role_ids = permissions[perm]
if role._id in role_ids:
perm_info['text'] = "Has permission %s" % perm
perm_info['has'] = "yes"
else:
for r in role.child_roles():
if r._id in role_ids:
perm_info['text'] = "Inherited permission %s from %s" % (perm, r.name)
perm_info['has'] = "inherit"
break
if perm_info['has'] == "no":
if anon_role._id in role_ids:
perm_info['text'] = "Inherited permission %s from Anonymous" % perm
perm_info['has'] = "inherit"
elif auth_role._id in role_ids and role != anon_role:
perm_info['text'] = "Inherited permission %s from Authenticated" % perm
perm_info['has'] = "inherit"
permissions_by_role[str(role._id)].append(perm_info)
return permissions_by_role
@without_trailing_slash
@expose()
@h.vardec
def delete_group(self, group_name, **kw):
role = M.ProjectRole.by_name(group_name)
if not role:
flash('Group "%s" does not exist.' % group_name, 'error')
else:
role.delete()
M.AuditLog.log('delete group %s', group_name)
flash('Group "%s" deleted successfully.' % group_name)
g.post_event('project_updated')
redirect('.')
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_groups.html')
def index(self, **kw):
c.admin_modal = W.admin_modal
c.card = W.group_card
permissions_by_role = self._map_group_permissions()
auth_role = M.ProjectRole.authenticated()
anon_role = M.ProjectRole.anonymous()
roles = c.project.named_roles
roles.append(None)
return dict(roles=roles, permissions_by_role=permissions_by_role,
auth_role=auth_role, anon_role=anon_role)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def change_perm(self, role_id, permission, allow="true", **kw):
if allow=="true":
M.AuditLog.log('granted permission %s to group with id %s', permission, role_id)
c.project.acl.append(M.ACE.allow(ObjectId(role_id), permission))
else:
admin_group_id = str(M.ProjectRole.by_name('Admin')._id)
if admin_group_id == role_id and permission == 'admin':
return dict(error='You cannot remove the admin permission from the admin group.')
M.AuditLog.log('revoked permission %s from group with id %s', permission, role_id)
c.project.acl.remove(M.ACE.allow(ObjectId(role_id), permission))
g.post_event('project_updated')
return self._map_group_permissions()
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def add_user(self, role_id, username, **kw):
if not username or username=='*anonymous':
return dict(error='You must choose a user to add.')
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.by_username(username.strip())
if not group:
return dict(error='Could not find group with id %s' % role_id)
if not user:
return dict(error='User %s not found' % username)
if group._id in user.project_role().roles:
return dict(error='%s (%s) is already in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('add user %s to %s', username, group.name)
user.project_role().roles.append(group._id)
g.post_event('project_updated')
return dict(username=username, displayname=user.display_name)
@without_trailing_slash
@expose('json:')
@require_post()
@h.vardec
def remove_user(self, role_id, username, **kw):
group = M.ProjectRole.query.get(_id=ObjectId(role_id))
user = M.User.by_username(username.strip())
        if not group:
            return dict(error='Could not find group with id %s' % role_id)
        if not user:
            return dict(error='User %s not found' % username)
        # Only apply the Admin-group guard once we know the group exists.
        if group.name == 'Admin' and len(group.users_with_role()) == 1:
            return dict(error='You must have at least one user with the Admin role.')
if group._id not in user.project_role().roles:
return dict(error='%s (%s) is not in the group %s.' % (user.display_name, username, group.name))
M.AuditLog.log('remove user %s from %s', username, group.name)
user.project_role().roles.remove(group._id)
g.post_event('project_updated')
return dict()
@without_trailing_slash
@expose()
@require_post()
@h.vardec
def update(self, card=None, **kw):
for pr in card:
group = M.ProjectRole.query.get(_id=ObjectId(pr['id']))
assert group.project == c.project, 'Security violation'
user_ids = pr.get('value', [])
new_users = pr.get('new', [])
if isinstance(user_ids, basestring):
user_ids = [ user_ids ]
if isinstance(new_users, basestring):
new_users = [ new_users ]
# Handle new users in groups
user_added = False
for username in new_users:
user = M.User.by_username(username.strip())
if not user:
flash('User %s not found' % username, 'error')
redirect('.')
if not user._id:
continue # never add anon users to groups
M.AuditLog.log('add user %s to %s', username, group.name)
user.project_role().roles.append(group._id)
user_added = True
# Make sure we aren't removing all users from the Admin group
if group.name == u'Admin' and not (user_ids or user_added):
flash('You must have at least one user with the Admin role.',
'warning')
redirect('.')
# Handle users removed from groups
user_ids = set(
uid and ObjectId(uid)
for uid in user_ids)
for role in M.ProjectRole.query.find(dict(user_id={'$ne':None}, roles=group._id)):
if role.user_id and role.user_id not in user_ids:
role.roles = [ rid for rid in role.roles if rid != group._id ]
M.AuditLog.log('remove user %s from %s', role.user.username, group.name)
g.post_event('project_updated')
redirect('.')
@without_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def new(self):
c.form = W.new_group_settings
return dict(
group=None,
show_settings=True,
action="create")
@expose()
@require_post()
@validate(W.new_group_settings)
@h.vardec
def create(self, name=None, **kw):
if M.ProjectRole.by_name(name):
flash('%s already exists' % name, 'error')
else:
M.ProjectRole(project_id=c.project._id, name=name)
M.AuditLog.log('create group %s', name)
g.post_event('project_updated')
redirect('.')
@expose()
def _lookup(self, name, *remainder):
return GroupController(name), remainder
class GroupController(BaseController):
def __init__(self, name):
self._group = M.ProjectRole.query.get(_id=ObjectId(name))
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/project_group.html')
def index(self):
if self._group.name in ('Admin', 'Developer', 'Member'):
show_settings = False
action = None
else:
show_settings = True
action = self._group.settings_href + 'update'
c.form = W.group_settings
return dict(
group=self._group,
show_settings=show_settings,
action=action)
@expose()
@h.vardec
@require_post()
@validate(W.group_settings)
def update(self, _id=None, delete=None, name=None, **kw):
pr = M.ProjectRole.by_name(name)
if pr and pr._id != _id._id:
flash('%s already exists' % name, 'error')
redirect('..')
if delete:
_id.delete()
M.AuditLog.log('delete group %s', _id.name)
flash('%s deleted' % name)
redirect('..')
M.AuditLog.log('update group name %s=>%s', _id.name, name)
_id.name = name
flash('%s updated' % name)
redirect('..')
class AuditController(BaseController):
@with_trailing_slash
@expose('jinja:allura.ext.admin:templates/audit.html')
def index(self, limit=10, page=0, **kwargs):
limit = int(limit)
page = int(page)
count = M.AuditLog.query.find(dict(project_id=c.project._id)).count()
q = M.AuditLog.query.find(dict(project_id=c.project._id))
q = q.sort('timestamp', -1)
q = q.skip(page * limit)
if count > limit:
q = q.limit(limit)
else:
limit=count
c.widget = W.audit
return dict(
entries=q.all(),
limit=limit,
page=page,
count=count)
class AdminAppAdminController(DefaultAdminController):
'''Administer the admin app'''
pass
| apache-2.0 | -1,951,223,242,091,528,700 | 42.143535 | 137 | 0.581743 | false |
bat-serjo/vivisect | vqt/application.py | 1 | 6519 | import os
import logging
from PyQt5 import QtCore
from PyQt5.QtWidgets import *
import vqt.cli as vq_cli
import vqt.main as vq_main
import vqt.saveable as vq_save
import vqt.hotkeys as vq_hotkeys
import vqt.menubuilder as vq_menu
from vqt.saveable import compat_isNone
logger = logging.getLogger(__name__)
class VQDockWidget(vq_hotkeys.HotKeyMixin, QDockWidget):
def __init__(self, parent):
QDockWidget.__init__(self, parent)
vq_hotkeys.HotKeyMixin.__init__(self)
self.addHotKey('ctrl+enter', 'mem:undockmaximize')
self.addHotKeyTarget('mem:undockmaximize', self._hotkey_undock_maximize)
self.setAllowedAreas(QtCore.Qt.AllDockWidgetAreas)
def vqSaveState(self, settings, name, stub=''):
wid = self.widget()
if isinstance(wid, vq_save.SaveableWidget):
return wid.vqSaveState(settings, name, stub)
def vqRestoreState(self, settings, name, stub=''):
wid = self.widget()
if isinstance(wid, vq_save.SaveableWidget):
return wid.vqRestoreState(settings, name, stub)
def setWidget(self, widget):
        # Adopt the widget's current title, and intercept future title changes.
self.setWindowTitle(widget.windowTitle())
widget.setWindowTitle = self.setWindowTitle
QDockWidget.setWidget(self, widget)
def closeEvent(self, event):
self.hide()
w = self.widget()
w.setParent(None)
w.close()
self.parent().vqRemoveDockWidget(self)
event.accept()
def _hotkey_undock_maximize(self):
# if docked, undock
if not self.isFloating():
self.setFloating(1)
# if not maximized, maximize
if not self.isMaximized():
self.showMaximized()
else:
# else dock
self.showNormal()
self.setFloating(False)
else:
# else dock
self.showNormal()
self.setFloating(False)
self.show()
self.raise_()
class VQMainCmdWindow(vq_hotkeys.HotKeyMixin, QMainWindow):
'''
A base class for application window's to inherit from.
'''
__cli_widget_class__ = vq_cli.VQCli
def __init__(self, appname, cmd, **kwargs):
super(QMainWindow, self).__init__(**kwargs)
vq_hotkeys.HotKeyMixin.__init__(self)
self._vq_appname = appname
self._vq_dockwidgets = []
self._vq_settings = QtCore.QSettings('invisigoth', application=appname, parent=self)
self._vq_histfile = os.path.join(os.path.expanduser('~'), '.%s_history' % appname)
self._dock_classes = {}
self.vqInitDockWidgetClasses()
self._vq_mbar = vq_menu.VQMenuBar()
self.setMenuBar(self._vq_mbar)
# AnimatedDocks, AllowNestedDocks, AllowTabbedDocks, ForceTabbedDocks, VerticalTabs
self.setDockOptions(self.AnimatedDocks | self.AllowTabbedDocks)
self._vq_cli = self.__cli_widget_class__(cmd)
self._vq_cli.input.loadHistory(self._vq_histfile)
self._vq_cli.sigCliQuit.connect( self.close )
self.setCentralWidget(self._vq_cli)
self.vqRestoreGuiSettings(self._vq_settings)
def vqAddMenuField(self, fname, callback, args=()):
self._vq_mbar.addField(fname, callback, args=args)
def vqAddDynMenu(self, fname, callback):
self._vq_mbar.addDynMenu(fname, callback)
def vqInitDockWidgetClasses(self):
# apps can over-ride
pass
def vqAddDockWidgetClass(self, cls, args=()):
self._dock_classes[cls.__name__] = (cls, args)
def vqBuildDockWidget(self, clsname, floating=False, area=QtCore.Qt.TopDockWidgetArea):
res = self._dock_classes.get(clsname)
if res is None:
logger.error('vqBuildDockWidget Failed For: %s', clsname)
return
cls, args = res
obj = cls(*args)
return self.vqDockWidget(obj, area, floating=floating), obj
def vqRestoreGuiSettings(self, settings, stub=''):
dwcls = settings.value('DockClasses')
if not compat_isNone(dwcls):
for i, clsname in enumerate(dwcls):
name = 'VQDockWidget%d' % i
try:
tup = self.vqBuildDockWidget(str(clsname), floating=False)
if tup is not None:
d, obj = tup
d.setObjectName(name)
d.vqRestoreState(settings, name, stub)
d.show()
except Exception as e:
logger.error('Error Building: %s: %s', clsname, e)
# Once dock widgets are loaded, we can restoreState
state = settings.value('DockState')
if not compat_isNone(state):
self.restoreState(state)
geom = settings.value('DockGeometry')
if not compat_isNone(geom):
self.restoreGeometry(geom)
# Just get all the resize activities done...
vq_main.eatevents()
for w in self.vqGetDockWidgets():
w.show()
return True
def vqSaveGuiSettings(self, settings, stub=''):
dock_classes = []
# Enumerate the current dock windows and set
# their names by their list order...
for i, w in enumerate(self.vqGetDockWidgets()):
widget = w.widget()
dock_classes.append(widget.__class__.__name__)
name = 'VQDockWidget%d' % i
w.setObjectName(name)
w.vqSaveState(settings,name,stub)
settings.setValue('DockClasses', dock_classes)
settings.setValue('DockGeometry', self.saveGeometry())
settings.setValue('DockState', self.saveState())
def closeEvent(self, event):
self.vqSaveGuiSettings(self._vq_settings)
self._vq_cli.input.saveHistory(self._vq_histfile)
QMainWindow.closeEvent(self, event)
def vqGetDockWidgets(self):
return list(self._vq_dockwidgets)
def vqClearDockWidgets(self):
for wid in self.vqGetDockWidgets():
wid.close()
def vqRemoveDockWidget(self, widget):
self._vq_dockwidgets.remove(widget)
self.removeDockWidget(widget)
def vqDockWidget(self, widget, area=QtCore.Qt.TopDockWidgetArea, floating=False):
d = VQDockWidget(self)
d.setWidget(widget)
d.setFloating(floating)
self.addDockWidget(area, d)
self._vq_dockwidgets.append(d)
self.restoreDockWidget(d)
d.show()
return d
| apache-2.0 | -1,507,048,536,738,605,300 | 31.272277 | 92 | 0.609296 | false |
ioam/param | tests/API1/testparamdepends.py | 1 | 2200 | """
Unit test for param.depends.
"""
import param
from . import API1TestCase
class TestParamDepends(API1TestCase):
def setUp(self):
class P(param.Parameterized):
a = param.Parameter()
b = param.Parameter()
@param.depends('a')
def single_parameter(self):
pass
@param.depends('a:constant')
def constant(self):
pass
@param.depends('a.param')
def nested(self):
pass
self.P = P
def test_param_depends_instance(self):
p = self.P()
pinfos = p.param.params_depended_on('single_parameter')
self.assertEqual(len(pinfos), 1)
pinfo = pinfos[0]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, p)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'value')
def test_param_depends_class(self):
pinfos = self.P.param.params_depended_on('single_parameter')
self.assertEqual(len(pinfos), 1)
pinfo = pinfos[0]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, None)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'value')
def test_param_depends_constant(self):
pinfos = self.P.param.params_depended_on('constant')
self.assertEqual(len(pinfos), 1)
pinfo = pinfos[0]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, None)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'constant')
def test_param_depends_nested(self):
inst = self.P(a=self.P())
pinfos = inst.param.params_depended_on('nested')
self.assertEqual(len(pinfos), 4)
pinfos = {(pi.inst, pi.name): pi for pi in pinfos}
pinfo = pinfos[(inst, 'a')]
self.assertIs(pinfo.cls, self.P)
self.assertIs(pinfo.inst, inst)
self.assertEqual(pinfo.name, 'a')
self.assertEqual(pinfo.what, 'value')
for p in ['name', 'a', 'b']:
info = pinfos[(inst.a, p)]
self.assertEqual(info.name, p)
self.assertIs(info.inst, inst.a)
| bsd-3-clause | 8,995,908,249,948,755,000 | 29.555556 | 68 | 0.572727 | false |
google/eclipse2017 | scripts/get_user_ids.py | 1 | 1785 | #
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print user ids matching email addresses."""
import argparse
from google.cloud import datastore
import common.service_account as sa
DEFAULT_PROJECT_ID = 'eclipse-2017-test-147301'
DEFAULT_EMAIL_ADDRESS_FILE = 'email_addresses.txt'
def get_arguments():
parser = argparse.ArgumentParser(description='Print user ids matching email addresses.')
parser.add_argument('--project_id', type=str, default=DEFAULT_PROJECT_ID)
parser.add_argument('--email_address_file', type=str, default=DEFAULT_EMAIL_ADDRESS_FILE)
return parser.parse_args()
def main():
args = get_arguments()
client = datastore.Client(project=args.project_id)
addresses = [address.strip() for address in open(args.email_address_file).readlines()]
# Can't find a way to query a collection of records matching different email addresses.
for email in addresses:
query = client.query(kind="User")
query.add_filter('email', '=', email)
entities = query.fetch()
l = list(entities)
if l == []:
print "No match for", email
else:
for entity in l:
print entity.key.name, entity['email']
if __name__ == '__main__':
main()
| apache-2.0 | 7,845,751,704,223,151,000 | 34.7 | 93 | 0.693557 | false |
sebinthomas/pyvarnam | pyvarnam/varnam_defs.py | 1 | 3346 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Varnam library functions list
"""
# Varnam library functions list
from .utils import *
import ctypes as C
#REMINDER: Change this for every major release of varnam
LIBVARNAM_MAJOR_VERSION = 3
VARNAM_PATHS = ['','..','/usr/local/lib', '/usr/local/lib/i386-linux-gnu', '/usr/local/lib/x86_64-linux-gnu', '/usr/lib/i386-linux-gnu', '/usr/lib/x86_64-linux-gnu', '/usr/lib']
VARNAM_NAMES = ['libvarnam.so', "libvarnam.so.{0}".format(LIBVARNAM_MAJOR_VERSION), 'libvarnam.dylib', 'varnam.dll']
class VarnamHandle(C.Structure):
_fields_ = [('scheme_file', STRING),
('suggestions_file', STRING),
('internal', VOID)]
VARNAM_PTR = C.POINTER(VarnamHandle)
class Varray(C.Structure):
_fields_ = [('memory', C.POINTER(VOID)),
('allocated', C.c_size_t),
('used', C.c_size_t),
('index', INT)]
VARRAY_PTR = C.POINTER(Varray)
class VlearnStatus(C.Structure):
_fields_ = [('total_words', INT),
('failed', INT)]
VLEARN_STATUS_PTR = C.POINTER(VlearnStatus)
#TODO: do we need this ?
class Token(C.Structure):
_fields_ = [('id', INT),
('type', INT),
('match_type', INT),
('priority', INT),
('accept_condition', INT),
('flags', INT),
('tag', STRING),
('pattern', STRING),
('value1', STRING),
('value2', STRING),
('value3', STRING)]
class Word(C.Structure):
_fields_ = [('text', STRING),
('confidence', INT)]
FUNCTION_LIST = [
['varnam_init', [STRING, C.POINTER(VARNAM_PTR), C.POINTER(STRING)], INT],
['varnam_init_from_id', [STRING, C.POINTER(VARNAM_PTR), C.POINTER(STRING)], INT],
['varnam_version', [], STRING],
['varnam_transliterate', [VARNAM_PTR, STRING, C.POINTER(VARRAY_PTR)], INT],
['varnam_reverse_transliterate', [VARNAM_PTR, STRING, C.POINTER(STRING)], INT],
['varnam_detect_lang', [VARNAM_PTR, STRING], INT],
['varnam_learn', [VARNAM_PTR, STRING], INT],
['varnam_train', [VARNAM_PTR, STRING, STRING], INT],
['varnam_learn_from_file', [VARNAM_PTR, STRING, VLEARN_STATUS_PTR, VOID, VOID], INT],
['varnam_create_token', [VARNAM_PTR, STRING, STRING, STRING, STRING, STRING, INT, INT, INT, INT, INT], INT],
['varnam_set_scheme_details', [VARNAM_PTR, STRING, STRING, STRING, STRING, STRING], INT],
['varnam_get_last_error', [VARNAM_PTR], STRING],
['varnam_flush_buffer', [VARNAM_PTR], INT],
['varnam_config', [], INT],
['varnam_get_all_tokens', [VARNAM_PTR, INT, C.POINTER(VARRAY_PTR)], INT],
['varray_get', [VARRAY_PTR, INT], VOID],
['varray_length', [VARRAY_PTR], INT],
['varnam_export_words', [VARNAM_PTR, INT, STRING, INT, VOID], INT],
['varnam_import_learnings_from_file', [VARNAM_PTR, STRING, VOID], INT],
['varnam_destroy', [VARNAM_PTR], VOID],
['varnam_get_scheme_file', [VARNAM_PTR], STRING],
    ['varnam_get_suggestions_file', [VARNAM_PTR], STRING]]
# TODO: varnam_learn_from_file uses a callback. So does some other function.
# TODO: varnam_config uses a varargs function.
| mit | 1,290,528,990,029,869,300 | 37.906977 | 177 | 0.59205 | false |
rgayon/plaso | plaso/output/shared_dsv.py | 1 | 4484 | # -*- coding: utf-8 -*-
"""Shared functionality for delimiter separated values output modules."""
from __future__ import unicode_literals
from plaso.output import formatting_helper
from plaso.output import interface
class DSVEventFormattingHelper(formatting_helper.EventFormattingHelper):
"""Delimiter separated values output module event formatting helper."""
def __init__(
self, output_mediator, field_formatting_helper, field_names,
field_delimiter=','):
"""Initializes a delimiter separated values event formatting helper.
Args:
output_mediator (OutputMediator): output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
field_names (list[str]): names of the fields to output.
field_delimiter (Optional[str]): field delimiter.
"""
super(DSVEventFormattingHelper, self).__init__(output_mediator)
self._field_delimiter = field_delimiter
self._field_names = field_names
self._field_formatting_helper = field_formatting_helper
def _SanitizeField(self, field):
"""Sanitizes a field for output.
This method replaces any field delimiters with a space.
Args:
field (str): value of the field to sanitize.
Returns:
str: sanitized value of the field.
"""
if self._field_delimiter and isinstance(field, str):
return field.replace(self._field_delimiter, ' ')
return field
def GetFormattedEvent(self, event, event_data, event_data_stream, event_tag):
"""Retrieves a string representation of the event.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
event_tag (EventTag): event tag.
Returns:
str: string representation of the event.
"""
field_values = []
for field_name in self._field_names:
field_value = self._field_formatting_helper.GetFormattedField(
field_name, event, event_data, event_data_stream, event_tag)
field_value = self._SanitizeField(field_value)
field_values.append(field_value)
return self._field_delimiter.join(field_values)
def GetFormattedFieldNames(self):
"""Retrieves a string representation of the field names.
Returns:
str: string representation of the field names.
"""
return self._field_delimiter.join(self._field_names)
def SetFieldDelimiter(self, field_delimiter):
"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""
self._field_delimiter = field_delimiter
def SetFields(self, field_names):
"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""
self._field_names = field_names
class DSVOutputModule(interface.LinearOutputModule):
"""Shared functionality for delimiter separated values output modules."""
def __init__(
self, output_mediator, field_formatting_helper, names, delimiter=',',
header=None):
"""Initializes a delimiter separated values output module.
Args:
output_mediator (OutputMediator): an output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
names (list[str]): names of the fields to output.
delimiter (Optional[str]): field delimiter.
header (Optional[str]): header, where None will have WriteHeader
generate a header from the field names.
"""
event_formatting_helper = DSVEventFormattingHelper(
output_mediator, field_formatting_helper, names,
field_delimiter=delimiter)
super(DSVOutputModule, self).__init__(
output_mediator, event_formatting_helper)
self._header = header
def SetFieldDelimiter(self, field_delimiter):
"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""
self._event_formatting_helper.SetFieldDelimiter(field_delimiter)
def SetFields(self, field_names):
"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""
self._event_formatting_helper.SetFields(field_names)
def WriteHeader(self):
"""Writes the header to the output."""
if self._header:
output_text = self._header
else:
output_text = self._event_formatting_helper.GetFormattedFieldNames()
output_text = '{0:s}\n'.format(output_text)
self._output_writer.Write(output_text)
| apache-2.0 | -6,534,213,863,833,684,000 | 31.492754 | 79 | 0.690455 | false |
ysasaki6023/NeuralNetworkStudy | cifar04/net.py | 1 | 3132 | #!/usr/bin/env python
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer.utils import conv
class ImageProcessNetwork(chainer.Chain):
def __init__(self,
I_colors, I_Xunit, I_Yunit, F_unit,
N_PLayers = 4,
P0C_feature = 32,
P1C_feature = 32,
P2C_feature = 16,
P0C_filter = 3,
P1C_filter = 3,
P2C_filter = 3,
P0P_ksize = 2,
P1P_ksize = 2,
P2P_ksize = 2,
L1_dropout = 0.5,
L2_dropout = 0.0,
L2_unit = 500):
super(ImageProcessNetwork, self).__init__()
self.IsTrain = True
self.NPLayers = N_PLayers
self.NFeatures = [I_colors]
self.NFilter = [1]
self.NKsize = [1]
self.NImgPix = [(I_Xunit,I_Yunit)]
self.L1_dropout = L1_dropout
self.L2_dropout = L2_dropout
self.L2_unit = L2_unit
for iL in range(self.NPLayers):
## Set Variables
self.NFeatures.append(self.gradualVariable(iL,self.NPLayers,P0C_feature,P1C_feature,P2C_feature))
self.NFilter.append( self.gradualVariable(iL,self.NPLayers,P0C_filter ,P1C_filter ,P2C_filter ))
self.NKsize.append( self.gradualVariable(iL,self.NPLayers,P0P_ksize ,P1P_ksize ,P2P_ksize ))
## Update layers
self.NImgPix.append(
( conv.get_conv_outsize( self.NImgPix[-1][0], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True),
conv.get_conv_outsize( self.NImgPix[-1][1], self.NKsize[-1], self.NKsize[-1], 0, cover_all = True)))
self.add_link("P%d"%iL,L.Convolution2D( self.NFeatures[-2], self.NFeatures[-1],
self.NFilter[-1] , pad=int(self.NFilter[-1]/2.)))
self.add_link("L1",L.Linear( self.NImgPix[-1][0] * self.NImgPix[-1][1] * self.NFeatures[-1] , L2_unit))
self.add_link("L2",L.Linear( L2_unit, F_unit))
return
def gradualVariable(self, cLayer, tLayer, val0, val1, val2):
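        # Piecewise-linear schedule for a per-layer hyperparameter: interpolate
        # from val0 (first layer) to val1 (middle layer) over the first half of
        # the layers, then from val1 to val2 over the second half, rounded to int.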
pos = 0.5
if cLayer <= int(pos*tLayer): v0, v1, p0, p1, pc = val0, val1, 0, int(pos*tLayer), int( cLayer - 0 )
else : v0, v1, p0, p1, pc = val1, val2, int(pos*tLayer), tLayer-1, int( cLayer - int(pos*tLayer))
return int(float(v0) + (float(v1)-float(v0))/(float(p1)-float(p0))*float(pc))
def setTrainMode(self, IsTrain):
self.IsTrain = IsTrain
return
def __call__(self, x):
h = x
for iL in range(self.NPLayers):
h = self.__dict__["P%d"%iL](h)
h = F.local_response_normalization(h)
h = F.max_pooling_2d(F.relu(h), ksize=self.NKsize[iL+1], cover_all=True)
h = F.dropout(F.relu(self.L1(h)),ratio=self.L1_dropout,train=self.IsTrain)
h = F.dropout(F.relu(self.L2(h)),ratio=self.L2_dropout,train=self.IsTrain)
y = h
return y
| mit | 2,948,105,231,894,065,000 | 42.5 | 128 | 0.529055 | false |
caltech-chimera/pychimera | scripts/multiphot.py | 1 | 9783 | #!/usr/bin/env python
"""
--------------------------------------------------------------------------
Routine to perform aperture photometry on CHIMERA science frames.
Usage: python multiphot.py [options] image coords
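Example (an illustrative invocation; file names are placeholders and the
option values mirror the defaults defined below):
    python multiphot.py -m exact -i 14 -d 16 image_cube.fits coords.txt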
Authors:
Navtej Saini, Lee Rosenthal
Organization:
Caltech, Pasadena, CA, USA
Version:
7 January 2016 0.1 Initial implementation
9 February 2016 0.2 User input for photometric zero point
28 July 2017 0.3 Allow processing of multiple stars.
--------------------------------------------------------------------------
"""
import os, sys
import numpy as np, warnings
from StringIO import StringIO
from optparse import OptionParser
try:
import matplotlib.pylab as plt
except ImportError:
plot_flag = False
else:
try:
import seaborn
except ImportError:
pass
plot_flag = True
import chimera
def plotter(phot_data, nframes, exptime, outfile):
"""
Plot light curve.
Parameters
----------
phot_data : numpy array
Photometry array
nframes : int
Number of image cube frames
exptime : float
Kinetic or accumulation time
outfile : string
Name of the out png image
Returns
-------
None
"""
params = {'backend': 'ps',
'font.size': 10,
'axes.labelweight': 'medium',
'figure.dpi' : 300,
'savefig.dpi': 300,
'savefig.jpeg_quality': 100
}
plt.rcParams.update(params)
ts = np.linspace(0, nframes*exptime, nframes)
plt.figure(figsize=(6,4))
plt.title("Normalized Light Curve : %s" %phot_data[0]['DATETIME'].split('T')[0])
plt.xlabel("Time (secs)")
plt.ylabel("Normalized Flux")
plt.plot(ts, phot_data['FLUX_ADU']/np.mean(phot_data['FLUX_ADU']), "r-")
plt.savefig(outfile, dpi = 300, bbox_inches = "tight")
return
def process(infile, coords, method, inner_radius, outer_radius, cen_method, window_size, output, zmag):
"""
Entry point function to process science image.
Parameters
----------
infile : string
Science image or list of science images
coords : string
Input text file with coordinates of stars
    method : string
        Method used to determine the overlap between aperture and pixels
        (e.g. "exact")
    inner_radius : float
        Inner sky annulus radius in pixels
    outer_radius : float
        Outer sky annulus radius in pixels
cen_method : string
Centroid method
window_size : int
Centroid finding window size in pixels
output : string
Output file name
zmag : float
Photometric zero point
Returns
-------
None
"""
print "FASTPHOT: CHIMERA Fast Aperture Photometry Routine"
inner_radius = float(inner_radius)
outer_radius = float(outer_radius)
# Check if input is a string of FITS images or a text file with file names
if infile[0] == "@":
infile = infile[1:]
if not os.path.exists(infile):
print "REGISTER: Not able to locate file %s" %infile
image_cubes = []
with open(infile, "r") as fd:
for line in fd.readlines():
if len(line) > 1:
image_cubes.append(line.replace("\n", ""))
else:
image_cubes = infile.split(",")
# Number of images
ncubes = len(image_cubes)
pos = np.loadtxt(coords, ndmin = 2)
nstars = len(pos)
total_phot_data = []
for i in range(ncubes):
sci_file = image_cubes[i]
print " Processing science image %s" %sci_file
# Read FITS image and star coordinate
image = chimera.fitsread(sci_file)
# Instantiate an Aperphot object
ap = chimera.Aperphot(sci_file, coords)
# Set fwhmpsf, sigma, annulus, dannulus and zmag
ap.method = method
ap.inner_radius = inner_radius
ap.outer_radius = outer_radius
if zmag != "":
ap.zmag = float(zmag)
# Determine nominal aperture radius for photometry
if i == 0:
nom_aper = ap.cog(window_size, cen_method)
print " Nominal aperture radius : %4.1f pixels" %nom_aper
# Perform aperture photometry on all the frames
dtype = [("DATETIME", "S25"),("XCEN", "f4"),("YCEN", "f4"),("MSKY", "f8"),("NSKY", "f8"),("AREA", "f8"),("FLUX_ADU", "f8"),("FLUX_ELEC", "f8"),("FERR", "f8"),("MAG", "f8")]
phot_data = np.zeros([nstars, ap.nframes], dtype = dtype)
for j in range(ap.nframes):
print " Processing frame number : %d" %(j+1)
objpos = chimera.recenter(image[j,:,:], pos, window_size, cen_method)
aperphot_data = ap.phot(image[j,:,:], objpos, nom_aper)
pos = np.copy(objpos)
phot_data[:,j]['DATETIME'] = ap.addtime(j * ap.kintime).isoformat()
phot_data[:,j]['XCEN'] = aperphot_data["xcenter_raw"]
phot_data[:,j]['YCEN'] = aperphot_data["ycenter_raw"]
phot_data[:,j]['MSKY'] = aperphot_data["msky"]
phot_data[:,j]['NSKY'] = aperphot_data["nsky"]
phot_data[:,j]['AREA'] = aperphot_data["area"]
phot_data[:,j]['FLUX_ADU'] = aperphot_data["flux"]
phot_data[:,j]['FLUX_ELEC'] = phot_data[:,j]['FLUX_ADU'] * ap.epadu
phot_data[:,j]['MAG'] = ap.zmag - 2.5 * np.log10(phot_data[:,j]['FLUX_ELEC']/ap.exptime)
# Calculate error in flux - using the formula
# err = sqrt(flux * gain + npix * (1 + (npix/nsky)) * (flux_sky * gain + R**2))
            phot_data[:,j]['FERR'] = np.sqrt(phot_data[:,j]['FLUX_ELEC'] + phot_data[:,j]['AREA'] * (1 + phot_data[:,j]['AREA']/phot_data[:,j]['NSKY']) * (phot_data[:,j]['MSKY'] * ap.epadu + ap.readnoise**2))
total_phot_data.append(phot_data)
# Save photometry data in numpy binary format
print " Saving photometry data as numpy binary"
if output != "":
npy_outfile = output + ".npy"
else:
npy_outfile = sci_file.replace(".fits", ".phot.npy")
if os.path.exists(npy_outfile):
os.remove(npy_outfile)
#np.save(npy_outfile, phot_data)
# Plot first pass light curve
if plot_flag:
print " Plotting normalized light curve"
if output != "":
plt_outfile = output + ".png"
else:
plt_outfile = sci_file.replace(".fits", ".lc.png")
plotter(phot_data, ap.nframes, ap.kintime, plt_outfile)
# Convert the total_phot_data to array and reshape it
print ' Saving consolidated photometry data...'
total_phot_data_arr = np.concatenate(total_phot_data, axis=1)
# Save the array as npy file
if output != "":
np.save(output+"phot_total.npy", total_phot_data_arr)
else: np.save("phot_total.npy", total_phot_data_arr)
return
if __name__ == "__main__":
usage = "Usage: python %prog [options] sci_image coords"
description = "Description. Utility to perform fast aperture photometry in CHIMERA science images."
parser = OptionParser(usage = usage, version = "%prog 0.2", description = description)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default = False,
help = "print result messages to stdout"
)
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default = True,
help = "don't print result messages to stdout"
)
parser.add_option("-m", "--method", dest = "method",
action="store", metavar="METHOD", help = "Method to use for determining overlap between aperture and pixels (default is exact)",
default = "exact"
)
parser.add_option("-i", "--inner_radius", dest = "inner_radius",
action="store", metavar="INNER_RADIUS", help = "Inner radius of sky annlus in pixels (default is 14)",
default = 14
)
parser.add_option("-d", "--outer_radius", dest = "outer_radius",
action="store", metavar="OUTER_RADIUS", help = "Radius of sky annulus in pixels (default is 16)",
default = 16
)
parser.add_option("-c", "--cen_method", dest = "cen_method",
action="store", metavar="CEN_METHOD", help = "Centroid method (default is 2dg)",
default = "2dg"
)
parser.add_option("-w", "--window_size", dest = "window_size",
action="store", metavar="WINDOW_SIZE", help = "Window size for centroid (default is 35)",
default = 35
)
parser.add_option("-o", "--output", dest = "output",
action="store", metavar="OUTPUT", help = "Output file name",
default = ""
)
parser.add_option("-z", "--zmag", dest = "zmag",
action="store", metavar="ZMAG", help = "Photometric zeroo point",
default = ""
)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("FASTPHOT: Incorrect number of arguments")
# Check verbosity
if not options.verbose:
output = StringIO()
old_stdout = sys.stdout
sys.stdout = output
# Switch off warnings
warnings.filterwarnings('ignore')
process(args[0], args[1], options.method, options.inner_radius, options.outer_radius, options.cen_method, options.window_size, options.output, options.zmag)
# Reset verbosity
if not options.verbose:
sys.stdout = old_stdout
| mit | -4,409,292,525,545,918,500 | 32.618557 | 202 | 0.552898 | false |
sunqm/pyscf | pyscf/scf/uhf_symm.py | 1 | 22155 | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic unrestricted Hartree-Fock with point group symmetry.
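
Simple usage (an illustrative sketch; when mol.symmetry is set, scf.UHF(mol)
dispatches to the symmetry-adapted class defined in this module):

    >>> from pyscf import gto, scf
    >>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz',
    ...             symmetry=True, charge=1, spin=1)
    >>> mf = scf.UHF(mol)
    >>> mf.irrep_nelec = {'B1': (1, 0)}
    >>> mf.kernel()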
'''
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf import symm
from pyscf.lib import logger
from pyscf.scf import hf_symm
from pyscf.scf import uhf
from pyscf.scf import chkfile
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'scf_analyze_with_meta_lowdin', True)
MO_BASE = getattr(__config__, 'MO_BASE', 1)
def analyze(mf, verbose=logger.DEBUG, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
from pyscf.lo import orth
from pyscf.tools import dump_mat
mol = mf.mol
if not mol.symmetry:
return uhf.analyze(mf, verbose, with_meta_lowdin, **kwargs)
mo_energy = mf.mo_energy
mo_occ = mf.mo_occ
mo_coeff = mf.mo_coeff
ovlp_ao = mf.get_ovlp()
log = logger.new_logger(mf, verbose)
if log.verbose >= logger.NOTE:
mf.dump_scf_summary(log)
nirrep = len(mol.irrep_id)
ovlp_ao = mf.get_ovlp()
orbsyma, orbsymb = mf.get_orbsym(mo_coeff, ovlp_ao)
orbsyma_in_d2h = numpy.asarray(orbsyma) % 10
orbsymb_in_d2h = numpy.asarray(orbsymb) % 10
tot_sym = 0
noccsa = [sum(orbsyma_in_d2h[mo_occ[0]>0]==ir) for ir in mol.irrep_id]
noccsb = [sum(orbsymb_in_d2h[mo_occ[1]>0]==ir) for ir in mol.irrep_id]
for i, ir in enumerate(mol.irrep_id):
if (noccsa[i]+noccsb[i]) % 2:
tot_sym ^= ir
if mol.groupname in ('Dooh', 'Coov', 'SO3'):
log.note('TODO: total wave-function symmetry for %s', mol.groupname)
else:
log.note('Wave-function symmetry = %s',
symm.irrep_id2name(mol.groupname, tot_sym))
log.note('alpha occupancy for each irrep: '+(' %4s'*nirrep),
*mol.irrep_name)
log.note(' '+(' %4d'*nirrep),
*noccsa)
log.note('beta occupancy for each irrep: '+(' %4s'*nirrep),
*mol.irrep_name)
log.note(' '+(' %4d'*nirrep),
*noccsb)
log.note('**** MO energy ****')
irname_full = {}
for k, ir in enumerate(mol.irrep_id):
irname_full[ir] = mol.irrep_name[k]
irorbcnt = {}
for k, j in enumerate(orbsyma):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('alpha MO #%d (%s #%d), energy= %.15g occ= %g',
k+MO_BASE, irname_full[j], irorbcnt[j],
mo_energy[0][k], mo_occ[0][k])
irorbcnt = {}
for k, j in enumerate(orbsymb):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
log.note('beta MO #%d (%s #%d), energy= %.15g occ= %g',
k+MO_BASE, irname_full[j], irorbcnt[j],
mo_energy[1][k], mo_occ[1][k])
if mf.verbose >= logger.DEBUG:
label = mol.ao_labels()
molabel = []
irorbcnt = {}
for k, j in enumerate(orbsyma):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
molabel.append('#%-d(%s #%d)' %
(k+MO_BASE, irname_full[j], irorbcnt[j]))
if with_meta_lowdin:
log.debug(' ** alpha MO coefficients (expansion on meta-Lowdin AOs) **')
orth_coeff = orth.orth_ao(mol, 'meta_lowdin', s=ovlp_ao)
c_inv = numpy.dot(orth_coeff.conj().T, ovlp_ao)
mo = c_inv.dot(mo_coeff[0])
else:
log.debug(' ** alpha MO coefficients (expansion on AOs) **')
mo = mo_coeff[0]
        dump_mat.dump_rec(mf.stdout, mo, label, molabel, start=MO_BASE, **kwargs)
molabel = []
irorbcnt = {}
for k, j in enumerate(orbsymb):
if j in irorbcnt:
irorbcnt[j] += 1
else:
irorbcnt[j] = 1
molabel.append('#%-d(%s #%d)' %
(k+MO_BASE, irname_full[j], irorbcnt[j]))
if with_meta_lowdin:
log.debug(' ** beta MO coefficients (expansion on meta-Lowdin AOs) **')
mo = c_inv.dot(mo_coeff[1])
else:
log.debug(' ** beta MO coefficients (expansion on AOs) **')
mo = mo_coeff[1]
dump_mat.dump_rec(mol.stdout, mo, label, molabel, start=MO_BASE, **kwargs)
dm = mf.make_rdm1(mo_coeff, mo_occ)
if with_meta_lowdin:
pop_and_charge = mf.mulliken_meta(mol, dm, s=ovlp_ao, verbose=log)
else:
pop_and_charge = mf.mulliken_pop(mol, dm, s=ovlp_ao, verbose=log)
dip = mf.dip_moment(mol, dm, verbose=log)
return pop_and_charge, dip
def get_irrep_nelec(mol, mo_coeff, mo_occ, s=None):
'''Alpha/beta electron numbers for each irreducible representation.
Args:
mol : an instance of :class:`Mole`
To provide irrep_id, and spin-adapted basis
mo_occ : a list of 1D ndarray
Regular occupancy, without grouping for irreps
mo_coeff : a list of 2D ndarray
Regular orbital coefficients, without grouping for irreps
Returns:
irrep_nelec : dict
The number of alpha/beta electrons for each irrep {'ir_name':(int,int), ...}.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True, charge=1, spin=1, verbose=0)
>>> mf = scf.UHF(mol)
>>> mf.scf()
-75.623975516256721
>>> scf.uhf_symm.get_irrep_nelec(mol, mf.mo_coeff, mf.mo_occ)
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 1), 'B2': (1, 0)}
'''
if getattr(mo_coeff[0], 'orbsym', None) is not None:
orbsyma = mo_coeff[0].orbsym
else:
orbsyma = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[0], s, False)
if getattr(mo_coeff[1], 'orbsym', None) is not None:
orbsymb = mo_coeff[1].orbsym
else:
orbsymb = symm.label_orb_symm(mol, mol.irrep_id, mol.symm_orb,
mo_coeff[1], s, False)
orbsyma = numpy.array(orbsyma)
orbsymb = numpy.array(orbsymb)
irrep_nelec = dict([(mol.irrep_name[k], (int(sum(mo_occ[0][orbsyma==ir])),
int(sum(mo_occ[1][orbsymb==ir]))))
for k, ir in enumerate(mol.irrep_id)])
return irrep_nelec
def canonicalize(mf, mo_coeff, mo_occ, fock=None):
'''Canonicalization diagonalizes the UHF Fock matrix in occupied, virtual
    subspaces separately (without changing the occupancy).
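
    Examples (an illustrative sketch; assumes a converged SCF object ``mf``):

    >>> mo_energy, mo_coeff = mf.canonicalize(mf.mo_coeff, mf.mo_occ)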
'''
mol = mf.mol
if not mol.symmetry:
return uhf.canonicalize(mf, mo_coeff, mo_occ, fock)
mo_occ = numpy.asarray(mo_occ)
assert(mo_occ.ndim == 2)
if fock is None:
dm = mf.make_rdm1(mo_coeff, mo_occ)
fock = mf.get_hcore() + mf.get_veff(mf.mol, dm)
occidxa = mo_occ[0] == 1
occidxb = mo_occ[1] == 1
viridxa = ~occidxa
viridxb = ~occidxb
mo = numpy.empty_like(mo_coeff)
mo_e = numpy.empty(mo_occ.shape)
s = mf.get_ovlp()
if (getattr(mo_coeff, 'orbsym', None) is not None or
(getattr(mo_coeff[0], 'orbsym', None) is not None and
getattr(mo_coeff[1], 'orbsym', None) is not None)):
orbsyma, orbsymb = mf.get_orbsym(mo_coeff, s)
def eig_(fock, mo_coeff, idx, es, cs):
if numpy.count_nonzero(idx) > 0:
orb = mo_coeff[:,idx]
f1 = reduce(numpy.dot, (orb.conj().T, fock, orb))
e, c = scipy.linalg.eigh(f1)
es[idx] = e
cs[:,idx] = numpy.dot(mo_coeff[:,idx], c)
for ir in set(orbsyma):
idx_ir = orbsyma == ir
eig_(fock[0], mo_coeff[0], idx_ir & occidxa, mo_e[0], mo[0])
eig_(fock[0], mo_coeff[0], idx_ir & viridxa, mo_e[0], mo[0])
for ir in set(orbsymb):
idx_ir = orbsymb == ir
eig_(fock[1], mo_coeff[1], idx_ir & occidxb, mo_e[1], mo[1])
eig_(fock[1], mo_coeff[1], idx_ir & viridxb, mo_e[1], mo[1])
else:
def eig_(fock, mo_coeff, idx, es, cs):
if numpy.count_nonzero(idx) > 0:
orb = mo_coeff[:,idx]
f1 = reduce(numpy.dot, (orb.conj().T, fock, orb))
e, c = scipy.linalg.eigh(f1)
es[idx] = e
c = numpy.dot(mo_coeff[:,idx], c)
cs[:,idx] = hf_symm._symmetrize_canonicalization_(mf, e, c, s)
eig_(fock[0], mo_coeff[0], occidxa, mo_e[0], mo[0])
eig_(fock[0], mo_coeff[0], viridxa, mo_e[0], mo[0])
eig_(fock[1], mo_coeff[1], occidxb, mo_e[1], mo[1])
eig_(fock[1], mo_coeff[1], viridxb, mo_e[1], mo[1])
orbsyma, orbsymb = mf.get_orbsym(mo, s)
mo = (lib.tag_array(mo[0], orbsym=orbsyma),
lib.tag_array(mo[1], orbsym=orbsymb))
return mo_e, mo
def get_orbsym(mol, mo_coeff, s=None, check=False):
if getattr(mo_coeff, 'orbsym', None) is not None:
orbsym = numpy.asarray(mo_coeff.orbsym)
else:
orbsym = (hf_symm.get_orbsym(mol, mo_coeff[0], s, check),
hf_symm.get_orbsym(mol, mo_coeff[1], s, check))
return orbsym
def get_wfnsym(mf, mo_coeff=None, mo_occ=None):
orbsyma, orbsymb = mf.get_orbsym(mo_coeff)
if mf.mol.groupname in ('SO3', 'Dooh', 'Coov'):
if numpy.any(orbsyma > 7):
logger.warn(mf, 'Wave-function symmetry for %s not supported. '
'Wfn symmetry is mapped to D2h/C2v group.',
mf.mol.groupname)
orbsyma = orbsyma % 10
orbsymb = orbsymb % 10
if mo_occ is None:
mo_occ = mf.mo_occ
wfnsym = 0
for ir in orbsyma[mo_occ[0] == 1]:
wfnsym ^= ir
for ir in orbsymb[mo_occ[1] == 1]:
wfnsym ^= ir
return wfnsym
class SymAdaptedUHF(uhf.UHF):
__doc__ = uhf.UHF.__doc__ + '''
Attributes for symmetry allowed UHF:
irrep_nelec : dict
Specify the number of alpha/beta electrons for particular irrep
{'ir_name':(int,int), ...}.
For the irreps not listed in these dicts, the program will choose the
occupancy based on the orbital energies.
Examples:
>>> mol = gto.M(atom='O 0 0 0; H 0 0 1; H 0 1 0', basis='ccpvdz', symmetry=True, charge=1, spin=1, verbose=0)
    >>> mf = scf.UHF(mol)
>>> mf.scf()
-75.623975516256692
>>> mf.get_irrep_nelec()
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 1), 'B2': (1, 0)}
>>> mf.irrep_nelec = {'B1': (1, 0)}
>>> mf.scf()
-75.429189192031131
>>> mf.get_irrep_nelec()
{'A1': (3, 3), 'A2': (0, 0), 'B1': (1, 0), 'B2': (1, 1)}
'''
def __init__(self, mol):
uhf.UHF.__init__(self, mol)
# number of electrons for each irreps
self.irrep_nelec = {}
self._keys = self._keys.union(['irrep_nelec'])
def dump_flags(self, verbose=None):
uhf.UHF.dump_flags(self, verbose)
if self.irrep_nelec:
logger.info(self, 'irrep_nelec %s', self.irrep_nelec)
return self
def build(self, mol=None):
if mol is None: mol = self.mol
if mol.symmetry:
for irname in self.irrep_nelec:
if irname not in self.mol.irrep_name:
logger.warn(self, 'No irrep %s', irname)
hf_symm.check_irrep_nelec(mol, self.irrep_nelec, self.nelec)
return uhf.UHF.build(self, mol)
def eig(self, h, s):
mol = self.mol
if not mol.symmetry:
return self._eigh(h, s)
nirrep = mol.symm_orb.__len__()
s = symm.symmetrize_matrix(s, mol.symm_orb)
ha = symm.symmetrize_matrix(h[0], mol.symm_orb)
cs = []
es = []
orbsym = []
for ir in range(nirrep):
e, c = self._eigh(ha[ir], s[ir])
cs.append(c)
es.append(e)
orbsym.append([mol.irrep_id[ir]] * e.size)
ea = numpy.hstack(es)
ca = hf_symm.so2ao_mo_coeff(mol.symm_orb, cs)
ca = lib.tag_array(ca, orbsym=numpy.hstack(orbsym))
hb = symm.symmetrize_matrix(h[1], mol.symm_orb)
cs = []
es = []
orbsym = []
for ir in range(nirrep):
e, c = self._eigh(hb[ir], s[ir])
cs.append(c)
es.append(e)
orbsym.append([mol.irrep_id[ir]] * e.size)
eb = numpy.hstack(es)
cb = hf_symm.so2ao_mo_coeff(mol.symm_orb, cs)
cb = lib.tag_array(cb, orbsym=numpy.hstack(orbsym))
return (ea,eb), (ca,cb)
def get_grad(self, mo_coeff, mo_occ, fock=None):
g = uhf.UHF.get_grad(self, mo_coeff, mo_occ, fock)
if self.mol.symmetry:
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
orbsyma, orbsymb = self.get_orbsym(mo_coeff, self.get_ovlp())
sym_forbida = orbsyma[viridxa].reshape(-1,1) != orbsyma[occidxa]
sym_forbidb = orbsymb[viridxb].reshape(-1,1) != orbsymb[occidxb]
sym_forbid = numpy.hstack((sym_forbida.ravel(),
sym_forbidb.ravel()))
g[sym_forbid] = 0
return g
def get_occ(self, mo_energy=None, mo_coeff=None):
        '''We assume mo_energy is grouped by symmetry irreps (see function
        self.eig). The orbitals are sorted after SCF.
'''
if mo_energy is None: mo_energy = self.mo_energy
mol = self.mol
if not mol.symmetry:
return uhf.UHF.get_occ(self, mo_energy, mo_coeff)
orbsyma, orbsymb = self.get_orbsym(mo_coeff, self.get_ovlp())
mo_occ = numpy.zeros_like(mo_energy)
idx_ea_left = []
idx_eb_left = []
neleca_fix = nelecb_fix = 0
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idxa = numpy.where(orbsyma == ir)[0]
ir_idxb = numpy.where(orbsymb == ir)[0]
if irname in self.irrep_nelec:
if isinstance(self.irrep_nelec[irname], (int, numpy.integer)):
nelecb = self.irrep_nelec[irname] // 2
neleca = self.irrep_nelec[irname] - nelecb
else:
neleca, nelecb = self.irrep_nelec[irname]
ea_idx = numpy.argsort(mo_energy[0][ir_idxa].round(9), kind='mergesort')
eb_idx = numpy.argsort(mo_energy[1][ir_idxb].round(9), kind='mergesort')
mo_occ[0,ir_idxa[ea_idx[:neleca]]] = 1
mo_occ[1,ir_idxb[eb_idx[:nelecb]]] = 1
neleca_fix += neleca
nelecb_fix += nelecb
else:
idx_ea_left.append(ir_idxa)
idx_eb_left.append(ir_idxb)
nelec = self.nelec
neleca_float = nelec[0] - neleca_fix
nelecb_float = nelec[1] - nelecb_fix
assert(neleca_float >= 0)
assert(nelecb_float >= 0)
if len(idx_ea_left) > 0:
idx_ea_left = numpy.hstack(idx_ea_left)
ea_left = mo_energy[0][idx_ea_left]
ea_sort = numpy.argsort(ea_left.round(9), kind='mergesort')
occ_idx = idx_ea_left[ea_sort][:neleca_float]
mo_occ[0][occ_idx] = 1
if len(idx_eb_left) > 0:
idx_eb_left = numpy.hstack(idx_eb_left)
eb_left = mo_energy[1][idx_eb_left]
eb_sort = numpy.argsort(eb_left.round(9), kind='mergesort')
occ_idx = idx_eb_left[eb_sort][:nelecb_float]
mo_occ[1][occ_idx] = 1
vir_idx = (mo_occ[0]==0)
if self.verbose >= logger.INFO and numpy.count_nonzero(vir_idx) > 0:
noccsa = []
noccsb = []
for i, ir in enumerate(mol.irrep_id):
irname = mol.irrep_name[i]
ir_idxa = orbsyma == ir
ir_idxb = orbsymb == ir
noccsa.append(numpy.count_nonzero(mo_occ[0][ir_idxa]))
noccsb.append(numpy.count_nonzero(mo_occ[1][ir_idxb]))
ir_id2name = dict(zip(mol.irrep_id, mol.irrep_name))
ehomo = ehomoa = max(mo_energy[0][mo_occ[0]>0 ])
elumo = elumoa = min(mo_energy[0][mo_occ[0]==0])
irhomoa = ir_id2name[orbsyma[mo_energy[0] == ehomoa][0]]
irlumoa = ir_id2name[orbsyma[mo_energy[0] == elumoa][0]]
logger.info(self, 'alpha HOMO (%s) = %.15g LUMO (%s) = %.15g',
irhomoa, ehomoa, irlumoa, elumoa)
if nelecb_float > 0:
ehomob = max(mo_energy[1][mo_occ[1]>0 ])
elumob = min(mo_energy[1][mo_occ[1]==0])
irhomob = ir_id2name[orbsymb[mo_energy[1] == ehomob][0]]
irlumob = ir_id2name[orbsymb[mo_energy[1] == elumob][0]]
logger.info(self, 'beta HOMO (%s) = %.15g LUMO (%s) = %.15g',
irhomob, ehomob, irlumob, elumob)
ehomo = max(ehomoa,ehomob)
elumo = min(elumoa,elumob)
logger.debug(self, 'alpha irrep_nelec = %s', noccsa)
logger.debug(self, 'beta irrep_nelec = %s', noccsb)
hf_symm._dump_mo_energy(mol, mo_energy[0], mo_occ[0], ehomo, elumo,
orbsyma, 'alpha-', verbose=self.verbose)
hf_symm._dump_mo_energy(mol, mo_energy[1], mo_occ[1], ehomo, elumo,
orbsymb, 'beta-', verbose=self.verbose)
if mo_coeff is not None and self.verbose >= logger.DEBUG:
ovlp_ao = self.get_ovlp()
ss, s = self.spin_square((mo_coeff[0][:,mo_occ[0]>0],
mo_coeff[1][:,mo_occ[1]>0]), ovlp_ao)
logger.debug(self, 'multiplicity <S^2> = %.8g 2S+1 = %.8g', ss, s)
return mo_occ
def _finalize(self):
uhf.UHF._finalize(self)
ea = numpy.hstack(self.mo_energy[0])
eb = numpy.hstack(self.mo_energy[1])
# Using mergesort because it is stable. We don't want to change the
        # ordering of the symmetry labels when two orbitals are degenerate.
oa_sort = numpy.argsort(ea[self.mo_occ[0]>0 ].round(9), kind='mergesort')
va_sort = numpy.argsort(ea[self.mo_occ[0]==0].round(9), kind='mergesort')
ob_sort = numpy.argsort(eb[self.mo_occ[1]>0 ].round(9), kind='mergesort')
vb_sort = numpy.argsort(eb[self.mo_occ[1]==0].round(9), kind='mergesort')
idxa = numpy.arange(ea.size)
idxa = numpy.hstack((idxa[self.mo_occ[0]> 0][oa_sort],
idxa[self.mo_occ[0]==0][va_sort]))
idxb = numpy.arange(eb.size)
idxb = numpy.hstack((idxb[self.mo_occ[1]> 0][ob_sort],
idxb[self.mo_occ[1]==0][vb_sort]))
self.mo_energy = (ea[idxa], eb[idxb])
orbsyma, orbsymb = self.get_orbsym(self.mo_coeff, self.get_ovlp())
self.mo_coeff = (lib.tag_array(self.mo_coeff[0][:,idxa], orbsym=orbsyma[idxa]),
lib.tag_array(self.mo_coeff[1][:,idxb], orbsym=orbsymb[idxb]))
self.mo_occ = (self.mo_occ[0][idxa], self.mo_occ[1][idxb])
if self.chkfile:
chkfile.dump_scf(self.mol, self.chkfile, self.e_tot, self.mo_energy,
self.mo_coeff, self.mo_occ, overwrite_mol=False)
return self
@lib.with_doc(analyze.__doc__)
def analyze(self, verbose=None, with_meta_lowdin=WITH_META_LOWDIN,
**kwargs):
if verbose is None: verbose = self.verbose
return analyze(self, verbose, with_meta_lowdin, **kwargs)
@lib.with_doc(get_irrep_nelec.__doc__)
def get_irrep_nelec(self, mol=None, mo_coeff=None, mo_occ=None, s=None):
if mol is None: mol = self.mol
if mo_occ is None: mo_occ = self.mo_occ
if mo_coeff is None: mo_coeff = self.mo_coeff
if s is None: s = self.get_ovlp()
return get_irrep_nelec(mol, mo_coeff, mo_occ, s)
def get_orbsym(self, mo_coeff=None, s=None):
if mo_coeff is None:
mo_coeff = self.mo_coeff
if s is None:
s = self.get_ovlp()
return get_orbsym(self.mol, mo_coeff, s)
orbsym = property(get_orbsym)
get_wfnsym = get_wfnsym
wfnsym = property(get_wfnsym)
canonicalize = canonicalize
UHF = SymAdaptedUHF
class HF1e(UHF):
def scf(self, *args):
logger.info(self, '\n')
logger.info(self, '******** 1 electron system ********')
self.converged = True
h1e = self.get_hcore(self.mol)
s1e = self.get_ovlp(self.mol)
self.mo_energy, self.mo_coeff = self.eig([h1e]*2, s1e)
self.mo_occ = self.get_occ(self.mo_energy, self.mo_coeff)
self.e_tot = self.mo_energy[0][self.mo_occ[0]>0][0] + self.mol.energy_nuc()
self._finalize()
return self.e_tot
del(WITH_META_LOWDIN)
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.build(
verbose = 1,
output = None,
atom = [['H', (0.,0.,0.)],
['H', (0.,0.,1.)], ],
basis = {'H': 'ccpvdz'},
symmetry = True,
charge = -1,
spin = 1
)
method = UHF(mol)
method.verbose = 5
method.irrep_nelec['A1u'] = (1,0)
energy = method.kernel()
print(energy)
method.analyze()
| apache-2.0 | 201,078,578,244,645,060 | 38.491979 | 113 | 0.534642 | false |
daichi-yoshikawa/dnn | examples/mnist/nn_mnist.py | 1 | 3211 | # Authors: Daichi Yoshikawa <[email protected]>
# License: BSD 3 clause
import sys
sys.path.append('../..')
import json
import numpy as np
"""Configure logger before importing dnnet."""
import logging.config
with open('../common/logging.json') as f:
data = json.load(f)
logging.config.dictConfig(data)
import dnnet
from dnnet.config import Config
Config.enable_gpu()
from dnnet.neuralnet import NeuralNetwork
from dnnet.utils.nn_utils import scale_normalization
from dnnet.training.optimizer import SGD, Momentum, AdaGrad, Adam, AdaDelta, RMSProp, SMORMS3
from dnnet.training.weight_initialization import DefaultInitialization, He
from dnnet.training.loss_function import MultinomialCrossEntropy
from dnnet.training.loss_function import SquaredError
from dnnet.layers.affine import AffineLayer
from dnnet.layers.activation import Activation, ActivationLayer
from dnnet.layers.dropout import DropoutLayer
from dnnet.layers.batch_norm import BatchNormLayer
from data import get_mnist
data_dir = '../../data'
x, y = get_mnist(data_dir)
scale_normalization(x)
x = x.reshape(-1, 1, 28, 28)
dtype = np.float32
force_cpu = {
'activation': True,
'dropout': True,
'batch_norm': True
}
model = NeuralNetwork(input_shape=(1, 28, 28), dtype=dtype)
#model = NeuralNetwork(input_shape=784, dtype=dtype)
model.add(DropoutLayer(drop_ratio=0.2, force_cpu=force_cpu['dropout']))
model.add(AffineLayer(
output_shape=400, weight_initialization=He()))
model.add(BatchNormLayer(force_cpu=force_cpu['batch_norm']))
model.add(ActivationLayer(activation=Activation.Type.relu,
force_cpu=force_cpu['activation']))
model.add(DropoutLayer(drop_ratio=0.2, force_cpu=force_cpu['dropout']))
model.add(AffineLayer(
output_shape=400, weight_initialization=He()))
model.add(BatchNormLayer(force_cpu=force_cpu['batch_norm']))
model.add(ActivationLayer(activation=Activation.Type.relu,
force_cpu=force_cpu['activation']))
model.add(AffineLayer(
output_shape=10, weight_initialization=DefaultInitialization()))
model.add(BatchNormLayer(force_cpu=force_cpu['batch_norm']))
model.add(ActivationLayer(activation=Activation.Type.softmax,
force_cpu=force_cpu['activation']))
model.compile()
config_str = model.get_config_str()
print(config_str)
#optimizer = SGD(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
#optimizer = Momentum(learning_rate=3e-2, weight_decay=1e-3, momentum_rate=0.99, dtype=dtype)
optimizer = AdaGrad(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
#optimizer = Adam(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
#optimizer = AdaDelta(learning_rate=3e-2, weight_decay=1e-3, gamma=0.99, dtype=dtype)
#optimizer = RMSProp(learning_rate=3e-2, weight_decay=1e-3, dtype=dtype)
lc = model.fit(
x=x, y=y, epochs=5, batch_size=100, optimizer=optimizer,
loss_function=MultinomialCrossEntropy(),
learning_curve=True, shuffle=True, shuffle_per_epoch=True,
test_data_ratio=0.142857, # Use 60,000 for training and 10,000 for test.
train_data_ratio_for_eval=0.01)
lc.plot(figsize=(8,10), fontsize=12)
model.show_filters(0, shape=(28, 28), layout=(10, 10), figsize=(12, 12))
| bsd-3-clause | -1,805,999,635,159,180,500 | 35.078652 | 93 | 0.737776 | false |
yingcuhk/LeetCode | Algorithms/#321 Create Maximum Number/PythonCode.py | 1 | 4004 |
"""
Given two arrays of length m and n with digits 0-9 representing two numbers. Create the maximum number of length k <= m + n from digits of the two. The relative order of the digits from the same array must be preserved. Return an array of the k digits. You should try to optimize your time and space complexity.
Example 1:
nums1 = [3, 4, 6, 5]
nums2 = [9, 1, 2, 5, 8, 3]
k = 5
return [9, 8, 6, 5, 3]
Example 2:
nums1 = [6, 7]
nums2 = [6, 0, 4]
k = 5
return [6, 7, 6, 0, 4]
Example 3:
nums1 = [3, 9]
nums2 = [8, 9]
k = 3
return [9, 8, 9]
"""
class Solution(object):
def maxNumber(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
S = [(0,0)]
remain_k = k
pos = []
while remain_k > 0:
new_S = []
highdig = -1
for s in S:
canddig, state = self.highest_digit(nums1,nums2,s,remain_k)
if canddig > highdig:
highdig = canddig
new_S = state
if canddig == highdig:
new_S = list(set(new_S + state))
#print new_S
pos.append(highdig)
S = new_S
remain_k = remain_k-1
return pos
#return self.maxNum_recursive(nums1,nums2,0,0,k)
def highest_digit(self,nums1,nums2,state,remain_k):
beg1 = state[0]
beg2 = state[1]
N1 = len(nums1)
N2 = len(nums2)
if remain_k == 1:
return max(nums1[beg1:]+nums2[beg2:]), [(N1,N2)]
ind1,ind2 = beg1,beg2
highdig1 = -1
pos1 = -1
while N1-ind1+N2-beg2 >= remain_k and ind1 < N1:
if nums1[ind1] > highdig1:
highdig1 = nums1[ind1]
pos1 = ind1
ind1 += 1
highdig2 = -1
pos2 = -1
while N1-beg1+N2-ind2 >= remain_k and ind2 < N2:
if nums2[ind2] > highdig2:
highdig2 = nums2[ind2]
pos2 = ind2
ind2 +=1
if highdig1 > highdig2:
return highdig1, [(pos1+1,beg2)]
elif highdig2 > highdig1:
return highdig2, [(beg1, pos2+1)]
else:
return highdig1, [(pos1+1,beg2),(beg1, pos2+1)]
"""
# a recursive solution
def maxNum_recursive(self,nums1,nums2,beg1,beg2,k):
N1 = len(nums1)
N2 = len(nums2)
if k == 0:
return []
highdig1 = -1
pos1 = -1
ind1,ind2 = beg1,beg2
while N1-ind1+N2-beg2 >= k and ind1 < N1:
if nums1[ind1] > highdig1:
highdig1 = nums1[ind1]
pos1 = ind1
ind1 += 1
highdig2 = -1
pos2 = -1
while N1-beg1+N2-ind2 >= k and ind2 < N2:
if nums2[ind2] > highdig2:
highdig2 = nums2[ind2]
pos2 = ind2
ind2 +=1
if highdig1 > highdig2:
return [highdig1]+self.maxNum_recursive(nums1,nums2,pos1+1,beg2,k-1)
elif highdig2 > highdig1:
return [highdig2]+self.maxNum_recursive(nums1,nums2,beg1,pos2+1,k-1)
else:
if pos2 == N2-1:
return [highdig1]+self.maxNum_recursive(nums1,nums2,pos1+1,beg2,k-1)
if pos1 == N1-1:
return [highdig2]+self.maxNum_recursive(nums1,nums2,beg1,pos2+1,k-1)
pos1 = [highdig1]+self.maxNum_recursive(nums1,nums2,pos1+1,beg2,k-1)
pos2 = [highdig2]+self.maxNum_recursive(nums1,nums2,beg1,pos2+1,k-1)
return self.the_larger_one(pos1,pos2)
def the_larger_one(self,pos1,pos2):
for val1,val2 in zip(pos1,pos2):
if val1 > val2:
return pos1
if val2 > val1:
return pos2
return pos1
"""
| mit | -3,520,379,620,287,191,600 | 28.021739 | 311 | 0.488511 | false |
lycantropos/VKScheduler | vk_scheduler/app.py | 1 | 10827 | import logging
import math
import os
import time
from typing import List
import requests
import vk.exceptions
from vk_app import App
from vk_app.app import captchured
from vk_app.models import VKPhoto, VKPhotoAlbum, VKVideo, VKPost
from vk_app.utils import make_delayed
from vk_scheduler.settings import (CONFIGURATION_FILE_PATH, TMP_DRC_ABSPATH,
CAPTCHA_IMG_ABSPATH, LINKS_SEP, LINKS_BLOCK_RE,
IMG_LINK_RE, EXTERNAL_VIDEO_LINK_RE,
MINIMAL_INTERVAL_BETWEEN_POST_EDITING_REQUESTS_IN_SECONDS,
config)
from vk_scheduler.utils import get_vk_object_ids, download, clear_drc, get_vk_object_links
class Scheduler(App):
def __init__(self, app_id: int = 0, group_id: int = 1,
user_login: str = '', user_password: str = '',
scope: str = '', access_token: str = '',
api_version: str = '5.57',
last_check_utc_timestamp: int = 0):
super().__init__(app_id, user_login, user_password, scope, access_token, api_version)
self.group_id = group_id
self.last_check_utc_timestamp = last_check_utc_timestamp
def check_posts(self):
for ind, unchecked_post in enumerate(self.unchecked_posts_by_community):
logging.info('Processing post: https://vk.com/wall{}'.format(unchecked_post.vk_id))
try:
self.edit_post(unchecked_post)
except vk.exceptions.VkAPIError:
logging.exception('Some error arose. Post will not be edited. '
'Continue...')
logging.info('Number of posts edited so far {}'.format(ind + 1))
self.last_check_utc_timestamp = int(time.time())
self.log_last_check_utc_timestamp()
clear_drc(TMP_DRC_ABSPATH)
def edit_post(self, post: VKPost):
search_res = LINKS_BLOCK_RE.search(post.text)
if search_res is None:
return
links_block = search_res.group().strip()
links = list(link.strip() for link in links_block.split(LINKS_SEP))
photos_links = get_vk_object_links(VKPhoto, links)
photo_albums_links = get_vk_object_links(VKPhotoAlbum, links)
videos_links = get_vk_object_links(VKVideo, links)
images_links = list(
link
for link in links
if IMG_LINK_RE.match(link) is not None
)
external_videos_links = list(
link
for link in links
if EXTERNAL_VIDEO_LINK_RE.match(link) is not None
)
if photos_links:
photos_by_links = self.get_photos_by_links(photos_links)
if photo_albums_links:
photo_albums = self.get_photo_albums_by_links(photo_albums_links)
if images_links:
photos_by_images_links = self.get_photos_by_images_links(images_links)
if videos_links:
videos_by_links = self.get_videos_by_links(videos_links)
if external_videos_links:
videos_by_external_links = self.get_videos_by_external_links(external_videos_links)
attachment_id_format = '{key}{vk_id}'
attachments_ids = list(
attachment_id_format.format(key=key, vk_id=vk_attachment.vk_id)
for attachment in post.attachments
for key, vk_attachment in attachment.items()
)
obscure_links = list()
for link in links:
if len(attachments_ids) >= 10:
logging.error('Too many attachments, '
'next link would be ignored: {}'.format(link))
obscure_links.append(link)
continue
attachment = None
if link in photos_links:
attachment = next((photo
for photo in photos_by_links
if photo.vk_id in link),
None)
elif link in photo_albums_links:
attachment = next((photo_album
for photo_album in photo_albums
if photo_album.vk_id in link),
None)
elif link in images_links:
attachment = photos_by_images_links.pop(0) if photos_by_images_links else None
elif link in videos_links:
attachment = next((video
for video in videos_by_links
if video.vk_id in link),
None)
elif link in external_videos_links:
attachment = next((video
for video in videos_by_external_links
if video.player_link in link),
None)
if attachment is not None:
attachments_ids.append(
attachment_id_format.format(key=attachment.key(), vk_id=attachment.vk_id)
)
else:
logging.error('Unknown link type: {}'.format(link))
obscure_links.append(link)
attachments = ','.join(attachments_ids)
message = post.text.replace(links_block, LINKS_SEP.join(obscure_links))
self.post_edited(post, message, attachments)
@make_delayed(MINIMAL_INTERVAL_BETWEEN_POST_EDITING_REQUESTS_IN_SECONDS)
@captchured(CAPTCHA_IMG_ABSPATH)
def post_edited(self, post: VKPost, message: str, attachments: str, **params):
self.api_session.wall.edit(owner_id=post.owner_id, post_id=post.object_id,
message=message, attachments=attachments, **params)
return
@property
def unchecked_posts_by_community(self) -> List[VKPost]:
params = dict(
owner_id=-self.group_id,
offset=0,
count=50,
filter='owner'
)
response = self.api_session.wall.get(**params)
raw_posts = response['items']
total_count = response['count']
while raw_posts[-1]['date'] > self.last_check_utc_timestamp and params['offset'] < total_count:
params['offset'] += params['count']
response = self.api_session.wall.get(**params)
raw_posts.extend(response['items'])
raw_posts.sort(key=lambda post: post['date'])
for raw_post in raw_posts:
if raw_post['date'] > self.last_check_utc_timestamp:
yield VKPost.from_raw(raw_post)
def get_photos_by_links(self, photos_links: List[str]) -> List[VKPhoto]:
photos_ids = get_vk_object_ids(VKPhoto, photos_links)
raw_photos = self.api_session.photos.getById(photos=','.join(photos_ids))
photos = list(VKPhoto.from_raw(raw_photo) for raw_photo in raw_photos)
return photos
def get_videos_by_links(self, videos_links: List[str]) -> List[VKVideo]:
videos_ids = get_vk_object_ids(VKVideo, videos_links)
raw_videos = self.api_session.video.get(videos=','.join(videos_ids))['items']
videos = list(VKVideo.from_raw(raw_photo) for raw_photo in raw_videos)
return videos
def get_photo_albums_by_links(self, albums_links: List[str]) -> List[VKVideo]:
albums_ids = get_vk_object_ids(VKPhotoAlbum, albums_links)
owners_ids_albums_ids = dict()
for album_id in albums_ids:
album_owner_id, album_object_id = album_id.split('_')
owners_ids_albums_ids.setdefault(album_owner_id, []).append(album_object_id)
raw_albums = list()
for owner_id, albums_ids in owners_ids_albums_ids.items():
raw_albums.extend(
self.api_session.photos.getAlbums(owner_id=owner_id,
album_ids=','.join(albums_ids))['items']
)
albums = list(VKPhotoAlbum.from_raw(raw_album) for raw_album in raw_albums)
return albums
def get_photos_by_images_links(self, images_links: List[str]) -> List[VKPhoto]:
photos = list()
save_method = VKPhoto.save_method('wall')
upload_server_method = VKPhoto.getUploadServer_method('wall')
# VK can process only seven photos at once,
# so splitting images links in chunks
chunks_num = math.ceil(len(images_links) / 7)
for chunk_num in range(chunks_num):
images = list()
slice_start = chunk_num * 7
slice_end = min((chunk_num + 1) * 7, len(images_links))
images_links_chunk = images_links[slice_start: slice_end]
for image_link in images_links_chunk:
image_name = image_link.split('/')[-1]
save_path = os.path.join(TMP_DRC_ABSPATH, image_name)
download(image_link, save_path)
with open(save_path, mode='rb') as file:
images.append(
(
'file',
(image_name, file.read())
)
)
upload_url = self.get_upload_server_url(upload_server_method,
group_id=self.group_id)
for image in images:
raw_photo, = self.upload_files_on_vk_server(method=save_method,
upload_url=upload_url,
files=[image],
group_id=self.group_id)
photos.append(VKPhoto.from_raw(raw_photo))
return photos
def get_videos_by_external_links(self, video_links: List[str]) -> List[VKVideo]:
video_ids = list(
'{owner_id}_{video_id}'.format(**response)
for response in self.videos_by_external_links(video_links)
)
raw_videos = self.api_session.video.get(videos=','.join(video_ids))['items']
videos = list(VKVideo.from_raw(raw_video) for raw_video in raw_videos)
return videos
def videos_by_external_links(self, links: List[str]):
for link in links:
response = self.api_session.video.save(link=link,
group_id=self.group_id)
with requests.Session() as session:
session.post(response['upload_url'])
yield response
def log_last_check_utc_timestamp(self):
config.set('schedule', 'last_check_utc_timestamp',
value=str(self.last_check_utc_timestamp))
with open(CONFIGURATION_FILE_PATH, mode='w') as configuration_file:
config.write(configuration_file)
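# Illustrative usage sketch (hedged; the ids and credentials below are made up,
# and a running VK session plus valid config are required for anything useful):
#
#   scheduler = Scheduler(app_id=123456, group_id=654321,
#                         user_login='user', user_password='secret',
#                         scope='wall,photos,video',
#                         last_check_utc_timestamp=0)
#   scheduler.check_posts()  # edits unchecked posts and logs the check time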
| gpl-3.0 | 6,878,774,586,737,158,000 | 44.491597 | 103 | 0.54946 | false |
repotvsupertuga/tvsupertuga.repository | script.module.streamtvsupertuga/lib/resources/lib/sources/it/wmz.py | 1 | 4252 | import requests,re,time,xbmcaddon
import resolveurl as urlresolver
from ..common import clean_title, clean_search,send_log,error_log
from ..scraper import Scraper
dev_log = xbmcaddon.Addon('script.module.universalscrapers').getSetting("dev_log")
User_Agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36'
class wmz(Scraper):
domains = ['http://www.watchmovieszone.com']
name = "WatchMoviesZone"
sources = []
def __init__(self):
self.base_link = 'http://www.watchmovieszone.com'
if dev_log=='true':
self.start_time = time.time()
def scrape_movie(self, title, year, imdb, debrid = False):
try:
search_id = clean_search(title.lower())
start_url = '%s/Movie/searchMovieName/?movie=%s' %(self.base_link,search_id)
headers={'User-Agent':User_Agent}
html = requests.get(start_url,headers=headers,timeout=5).content
match = re.compile('"ID":"(.+?)","movieName":"(.+?)"',re.DOTALL).findall(html)
for ID,item_name in match:
if 'dubbed' not in item_name.lower():
if clean_title(title).lower() in clean_title(item_name).lower():
if year in item_name:
item_name = item_name.replace(' ','_')
url = '%s/Movie/Index/%s/%s' %(self.base_link,ID,item_name)
#print 'wmz Movie pass '+url
#print 'wmz ID ' +ID
self.get_source(url,ID)
return self.sources
except Exception, argument:
if dev_log == 'true':
error_log(self.name,'Check Search')
return self.sources
def get_source(self,url,ID):
try:
# url not needed
new_url = '%s/Movie/getmyLinks/?movID=%s' %(self.base_link,ID)
#print '###### '+new_url
headers={'User-Agent':User_Agent}
OPEN = requests.get(new_url,headers=headers,timeout=5).content
#print OPEN
Regex = re.compile('"picLink":"(.+?)"',re.DOTALL).findall(OPEN)
count = 0
for link in Regex:
#print link
if 'streamango.com' in link:
try:
get_res=requests.get(link,timeout=5).content
qual = re.compile('{type:"video/mp4".+?height:(.+?),',re.DOTALL).findall(get_res)[0]
if '1080' in qual:
rez='1080p'
elif '720' in qual:
rez = '720p'
else:rez= 'DVD'
except:rez='DVD'
count +=1
self.sources.append({'source': 'Streamango', 'quality': rez, 'scraper': self.name, 'url': link,'direct': False})
if 'openload' in link:
try:
chk = requests.get(link).content
rez = re.compile('"description" content="(.+?)"',re.DOTALL).findall(chk)[0]
if '1080' in rez:
res='1080p'
elif '720' in rez:
res='720p'
else:res='DVD'
except: res = 'DVD'
count +=1
self.sources.append({'source': 'Openload', 'quality': res, 'scraper': self.name, 'url': link,'direct': False})
else:
if urlresolver.HostedMediaFile(link).valid_url():
host = link.split('//')[1].replace('www.','')
host = host.split('/')[0].split('.')[0].title()
count +=1
self.sources.append({'source': host, 'quality': 'DVD', 'scraper': self.name, 'url': link,'direct': False})
if dev_log=='true':
end_time = time.time() - self.start_time
send_log(self.name,end_time,count)
except:
pass
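# Illustrative usage sketch (hedged; only meaningful inside Kodi, where the
# xbmcaddon and resolveurl imports above resolve; title/ids are examples):
#
#   scraper = wmz()
#   sources = scraper.scrape_movie('Inception', '2010', imdb='tt1375666')
#   # -> list of dicts such as
#   #    {'source': 'Openload', 'quality': '720p',
#   #     'scraper': 'WatchMoviesZone', 'url': '...', 'direct': False}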
| gpl-2.0 | 652,577,545,154,177,800 | 46.775281 | 144 | 0.46731 | false |
jokuf/hack-blog | users/migrations/0002_auto_20170322_2028.py | 1 | 2798 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-22 20:28
from __future__ import unicode_literals
from django.db import migrations, models
import users.managers
class Migration(migrations.Migration):
dependencies = [
('auth', '0008_alter_user_username_max_length'),
('users', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='author',
options={'verbose_name': 'user', 'verbose_name_plural': 'users'},
),
migrations.AlterModelManagers(
name='author',
managers=[
('objects', users.managers.UserManager()),
],
),
migrations.AddField(
model_name='author',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to='avatars/'),
),
migrations.AddField(
model_name='author',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='author',
name='is_active',
field=models.BooleanField(default=True, verbose_name='active'),
),
migrations.AddField(
model_name='author',
name='is_superuser',
field=models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status'),
),
migrations.AddField(
model_name='author',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
migrations.AlterField(
model_name='author',
name='date_joined',
field=models.DateTimeField(auto_now_add=True, verbose_name='date joined'),
),
migrations.AlterField(
model_name='author',
name='email',
field=models.EmailField(max_length=254, unique=True, verbose_name='email address'),
),
migrations.AlterField(
model_name='author',
name='first_name',
field=models.CharField(blank=True, max_length=30, verbose_name='first name'),
),
migrations.AlterField(
model_name='author',
name='last_name',
field=models.CharField(blank=True, max_length=30, verbose_name='last name'),
),
]
| mit | -6,814,778,244,652,638,000 | 37.861111 | 256 | 0.585061 | false |
asteca/ASteCA | packages/best_fit/DEPRECATED/abcpmc_algor_DEPRECATED.py | 1 | 9822 |
import numpy as np
from scipy.optimize import differential_evolution as DE
import time as t
from .abcpmc import sampler, threshold
from ..synth_clust import synth_cluster
from . import likelihood
from .emcee_algor import varPars, closeSol, discreteParams, convergenceVals
def main(
lkl_method, e_max, err_lst, completeness, max_mag_syn,
fundam_params, obs_clust, theor_tracks, R_V, ext_coefs, st_dist_mass,
N_fc, cmpl_rnd, err_rnd, nwalkers_abc, nsteps_abc, nburn_abc,
priors_abc):
varIdxs, ndim, ranges = varPars(fundam_params)
def dist(synth_clust, obs_clust):
lkl = np.inf
if synth_clust:
lkl = likelihood.main(lkl_method, synth_clust, obs_clust)
return lkl
def postfn(model):
# Re-scale z and M
model_scale = [
model[0] / 100., model[1], model[2], model[3] * 10.,
model[4] * 1000., model[5]]
check_ranges = [
r[0] <= p <= r[1] for p, r in zip(*[model_scale, ranges[varIdxs]])]
synth_clust = []
# If some parameter is outside of the given ranges, don't bother
# obtaining the proper model.
if all(check_ranges):
model_proper = closeSol(fundam_params, varIdxs, model_scale)
# Metallicity and age indexes to identify isochrone.
m_i = fundam_params[0].index(model_proper[0])
a_i = fundam_params[1].index(model_proper[1])
isochrone = theor_tracks[m_i][a_i]
# Generate synthetic cluster.
synth_clust = synth_cluster.main(
e_max, err_lst, completeness, max_mag_syn, st_dist_mass,
isochrone, R_V, ext_coefs, N_fc, cmpl_rnd, err_rnd,
model_proper)
return synth_clust
# TODO add these parameters to the input params file
alpha, init_eps = 95, None
N_conv, tol_conv = 50., 0.01
max_secs = 22. * 60. * 60.
# Break out when AF is low.
# af_low, af_min_steps = 0.001, .1
max_t_walker = 30.
# eps_stuck_perc, N_eps_stuck_max = .005, 100
# Start timing.
elapsed = 0.
available_secs = max(30, max_secs)
start_t = t.time()
abcsampler = sampler.Sampler(
N=nwalkers_abc, Y=obs_clust, postfn=postfn, dist=dist)
# Set proposal
# sampler.particle_proposal_cls = sampler.OLCMParticleProposal
if init_eps is None:
# Estimate initial threshold value using DE.
def lnprob(model):
synth_clust = postfn(model)
return dist(synth_clust, obs_clust)
# Scale parameters bounds.
bounds = [
ranges[0] * 100., ranges[1], ranges[2], ranges[3] / 10.,
ranges[4] / 1000., ranges[5]]
result = DE(lnprob, bounds, maxiter=20)
init_eps = 4. * result.fun
print(" Initial threshold value: {:.2f}".format(init_eps))
# old_eps = init_eps
# TODO pass type of threshold from params file
# eps = threshold.LinearEps(T, 5000, init_eps)
eps = threshold.ConstEps(nsteps_abc, init_eps)
# Stddev values as full range.
std = np.eye(ndim) * (ranges.max(axis=1) - ranges.min(axis=1))
# Means as middle points in ranges.
means = (ranges.max(axis=1) + ranges.min(axis=1)) / 2.
# Scale values.
std[0], means[0] = std[0] * 100, means[0] * 100
std[3], means[3] = std[3] / 10, means[3] / 10
std[4], means[4] = std[4] / 1000., means[4] / 1000.
# Gaussian prior.
print(means)
print(std)
prior = sampler.GaussianPrior(mu=means, sigma=std)
# # We'll track how the average autocorrelation time estimate changes
# tau_index, autocorr_vals = 0, np.empty(nsteps_abc)
# # This will be useful to testing convergence
# old_tau = np.inf
# Check for convergence every 2% of steps or 100, whichever value
# is lower.
# N_steps_conv = min(int(nsteps_abc * 0.02), 100)
map_sol_old, N_models, prob_mean = [[], np.inf], 0, []
# N_eps_stuck = 0
chains_nruns, maf_steps, map_lkl = [], [], []
milestones = list(range(5, 101, 5))
for pool in abcsampler.sample(prior, eps):
print(
pool.t, pool.eps, pool.ratio, np.min(pool.dists),
np.mean(pool.dists))
chains_nruns.append(pool.thetas)
maf = pool.ratio
maf_steps.append([pool.t, maf])
N_models += nwalkers_abc / maf
# reduce eps value
# old_eps = eps.eps
eps.eps = np.percentile(pool.dists, alpha)
# # Check if threshold is stuck.
# if abs(eps.eps - old_eps) < eps_stuck_perc * eps.eps:
# N_eps_stuck += 1
# else:
# N_eps_stuck = 0
# if N_eps_stuck > N_eps_stuck_max:
# print(" Threshold is stuck (runs={}).".format(pool.t + 1))
# break
# if maf < af_low and pool.t > int(af_min_steps * nsteps_abc):
# print(" AF<{} (runs={})".format(af_low, pool.t + 1))
# break
if t.time() - start_t > (max_t_walker * nwalkers_abc):
print(" Sampler is stuck (runs={})".format(pool.t + 1))
break
elapsed += t.time() - start_t
if elapsed >= available_secs:
print(" Time consumed (runs={})".format(pool.t + 1))
break
start_t = t.time()
# # Only check convergence every 'N_steps_conv' steps
# if (pool.t + 1) % N_steps_conv:
# continue
# # Compute the autocorrelation time so far. Using tol=0 means that
# # we'll always get an estimate even if it isn't trustworthy.
# try:
# tau = autocorr.integrated_time(np.array(chains_nruns), tol=0)
# autocorr_vals[tau_index] = np.nanmean(tau)
# tau_index += 1
# # Check convergence
# converged = np.all(tau * N_conv < (pool.t + 1))
# converged &= np.all(np.abs(old_tau - tau) / tau < tol_conv)
# if converged:
# print(" Convergence achieved (runs={}).".format(pool.t + 1))
# break
# old_tau = tau
# except FloatingPointError:
# pass
# Store MAP solution in this iteration.
prob_mean.append([pool.t, np.mean(pool.dists)])
idx_best = np.argmin(pool.dists)
# Update if a new optimal solution was found.
if pool.dists[idx_best] < map_sol_old[1]:
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [pars[0] / 100., pars[1], pars[2], pars[3] * 10.,
pars[4] * 1000., pars[5]]
map_sol_old = [
closeSol(fundam_params, varIdxs, pars),
pool.dists[idx_best]]
map_lkl.append([pool.t, map_sol_old[1]])
# Print progress.
percentage_complete = (100. * (pool.t + 1) / nsteps_abc)
if len(milestones) > 0 and percentage_complete >= milestones[0]:
map_sol, logprob = map_sol_old
print("{:>3}% ({:.3f}) LP={:.1f} ({:g}, {:g}, {:.3f}, {:.2f}"
", {:g}, {:.2f})".format(
milestones[0], maf, logprob, *map_sol) +
" [{:.0f} m/s]".format(N_models / elapsed))
milestones = milestones[1:]
runs = pool.t + 1
# Evolution of the mean autocorrelation time.
tau_autocorr = np.array([np.nan] * 10) # autocorr_vals[:tau_index]
tau_index = np.nan
N_steps_conv = runs
# Final MAP fit.
idx_best = np.argmin(pool.dists)
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [
pars[0] / 100., pars[1], pars[2], pars[3] * 10., pars[4] * 1000.,
pars[5]]
map_sol = closeSol(fundam_params, varIdxs, pars)
map_lkl_final = pool.dists[idx_best]
abcsampler.close()
# Shape: (runs, nwalkers, ndim)
chains_nruns = np.array(chains_nruns)
# De-scale parameters.
chains_nruns[:, :, 0] = chains_nruns[:, :, 0] / 100.
chains_nruns[:, :, 3] = chains_nruns[:, :, 3] * 10.
chains_nruns[:, :, 4] = chains_nruns[:, :, 4] * 1000.
# Burn-in range.
Nb = int(runs * nburn_abc)
# Burn-in. Shape: (ndim, nwalkers, runs)
pars_chains_bi = discreteParams(
fundam_params, varIdxs, chains_nruns[:Nb, :, :]).T
# Change values for the discrete parameters with the closest valid values.
chains_nruns = discreteParams(
fundam_params, varIdxs, chains_nruns[Nb:, :, :])
mcmc_trace = chains_nruns.reshape(-1, ndim).T
# import matplotlib.pyplot as plt
# import corner
# corner.corner(
# mcmc_trace.T, quantiles=[0.16, 0.5, 0.84], show_titles=True)
# # levels=(1 - np.exp(-0.5),))
# plt.savefig("corner.png", dpi=300)
# Convergence parameters.
acorr_t, max_at_c, min_at_c, geweke_z, emcee_acorf, mcmc_ess, minESS,\
mESS, mESS_epsilon = convergenceVals(
'abc', ndim, varIdxs, N_conv, chains_nruns, mcmc_trace)
# Store mean solution.
mean_sol = closeSol(fundam_params, varIdxs, np.mean(mcmc_trace, axis=1))
isoch_fit_params = {
'varIdxs': varIdxs, 'nsteps_abc': runs, 'mean_sol': mean_sol,
'nburn_abc': Nb, 'map_sol': map_sol, 'map_lkl': map_lkl,
'map_lkl_final': map_lkl_final, 'prob_mean': prob_mean,
'mcmc_elapsed': elapsed, 'mcmc_trace': mcmc_trace,
'pars_chains_bi': pars_chains_bi, 'pars_chains': chains_nruns.T,
'maf_steps': maf_steps, 'autocorr_time': acorr_t,
'max_at_c': max_at_c, 'min_at_c': min_at_c,
'minESS': minESS, 'mESS': mESS, 'mESS_epsilon': mESS_epsilon,
'emcee_acorf': emcee_acorf, 'geweke_z': geweke_z,
'mcmc_ess': mcmc_ess,
'N_steps_conv': N_steps_conv, 'N_conv': N_conv, 'tol_conv': tol_conv,
'tau_index': tau_index, 'tau_autocorr': tau_autocorr
}
return isoch_fit_params
| gpl-3.0 | -8,437,757,124,377,861,000 | 35.786517 | 79 | 0.563225 | false |
konker/switchd | util/pidfile.py | 1 | 1188 | import fcntl
import os
# FROM: http://code.activestate.com/recipes/577911-context-manager-for-a-daemon-pid-file/
class PidFile(object):
"""Context manager that locks a pid file. Implemented as class
not generator because daemon.py is calling .__exit__() with no parameters
instead of the None, None, None specified by PEP-343."""
# pylint: disable=R0903
def __init__(self, path):
self.path = path
self.pidfile = None
def __enter__(self):
self.pidfile = open(self.path, "a+")
try:
fcntl.flock(self.pidfile.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
raise SystemExit("Already running according to " + self.path)
self.pidfile.seek(0)
self.pidfile.truncate()
self.pidfile.write(str(os.getpid()))
self.pidfile.flush()
self.pidfile.seek(0)
return self.pidfile
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
try:
self.pidfile.close()
except IOError as err:
# ok if file was just closed elsewhere
if err.errno != 9:
raise
os.remove(self.path)
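# Illustrative usage sketch (hedged; the path below is an example only).
if __name__ == "__main__":
    import time
    with PidFile("/tmp/switchd.example.pid"):
        # A second process entering this block with the same path would
        # exit with "Already running according to ...".
        time.sleep(1)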
| mit | 6,279,858,035,278,760,000 | 32 | 89 | 0.601852 | false |
BK-TN/Islander | systems.py | 1 | 4654 | import components, actions
import pygame
import math
from collections import defaultdict
from point import Point
class DrawingSystem:
def __init__(self, screen, camera_target):
self.screen = screen
self.camera_pos = Point(0,0,0)
self.camera_target = camera_target
self.tileset = pygame.image.load("tileset.png") #12x16
self.tileset.set_colorkey((0,0,0))
self.tilew = 12
self.tileh = 16
self.entities = []
def check_entity(self, entity):
pass
def process(self, world):
def draw(drawable, draw_background):
# Find the tile to use based on the ASCII value of the char to draw
src_x = ord(drawable.char) % 16
src_y = math.floor(ord(drawable.char) / 16)
# Create the rect this tile should be drawn in
rect = pygame.Rect(
(screentiles_x / 2 - self.camera_pos.x + x) * self.tilew,
(screentiles_y / 2 - self.camera_pos.y + y) * self.tileh,
self.tilew,
self.tileh)
# Set the tile color by changing the tileset's palette (Which is really fast)
self.tileset.set_palette_at(1,drawable.color)
if draw_background:
pygame.draw.rect(self.screen, drawable.bgcolor, rect)
# Draw tile
self.screen.blit(
self.tileset,
(rect.x,rect.y),
pygame.Rect(src_x * self.tilew, src_y * self.tileh, self.tilew, self.tileh)
)
if self.camera_target != None:
pos = world.find_pos(self.camera_target)
self.camera_pos = pos
self.screen.fill((0,0,0))
# Find the max amount of tiles that fit the with and height of the screen
# So we can calculate the center of it
screentiles_x = self.screen.get_width() / self.tilew
screentiles_y = self.screen.get_height() / self.tileh
# Calculate 'borders' to draw within
left = math.floor(self.camera_pos.x - screentiles_x/2)
right = math.floor(self.camera_pos.x + screentiles_x/2)
top = math.floor(self.camera_pos.y - screentiles_y/2)
bottom = math.floor(self.camera_pos.y + screentiles_y/2)
for x in range(left,right):
for y in range(top,bottom):
#gridslice = sorted(world.search_slice(x,y),key=lambda e: world.find_pos(e).z)
drawn = False
for z in range(self.camera_pos.z,10):
if drawn: break
entities_on_pos = world.check_spot(Point(x,y,z))
drawables = [d for d in (e.get(components.Drawable) for e in entities_on_pos) if d != None]
if len(drawables) > 0:
drawables = sorted(drawables, key=lambda d: d.depth)
draw(drawables[0], z == self.camera_pos.z)
drawn = True
pygame.display.flip()
class MovementSystem:
def __init__(self):
self.entities = []
def check_entity(self, entity):
if entity.has(components.Character) or entity.has(components.MoveRight):
self.entities.append(entity)
def process(self, world):
def try_move(world, entity, pos):
can_move = True
physical_comp = entity.get(components.Physical)
if physical_comp != None:
space_left = world.get_spot_space(pos)
if space_left < physical_comp.volume:
can_move = False
if can_move:
world.move_entity(entity, pos)
for e in self.entities:
character = e.get(components.Character)
moveright = e.get(components.MoveRight)
if character != None:
movement = e.get_actions(actions.MoveAction)
for mov in movement:
                    try_move(world, e, Point(mov.xtarget, mov.ytarget, 0)) #TODO: add mov.ztarget
if moveright != None:
pos = world.find_pos(e)
try_move(world, e, Point(pos.x + 1, pos.y, pos.z))
class PhysicsSystem:
def __init__(self):
self.entities = []
def check_entity(self, entity):
if entity.has(components.Physical):
self.entities.append(entity)
def process(self, world):
for e in self.entities:
phys = e.get(components.Physical)
pos = world.find_pos(e)
pos_below = Point(pos.x,pos.y,pos.z+1)
space_below = world.get_spot_space(pos_below)
if space_below < phys.volume:
world.move_entity(e,pos_below)
| mit | 6,984,329,801,355,357,000 | 38.777778 | 111 | 0.561023 | false |
sadimanna/computer_vision | clustering/kmeansppclustering_with_gap_statistic.py | 1 | 2599 | #K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters
import sys
import numpy as np
import scipy.io as sio
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.svm import SVC
filename = sys.argv[1]
datafile = sio.loadmat(filename)
data = datafile['bow']
sizedata=[len(data), len(data[0])]
disp = []
optimal_ks = []
#Determining the optimal number of k with gap statistic method
def gap_statistic(data):
sizedata = [len(data),len(data[0])]
SD = []
gap = []
for knum in xrange(1,20):
#I assumed that the number of clusters in my data won't be more than 20, this can be changed accordingly
print knum
#Clustering original Data
kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1)
kmeanspp.fit(data)
dispersion = kmeanspp.inertia_
#Clustering Reference Data
nrefs = 10
refDisp = np.zeros(nrefs)
for nref in xrange(nrefs):
refdata = np.random.random_sample(tuple(sizedata))
refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1)
refkmeans.fit(refdata)
refdisp = refkmeans.inertia_
refDisp[nref]=np.log(refdisp)
mean_log_refdisp = np.mean(refDisp)
gap.append(mean_log_refdisp-np.log(dispersion))
sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5
SD.append(sd)
	SD = [sd*((1+(1.0/nrefs))**0.5) for sd in SD]
opt_k = None
diff = []
for i in xrange(len(gap)-1):
diff = (SD[i+1]-(gap[i+1]-gap[i]))
if diff>0:
			opt_k = i+1
break
if opt_k < 20:
#print opt_k
return opt_k
else:
return 20
#Returning 20 if opt_k is more than 20 in my case, as I wanted not to search more than 20.
# Not required if range is larger.
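# Selection rule implemented above (Tibshirani et al., 2001): pick the
# smallest k such that Gap(k) >= Gap(k+1) - s_{k+1}, where
# s_k = sd_k * sqrt(1 + 1/B) and B = nrefs reference data sets.
# The 'diff' computed in the loop is s_{k+1} - (Gap(k+1) - Gap(k)), so
# diff > 0 is exactly this stopping condition.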
ntrials = 50
for ntrial in xrange(ntrials):
print 'ntrial: ',ntrial
optimal_ks.append(gap_statistic(data))
#For plotting the gap statistic measure
#plt.plot(np.linspace(10,19,10,True),gap)
#plt.show()
unique_opt_k = list(set(optimal_ks))
k_count = {}
count_opt_k = 0
second_opt_k = 0
opt_k = 0
for u_o_k in unique_opt_k:
count = optimal_ks.count(u_o_k)
k_count[u_o_k]=count
if count>count_opt_k:
count_opt_k = count
opt_k = u_o_k
elif count==count_opt_k:
second_opt_k = u_o_k
print opt_k
print k_count
#Clusterin with optimal number of k
kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1)
kmeanspp.fit(data)
centers = kmeanspp.cluster_centers_
clusterlabels = kmeanspp.labels_
print clusterlabels
mdict = {}
mdict['clusterlabels'] = clusterlabels
sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column')
print 'dan dana dan done...'
| gpl-3.0 | 3,089,997,945,052,346,400 | 28.202247 | 106 | 0.696037 | false |
JarbasAI/jarbas-core | mycroft/jarbas-skills/skill_apod/__init__.py | 1 | 4205 | from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
from mycroft.messagebus.message import Message
try:
# try to load display service
import sys
from os.path import dirname
sys.path.append(dirname(dirname(__file__)))
from service_display.displayservice import DisplayService
except:
# not installed, use webrowser
import webbrowser
import unirest
import urllib2
import os
__author__ = 'jarbas'
LOGGER = getLogger(__name__)
class FbPost():
def __init__(self, emitter):
self.emitter = emitter
def post_text(self, text, id="me", speech= "Making a post on face book", link= None):
self.emitter.emit(Message("fb_post_request", {"type":"text", "id":id, "link":link, "text":text, "speech":speech}))
def post_link(self, link, text="", id="me", speech= "Sharing a link on face book"):
self.emitter.emit(Message("fb_post_request", {"type":"link", "id":id, "link":link, "text":text, "speech":speech}))
class AstronomyPicSkill(MycroftSkill):
def __init__(self):
super(AstronomyPicSkill, self).__init__(name="AstronomyPicSkill")
try:
self.key = self.config_apis["NASAAPI"]
except:
try:
self.key = self.config["NASAAPI"]
except:
self.key = "DEMO_KEY"
self.save = True
try:
self.save_path = self.config_core["database_path"] + "/astronomy_picture_of_the_day"
self.txt_save_path = self.config_core["database_path"] + "/astronomy_picture_of_the_day_descriptions"
except:
try:
self.save_path = self.config["save_path"]
self.txt_save_path = self.config["txt_path"]
except:
self.save_path = os.path.dirname(__file__) + "/apod"
self.txt_save_path = os.path.dirname(__file__) + "/apod"
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
if not os.path.exists(self.txt_save_path):
os.makedirs(self.txt_save_path)
self.use_browser = False
def initialize(self):
apod_intent = IntentBuilder("ApodIntent").\
require("APODKeyword").build()
self.register_intent(apod_intent, self.handle_apod_intent)
fb_apod_intent = IntentBuilder("FbApodIntent"). \
require("fbAPODKeyword").build()
self.register_intent(fb_apod_intent, self.handle_fb_apod_intent)
try:
self.display_service = DisplayService(self.emitter)
except:
self.use_browser = True
self.poster = FbPost(self.emitter)
def handle_apod_intent(self, message):
apod_url = "https://api.nasa.gov/planetary/apod?api_key=" + self.key
response = unirest.get(apod_url)
title = response.body["title"]
url = response.body["url"]
summary = response.body["explanation"]
apod = urllib2.Request(url)
raw_img = urllib2.urlopen(apod).read()
save_path = self.save_path + "/" + title.replace(" ", "_") + ".jpg"
f = open(save_path, 'wb')
f.write(raw_img)
f.close()
self.speak(title)
if not self.use_browser:
self.display_service.show(save_path, message.data["utterance"])
else:
webbrowser.open(save_path)
self.speak(summary)
if self.save:
save_path = self.txt_save_path + "/" + title.replace(" ", "_") + ".txt"
# save description
f = open(save_path, 'wb')
summary = summary.encode('utf-8')
f.write(summary)
f.close()
def handle_fb_apod_intent(self, message):
apod_url = "https://api.nasa.gov/planetary/apod?api_key=" + self.key
response = unirest.get(apod_url)
text = response.body["title"]
url = response.body["url"]
text += "\n" + response.body["explanation"]
self.poster.post_link(link=url, text=text, speech="Sharing astronomy picture of the day on face book")
def stop(self):
pass
def create_skill():
return AstronomyPicSkill()
| gpl-3.0 | -1,056,637,569,881,109,000 | 32.110236 | 122 | 0.591677 | false |
dannykopping/mysql-utilities | mysql/utilities/common/tools.py | 1 | 12360 | #
# Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This module contains methods for working with mysql server tools.
"""
import os
import sys
import shutil
import time
import subprocess
import inspect
from mysql.utilities import PYTHON_MIN_VERSION, PYTHON_MAX_VERSION
from mysql.utilities.common.format import print_list
from mysql.utilities.exception import UtilError
def _add_basedir(search_paths, path_str):
"""Add a basedir and all known sub directories
This method builds a list of possible paths for a basedir for locating
special MySQL files like mysqld (mysqld.exe), etc.
search_paths[inout] List of paths to append
path_str[in] The basedir path to append
"""
search_paths.append(path_str)
search_paths.append(os.path.join(path_str, "sql")) # for source trees
search_paths.append(os.path.join(path_str, "client")) # for source trees
search_paths.append(os.path.join(path_str, "share"))
search_paths.append(os.path.join(path_str, "scripts"))
search_paths.append(os.path.join(path_str, "bin"))
search_paths.append(os.path.join(path_str, "libexec"))
search_paths.append(os.path.join(path_str, "mysql"))
def get_tool_path(basedir, tool, fix_ext=True, required=True,
defaults_paths=[], search_PATH=False):
"""Search for a MySQL tool and return the full path
basedir[in] The initial basedir to search (from mysql server)
tool[in] The name of the tool to find
fix_ext[in] If True (default is True), add .exe if running on
Windows.
required[in] If True (default is True), and error will be
generated and the utility aborted if the tool is
not found.
defaults_paths[in] Default list of paths to search for the tool.
By default an empty list is assumed, i.e. [].
search_PATH[in] Boolean value that indicates if the paths specified by
the PATH environment variable will be used to search
for the tool. By default the PATH will not be searched,
i.e. search_PATH=False.
Returns (string) full path to tool
"""
search_paths = []
if basedir:
# Add specified basedir path to search paths
_add_basedir(search_paths, basedir)
if defaults_paths and len(defaults_paths):
# Add specified default paths to search paths
for path in defaults_paths:
search_paths.append(path)
else:
# Add default basedir paths to search paths
_add_basedir(search_paths, "/usr/local/mysql/")
_add_basedir(search_paths, "/usr/sbin/")
_add_basedir(search_paths, "/usr/share/")
# Search in path from the PATH environment variable
if search_PATH:
for path in os.environ['PATH'].split(os.pathsep):
search_paths.append(path)
if os.name == "nt" and fix_ext:
tool = tool + ".exe"
# Search for the tool
for path in search_paths:
norm_path = os.path.normpath(path)
if os.path.isdir(norm_path):
toolpath = os.path.join(norm_path, tool)
if os.path.isfile(toolpath):
return toolpath
else:
if tool == "mysqld.exe":
toolpath = os.path.join(norm_path, "mysqld-nt.exe")
if os.path.isfile(toolpath):
return toolpath
if required:
raise UtilError("Cannot find location of %s." % tool)
return None
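# Illustrative call (hedged; the basedir and default paths are examples only):
#
#   mysqld_path = get_tool_path("/usr/local/mysql/", "mysqld",
#                               defaults_paths=["/opt/mysql/bin"],
#                               search_PATH=True)
#
# A UtilError is raised when the tool cannot be located and required=True.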
def delete_directory(dir):
"""Remove a directory (folder) and its contents.
dir[in] target directory
"""
import time
if os.path.exists(dir):
# It can take up to 10 seconds for Windows to 'release' a directory
# once a process has terminated. We wait...
if os.name == "nt":
stop = 10
i = 1
while i < stop and os.path.exists(dir):
shutil.rmtree(dir, True)
time.sleep(1)
i += 1
else:
shutil.rmtree(dir, True)
def execute_script(run_cmd, file=None, options=[], verbosity=False):
"""Execute a script.
This method spawns a subprocess to execute a script. If a file is
specified, it will direct output to that file else it will suppress
all output from the script.
run_cmd[in] command/script to execute
file[in] file path name to file, os.stdout, etc.
Default is None (do not log/write output)
options[in] arguments for script
Default is no arguments ([])
verbosity[in] show result of script
Default is False
Returns int - result from process execution
"""
if verbosity:
f_out = sys.stdout
else:
if not file:
file = os.devnull
f_out = open(file, 'w')
str_opts = [str(opt) for opt in options]
cmd_opts = " ".join(str_opts)
command = " ".join([run_cmd, cmd_opts])
if verbosity:
print "# SCRIPT EXECUTED:", command
proc = subprocess.Popen(command, shell=True,
stdout=f_out, stderr=f_out)
ret_val = proc.wait()
if not verbosity:
f_out.close()
return ret_val
def ping_host(host, timeout):
"""Execute 'ping' against host to see if it is alive.
host[in] hostname or IP to ping
timeout[in] timeout in seconds to wait
returns bool - True = host is reachable via ping
"""
if sys.platform == "darwin":
run_cmd = "ping -o -t %s %s" % (timeout, host)
elif os.name == "posix":
run_cmd = "ping -w %s %s" % (timeout, host)
else: # must be windows
run_cmd = "ping -n %s %s" % (timeout, host)
ret_val = execute_script(run_cmd)
return (ret_val == 0)
def get_mysqld_version(mysqld_path):
"""Return the version number for a mysqld executable.
mysqld_path[in] location of the mysqld executable
Returns tuple - (major, minor, release), or None if error
"""
import subprocess
args = [
" --version",
]
out = open("version_check", 'w')
proc = subprocess.Popen("%s --version" % mysqld_path,
stdout=out, stderr=out, shell=True)
proc.wait()
out.close()
out = open("version_check", 'r')
line = None
for line in out.readlines():
if "Ver" in line:
break
out.close()
try:
os.unlink('version_check')
except:
pass
if line is None:
return None
version = line.split(' ', 5)[3]
try:
maj, min, dev = version.split(".")
rel = dev.split("-")
return (maj, min, rel[0])
except:
return None
return None
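# Example (hedged; the path is illustrative): get_mysqld_version("/usr/sbin/mysqld")
# returns a tuple of strings such as ('5', '6', '21'), or None when the
# version string cannot be parsed.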
def show_file_statistics(file_name, wild=False, out_format="GRID"):
"""Show file statistics for file name specified
file_name[in] target file name and path
wild[in] if True, get file statistics for all files with prefix of
file_name. Default is False
out_format[in] output format to print file statistics. Default is GRID.
"""
def _get_file_stats(path, file_name):
stats = os.stat(os.path.join(path, file_name))
return ((file_name, stats.st_size, time.ctime(stats.st_ctime),
time.ctime(stats.st_mtime)))
columns = ["File", "Size", "Created", "Last Modified"]
rows = []
path, filename = os.path.split(file_name)
if wild:
for root, dirs, files in os.walk(path):
for f in files:
if f.startswith(filename):
rows.append(_get_file_stats(path, f))
else:
rows.append(_get_file_stats(path, filename))
print_list(sys.stdout, out_format, columns, rows)
def remote_copy(filepath, user, host, local_path, verbosity=0):
"""Copy a file from a remote machine to the localhost.
filepath[in] The full path and file name of the file on the remote
machine
    user[in]           Remote login
    host[in]           Hostname or IP address of the remote machine
    local_path[in]     The path to where the file is to be copied
    verbosity[in]      Verbosity level (default 0)
    Returns bool - True = success, False = failure or exception
"""
if os.name == "posix": # use scp
run_cmd = "scp %s@%s:%s %s" % (user, host, filepath, local_path)
if verbosity > 1:
print("# Command =%s" % run_cmd)
print("# Copying file from %s:%s to %s:" % (host, filepath, local_path))
proc = subprocess.Popen(run_cmd, shell=True)
        ret_val = proc.wait()
        return ret_val == 0
else:
print("Remote copy not supported. Please use UNC paths and omit "
"the --remote-login option to use a local copy operation.")
return True
def check_python_version(min_version=PYTHON_MIN_VERSION,
max_version=PYTHON_MAX_VERSION,
raise_exception_on_fail=False,
name=None):
"""Check the Python version compatibility.
By default this method uses constants to define the minimum and maximum
Python versions required. It's possible to override this by passing new
values on ``min_version`` and ``max_version`` parameters.
It will run a ``sys.exit`` or raise a ``UtilError`` if the version of
Python detected it not compatible.
min_version[in] Tuple with the minimum Python version
required (inclusive).
max_version[in] Tuple with the maximum Python version
required (exclusive).
raise_exception_on_fail[in] Boolean, it will raise a ``UtilError`` if
True and Python detected is not compatible.
name[in] String for a custom name, if not provided
will get the module name from where this
function was called.
"""
# Only use the fields: major, minor and micro
sys_version = sys.version_info[:3]
# Test min version compatibility
is_compat = min_version <= sys_version
# Test max version compatibility if it's defined
if is_compat and max_version:
is_compat = sys_version < max_version
if not is_compat:
if not name:
# Get the utility name by finding the module
# name from where this function was called
frm = inspect.stack()[1]
mod = inspect.getmodule(frm[0])
mod_name, ext = os.path.basename(mod.__file__).split('.')
name = '%s utility' % mod_name
# Build the error message
if max_version:
max_version_error_msg = 'or higher and lower than %s' % \
'.'.join(map(str, max_version))
else:
max_version_error_msg = 'or higher'
error_msg = (
'The %(name)s requires Python version %(min_version)s '
'%(max_version_error_msg)s. The version of Python detected was '
'%(sys_version)s. You may need to install or redirect the '
'execution of this utility to an environment that includes a '
'compatible Python version.'
) % {
'name': name,
'sys_version': '.'.join(map(str, sys_version)),
'min_version': '.'.join(map(str, min_version)),
'max_version_error_msg': max_version_error_msg
}
if raise_exception_on_fail:
raise UtilError(error_msg)
print('ERROR: %s' % error_msg)
sys.exit(1)
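# Illustrative call (hedged; the version bounds and name are examples only):
#
#   check_python_version(min_version=(2, 6, 0), max_version=(3, 0, 0),
#                        name='example utility')
#
# exits (or raises UtilError when raise_exception_on_fail=True) if the running
# interpreter falls outside the given range.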
| gpl-2.0 | -1,920,105,852,500,991,700 | 34.014164 | 80 | 0.589401 | false |
pearu/sympycore | sympycore/heads/base_exp_dict.py | 1 | 11228 |
__all__ = ['BASE_EXP_DICT']
from .base import heads, heads_precedence, ArithmeticHead
from ..core import init_module, Expr
init_module.import_heads()
init_module.import_numbers()
init_module.import_lowlevel_operations()
class BaseExpDictHead(ArithmeticHead):
""" BASE_EXP_DICT expression data is a dictionary of base and
exponent pairs. All base parts must be Expr instances.
    For example, ``Algebra(BASE_EXP_DICT, {x:2, y:a, 3:1, 2:1/2})``
represents ``3 * 2**(1/2) * x**2 * y**a``.
"""
def is_data_ok(self, cls, data):
if type(data) is dict:
for item in data.iteritems():
msg = POW.is_data_ok(cls, item)
if msg:
return 'POW data=%s: %s' % (item, msg)
b, e = item
if b.head is POW:
return 'BASE_EXP_DICT key cannot be POW'
else:
return 'data must be dict instance but got %s' % (type(data))
return
def __repr__(self): return 'BASE_EXP_DICT'
def data_to_str_and_precedence(self, cls, base_exp_dict):
factors = []
coeff = None
for base, exp in base_exp_dict.items():
if exp==1 and base.head is NUMBER:
coeff = base.data
else:
factors.append(cls(POW, (base, exp)))
if coeff is not None:
return TERM_COEFF.data_to_str_and_precedence(cls, (cls(MUL, factors), coeff))
return MUL.data_to_str_and_precedence(cls, factors)
def reevaluate(self, cls, data):
r = cls(NUMBER, 1)
for base, exp in data.iteritems():
r *= base ** exp
return r
def to_ADD(self, Algebra, base_exp_dict, expr):
return Algebra(ADD, [expr])
def term_coeff(self, cls, expr):
data = expr.data
coeff = base_exp_dict_get_coefficient(cls, data)
if coeff is not None:
data = data.copy()
del data[coeff]
r = base_exp_dict_new(cls, data)
t, c = r.head.term_coeff(cls, r)
return t, c * coeff
return expr, 1
def new(self, cls, base_exp_dict, evaluate=True):
return base_exp_dict_new(cls, base_exp_dict)
def neg(self, cls, expr):
data = expr.data
coeff = base_exp_dict_get_coefficient(cls, data)
if coeff is None:
return cls(TERM_COEFF, (expr, -1))
data = data.copy()
del data[coeff]
return term_coeff_new(cls, (base_exp_dict_new(cls, data), -coeff))
def inplace_commutative_data_mul(self, cls, data, rhs):
"""
Multiply base-exp-dictionary with rhs inplace.
"""
rhead, rdata = rhs.pair
if rhead is SYMBOL or rhead is ADD or rhead is APPLY or rhead is DIFF or rhead is FDIFF:
base_exp_dict_add_item(cls, data, rhs, 1)
elif rhead is NUMBER:
base_exp_dict_add_item(cls, data, rhs, 1)
elif rhead is TERM_COEFF:
term, coeff = rdata
base_exp_dict_add_item(cls, data, term, 1)
base_exp_dict_add_item(cls, data, cls(NUMBER, coeff), 1)
elif rhead is BASE_EXP_DICT:
base_exp_dict_add_dict(cls, data, rdata)
elif rhead is POW:
base, exp = rdata
base_exp_dict_add_item(cls, data, base, exp)
elif rhead is TERM_COEFF_DICT:
base_exp_dict_add_item(cls, data, rhs, 1)
else:
raise NotImplementedError(`self, cls, rhs.pair`)
def commutative_mul(self, cls, lhs, rhs):
data = lhs.data.copy()
self.inplace_commutative_data_mul(cls, data, rhs)
return base_exp_dict_new(cls, data)
def commutative_mul_number(self, cls, lhs, rhs):
return term_coeff_new(cls, (lhs, rhs))
def commutative_div_number(self, cls, lhs, rhs):
r = number_div(cls, 1, rhs)
if rhs==0:
return r * lhs
return term_coeff_new(cls, (lhs, r))
def commutative_div(self, cls, lhs, rhs):
rhead, rdata = rhs.pair
if rhead is NUMBER:
return self.commutative_div_number(cls, lhs, rdata)
if rhead is POW:
data = lhs.data.copy()
base, exp = rdata
base_exp_dict_sub_item(cls, data, base, exp)
return base_exp_dict_new(cls, data)
if rhead is BASE_EXP_DICT:
data = lhs.data.copy()
base_exp_dict_sub_dict(cls, data, rdata)
return base_exp_dict_new(cls, data)
if rhead is SYMBOL or rhead is TERM_COEFF_DICT or rhead is APPLY:
data = lhs.data.copy()
base_exp_dict_sub_item(cls, data, rhs, 1)
return base_exp_dict_new(cls, data)
if rhead is TERM_COEFF:
term, coeff = rhs.term_coeff()
return (lhs / term) / coeff
return ArithmeticHead.commutative_div(self, cls, lhs, rhs)
def commutative_rdiv_number(self, cls, lhs, rhs):
data = lhs.data.copy()
base_exp_dict_mul_value(cls, data, -1)
return base_exp_dict_new(cls, data) * rhs
def scan(self, proc, cls, data, target):
for b, e in data.iteritems():
b.head.scan(proc, cls, b.data, target)
if isinstance(e, Expr):
e.head.scan(proc, cls, e.data, target)
else:
NUMBER.scan(proc, cls, e, target)
proc(cls, self, data, target)
def walk(self, func, cls, data, target):
d = {}
flag = False
for b, e in data.iteritems():
b1 = b.head.walk(func, cls, b.data, b)
if isinstance(e, Expr):
e1 = e.head.walk(func, cls, e.data, e)
else:
e1 = NUMBER.walk(func, cls, e, e)
if b1 is not b or e1 is not e:
flag = True
self.inplace_commutative_data_mul(cls, d, b1**e1)
if flag:
r = base_exp_dict_new(cls, d)
return func(cls, r.head, r.data, r)
return func(cls, self, data, target)
def pow(self, cls, base, exp):
if type(exp) is cls:
h, d = exp.pair
if h is NUMBER and isinstance(d, numbertypes):
exp = d
if isinstance(exp, inttypes):
if exp:
data = base.data.copy()
base_exp_dict_mul_value(cls, data, exp)
return base_exp_dict_new(cls, data)
return cls(NUMBER, 1)
return pow_new(cls, (base, exp))
pow_number = pow
def expand(self, cls, expr):
data = {}
for b, e in expr.data.items():
f = pow_new(cls, (b, e)).expand()
h, d = f.pair
data1 = {}
if h is TERM_COEFF_DICT:
data2 = d
else:
t, c = f.term_coeff()
data2 = {t: c}
if data:
term_coeff_dict_mul_dict(cls, data1, data, data2)
data = data1
else:
data = data2
return term_coeff_dict_new(cls, data)
def diff(self, cls, data, expr, symbol, order, cache={}):
key = (expr, symbol, order)
result = cache.get(key)
if result is not None:
return result
key1 = (expr, symbol, 1)
result = cache.get(key1)
if result is None:
operands = data.items()
zero = cls(NUMBER, 0)
result = zero
for i in range(len(operands)):
p = pow_new(cls, operands[i])
d = p.head.diff(cls, p.data, p, symbol, 1, cache=cache)
if d==zero:
continue
be_dict = data.copy()
del be_dict[operands[i][0]]
r = base_exp_dict_new(cls, be_dict)
result += r * d
cache[key1] = result
if order>1:
result = result.head.diff(cls, result.data, result, symbol, order-1, cache=cache)
cache[key] = result
return result
def apply(self, cls, data, func, args):
result = cls(NUMBER, 1)
for base, exp in data.iteritems():
if isinstance(exp, Expr):
return NotImplemented
result *= base.head.apply(cls, base.data, base, args) ** exp
return result
def integrate_indefinite(self, cls, data, expr, x):
d1 = {} # f(x)**g(x)
d2 = {} # f(x)**const
d3 = {} # const**g(x)
d4 = {} # const**const
for base, exp in data.iteritems():
if x in base.symbols_data:
if type(exp) is cls and x in exp.symbols_data:
d1[base] = exp
else:
d2[base] = exp
elif type(exp) is cls and x in exp.symbols_data:
d3[base] = exp
else:
d4[base] = exp
if d1 or (d2 and d3) or (len(d2)>1) or (len(d3)>1):
raise NotImplementedError("don't know how to integrate %s over %s" % (expr, x))
if not (d2 or d3):
return expr * cls(SYMBOL, x)
if d4:
if len(d4)>1:
const = cls(BASE_EXP_DICT, d4)
else:
const = pow_new(cls, dict_get_item(d4))
else:
const = 1
if d2:
newexpr = pow_new(cls, dict_get_item(d2))
return newexpr.head.integrate_indefinite(cls, newexpr.data, newexpr, x) * const
if d3:
newexpr = pow_new(cls, dict_get_item(d3))
return newexpr.head.integrate_indefinite(cls, newexpr.data, newexpr, x) * const
raise NotImplementedError("don't know how to integrate %s over %s" % (expr, x))
def integrate_definite(self, cls, data, expr, x, a, b):
d1 = {} # f(x)**g(x)
d2 = {} # f(x)**const
d3 = {} # const**g(x)
d4 = {} # const**const
for base, exp in data.iteritems():
if x in base.symbols_data:
if type(exp) is cls and x in exp.symbols_data:
d1[base] = exp
else:
d2[base] = exp
elif type(exp) is cls and x in exp.symbols_data:
d3[base] = exp
else:
d4[base] = exp
if d1 or (d2 and d3) or (len(d2)>1) or (len(d3)>1):
raise NotImplementedError("don't know how to integrate %s over %s in [%s, %s]" % (expr, x, a, b))
if not (d2 or d3):
return (b-a) * cls(SYMBOL, x)
if d4:
if len(d4)>1:
const = cls(BASE_EXP_DICT, d4)
else:
const = pow_new(cls, dict_get_item(d4))
else:
const = 1
if d2:
newexpr = pow_new(cls, dict_get_item(d2))
return newexpr.head.integrate_definite(cls, newexpr.data, newexpr, x, a, b) * const
if d3:
newexpr = pow_new(cls, dict_get_item(d3))
return newexpr.head.integrate_definite(cls, newexpr.data, newexpr, x, a, b) * const
raise NotImplementedError("don't know how to integrate %s over %s in [%s, %s]" % (expr, x, a, b))
BASE_EXP_DICT = BaseExpDictHead()
| bsd-3-clause | 5,856,890,903,405,124,000 | 35.813115 | 109 | 0.512647 | false |
gcarq/freqtrade | freqtrade/configuration/check_exchange.py | 1 | 3242 | import logging
from typing import Any, Dict
from freqtrade.exceptions import OperationalException
from freqtrade.exchange import (available_exchanges, get_exchange_bad_reason, is_exchange_bad,
is_exchange_known_ccxt, is_exchange_officially_supported)
from freqtrade.state import RunMode
logger = logging.getLogger(__name__)
def remove_credentials(config: Dict[str, Any]) -> None:
"""
Removes exchange keys from the configuration and specifies dry-run
Used for backtesting / hyperopt / edge and utils.
Modifies the input dict!
"""
config['exchange']['key'] = ''
config['exchange']['secret'] = ''
config['exchange']['password'] = ''
config['exchange']['uid'] = ''
config['dry_run'] = True
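# Illustrative usage sketch (added; the config dict shown is a hypothetical
# minimal shape, not taken from this repository):
#
#     config = {'dry_run': False,
#               'exchange': {'key': 'k', 'secret': 's', 'password': 'p', 'uid': 'u'}}
#     remove_credentials(config)
#     # config['dry_run'] is now True and every credential field is ''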
def check_exchange(config: Dict[str, Any], check_for_bad: bool = True) -> bool:
"""
Check if the exchange name in the config file is supported by Freqtrade
:param check_for_bad: if True, check the exchange against the list of known 'bad'
exchanges
:return: False if exchange is 'bad', i.e. is known to work with the bot with
critical issues or does not work at all, crashes, etc. True otherwise.
        raises an exception if the exchange is not supported by ccxt
        and thus is not known to Freqtrade at all.
"""
if (config['runmode'] in [RunMode.PLOT, RunMode.UTIL_NO_EXCHANGE, RunMode.OTHER]
and not config.get('exchange', {}).get('name')):
# Skip checking exchange in plot mode, since it requires no exchange
return True
logger.info("Checking exchange...")
exchange = config.get('exchange', {}).get('name').lower()
if not exchange:
raise OperationalException(
f'This command requires a configured exchange. You should either use '
f'`--exchange <exchange_name>` or specify a configuration file via `--config`.\n'
f'The following exchanges are available for Freqtrade: '
f'{", ".join(available_exchanges())}'
)
if not is_exchange_known_ccxt(exchange):
raise OperationalException(
f'Exchange "{exchange}" is not known to the ccxt library '
f'and therefore not available for the bot.\n'
f'The following exchanges are available for Freqtrade: '
f'{", ".join(available_exchanges())}'
)
if check_for_bad and is_exchange_bad(exchange):
raise OperationalException(f'Exchange "{exchange}" is known to not work with the bot yet. '
f'Reason: {get_exchange_bad_reason(exchange)}')
if is_exchange_officially_supported(exchange):
logger.info(f'Exchange "{exchange}" is officially supported '
f'by the Freqtrade development team.')
else:
        logger.warning(f'Exchange "{exchange}" is known to the ccxt library, '
f'available for the bot, but not officially supported '
f'by the Freqtrade development team. '
f'It may work flawlessly (please report back) or have serious issues. '
f'Use it at your own discretion.')
return True
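# Illustrative call (added; the minimal config below is an assumption, shown only
# for the expected shape):
#
#     check_exchange({'runmode': RunMode.DRY_RUN, 'exchange': {'name': 'binance'}})
#     # logs the support status and returns True; an exchange unknown to ccxt
#     # raises OperationalException instead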
| gpl-3.0 | -733,093,713,289,418,400 | 42.810811 | 99 | 0.624923 | false |
hugovk/terroroftinytown | terroroftinytown/tracker/base.py | 1 | 1068 | # encoding=utf-8
import tornado.web
from terroroftinytown.tracker.model import User
ACCOUNT_COOKIE_NAME = 'tottu'
ACCOUNT_TOKEN_COOKIE_NAME = 'tottt'
class BaseHandler(tornado.web.RequestHandler):
def get_current_user(self):
username_raw = self.get_secure_cookie(ACCOUNT_COOKIE_NAME)
token = self.get_secure_cookie(ACCOUNT_TOKEN_COOKIE_NAME)
if username_raw and token:
username = username_raw.decode('ascii')
if username and User.check_account_session(username, token):
return username
def prepare(self):
if self.application.is_maintenance_in_progress():
self._show_maintenance_page()
def _show_maintenance_page(self):
self.set_status(512, 'EXPORTING OUR SHIT')
self.render('maintenance.html')
raise tornado.web.Finish()
def user_audit_text(self, text):
return '[{username} - {ip_address}] {text}'.format(
username=self.current_user,
ip_address=self.request.remote_ip,
text=text,
)
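# Sketch of intended use (an assumption based on the class above, not taken from
# this repository): concrete handlers subclass BaseHandler so that tornado's
# current_user machinery is backed by the two secure cookies, e.g.
#
#     class IndexHandler(BaseHandler):
#         @tornado.web.authenticated
#         def get(self):
#             self.render('index.html')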
| mit | -4,946,636,474,839,554,000 | 28.666667 | 72 | 0.640449 | false |
yuwen41200/biodiversity-analysis | src/controller/main_action.py | 1 | 10383 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from dateutil.parser import parse
from pprint import pformat
from traceback import format_exc
from model.dataset import Dataset
from model.species import Species
from model.leaflet_map import LeafletMap
from model.scatter_plot import ScatterPlot
from view.spatial_analysis_widget import SpatialAnalysisWidget
from view.temporal_analysis_widget import TemporalAnalysisWidget
from view.cooccurrence_analysis_widget import CooccurrenceAnalysisWidget
from view.set_filters_dialog import SetFiltersDialog
from view.add_species_dialog import AddSpeciesDialog
from controller.correlation_table import CorrelationTable
from controller.cooccurrence_calculation import CooccurrenceCalculation
from lib.dataset_processor import DatasetProcessor
# noinspection PyPep8Naming
class MainAction:
def __init__(self, dataset, mainWindow, process, pipe):
"""
Initialize the controller for the main window.
:param dataset: Dataset model.
:param mainWindow: MainWindow view.
:param process: Worker subprocess.
:param pipe: Message pipe for the worker subprocess.
"""
self.spatialData = dataset.spatialData
self.temporalData = dataset.temporalData
self.auxiliaryData = dataset.auxiliaryData
self.selectedSpecies = dataset.selectedSpecies
self.map = LeafletMap(dataset, "Landscape")
self.plot = ScatterPlot(dataset)
spatial = SpatialAnalysisWidget(self.map.webView)
temporal = TemporalAnalysisWidget(self.plot.mplCanvas)
cooccurrence = CooccurrenceAnalysisWidget()
self.correlationTable = CorrelationTable(dataset, spatial, temporal)
self.cooccurrenceCalculation = CooccurrenceCalculation(
dataset, cooccurrence, process, pipe
)
self.mainWindow = mainWindow
self.mainWindow.setupWidgets(
spatial, temporal, cooccurrence, self, self.cooccurrenceCalculation
)
self.mainWindow.show()
# noinspection PyCallByClass, PyTypeChecker, PyArgumentList, PyBroadException
def importData(self):
"""
Import data from a Darwin Core Archive (DwC-A) file. |br|
Store them in ``Dataset``.
:return: None.
"""
if self.spatialData:
title = "Dataset Already Imported"
content = "To import new data, please clear data first."
self.mainWindow.alert(title, content, 3)
return
title, extension = "Select a DwC-A File", "DwC-A File (*.zip)"
filename = self.mainWindow.openFile(title, extension)
if filename:
try:
archiveData, archiveMeta = DatasetProcessor.extractDarwinCoreArchive(filename)
if archiveMeta["coreType"] not in Dataset.supportedCores:
title = "Unsupported DwC Type"
content = (
"The provided file has core type of " + archiveMeta["coreType"] + ".\n"
"This program only support " + ", ".join(Dataset.supportedCores) + "."
)
self.mainWindow.alert(title, content, 3)
return
columns = [
("individualCount", True),
("eventDate", True),
("decimalLatitude", True),
("decimalLongitude", True),
("scientificName", True),
("vernacularName", False)
]
try:
dataList = DatasetProcessor.extractCsv(archiveData, archiveMeta, columns)
except ValueError as e:
title = "Invalid DwC-A File"
content = str(e) + "\nPlease select a DwC-A file with such field."
self.mainWindow.alert(title, content, 3)
return
except:
title = "Invalid DwC-A File"
content = (
"The provided file is either not in DwC-A format or corrupted.\n"
"Please select a valid one.\n\n"
)
self.mainWindow.alert(title, content + format_exc(), 3)
return
for r in dataList:
try:
r0int = int(r[0])
r1datetime = parse(r[1])
r2float = float(r[2])
r3float = float(r[3])
if not r[4]:
raise ValueError("Field \"scientificName\" is empty.")
except:
title = "Invalid Record Found"
content = "The following record is invalid and will be ignored:\n"
self.mainWindow.alert(title, content + pformat(r), 2)
else:
self.spatialData[r[4]] = ((r2float, r3float), r0int)
self.temporalData[r[4]] = (r1datetime, r0int)
self.auxiliaryData[r[4]] = r[5]
title = "Dataset Successfully Imported"
content = "{:,d} records have been loaded.".format(len(dataList))
self.mainWindow.alert(title, content, 0)
# noinspection PyTypeChecker
def setFilters(self):
"""
Only leave filtered data in ``Dataset``.
:return: None.
"""
if not self.spatialData:
title, content = "Empty Dataset", "Please import data first."
self.mainWindow.alert(title, content, 3)
else:
xCoordinates = [n[0][1] for m in self.spatialData.values() for n in m]
yCoordinates = [n[0][0] for m in self.spatialData.values() for n in m]
timestamps = [n[0] for m in self.temporalData.values() for n in m]
xCoordinateMinMax = (min(xCoordinates), max(xCoordinates))
yCoordinateMinMax = (min(yCoordinates), max(yCoordinates))
timestampMinMax = (min(timestamps), max(timestamps))
dialog = SetFiltersDialog(xCoordinateMinMax, yCoordinateMinMax, timestampMinMax)
dialog.exec_()
if not dialog.xCoordinateMinMax:
return
for k in list(self.spatialData.keys()):
for i, u in enumerate(self.spatialData[k]):
v = self.temporalData[k][i]
if (
dialog.xCoordinateMinMax[0] <= u[0][1] <= dialog.xCoordinateMinMax[1] and
dialog.yCoordinateMinMax[0] <= u[0][0] <= dialog.yCoordinateMinMax[1] and
dialog.timestampMinMax[0] <= v[0] <= dialog.timestampMinMax[1]
):
break
else:
if k in self.selectedSpecies:
self.removeSpecies(k + " " + self.auxiliaryData[k])
del self.spatialData[k]
del self.temporalData[k]
del self.auxiliaryData[k]
self.cooccurrenceCalculation.halt()
self.plot.resetCache()
length = len([n for m in self.spatialData.values() for n in m])
title = "Filter Result"
content = "{:,d} records matches the specified range.".format(length)
self.mainWindow.alert(title, content, 0)
# noinspection PyCallByClass, PyTypeChecker, PyArgumentList
def addSpecies(self):
"""
Select a species from ``Dataset.spatialData``, append it to ``Dataset.selectedSpecies``.
:return: None.
"""
if not self.spatialData:
title, content = "Empty Dataset", "Please import data first."
self.mainWindow.alert(title, content, 3)
elif not Species.available():
title = "Too Many Species"
content = ("Selecting more than " + str(Species.nColor) +
" species is not supported.")
self.mainWindow.alert(title, content, 3)
else:
species = [(k, self.auxiliaryData[k]) for k in self.spatialData.keys()
if k not in self.selectedSpecies]
dialog = AddSpeciesDialog(species)
dialog.exec_()
if dialog.newSpecies:
newSpecies, vernacularName = dialog.newSpecies
self.selectedSpecies[newSpecies] = Species()
newColor = self.selectedSpecies[newSpecies].color
self.mainWindow.addSpeciesToLayout(newSpecies, vernacularName, newColor)
self.map.add(newSpecies)
self.map.refresh()
self.plot.rebuild()
self.correlationTable.add(newSpecies)
def removeSpecies(self, oldSpecies):
"""
Remove the specified species from ``Dataset.selectedSpecies``.
:param oldSpecies: Name of the old species to be removed.
:return: None.
"""
oldSpeciesShort = oldSpecies
for k in self.selectedSpecies.keys():
if oldSpecies.startswith(k):
oldSpeciesShort = k
del self.selectedSpecies[k]
break
self.mainWindow.removeSpeciesFromLayout(oldSpecies)
self.map.remove()
self.map.refresh()
self.plot.rebuild()
self.correlationTable.remove(oldSpeciesShort)
def clearData(self):
"""
Clear ``Dataset``.
:return: None.
"""
if not self.spatialData:
title, content = "Empty Dataset", "Please import data first."
self.mainWindow.alert(title, content, 3)
else:
self.spatialData.clear()
self.temporalData.clear()
self.auxiliaryData.clear()
self.selectedSpecies.clear()
self.mainWindow.removeSpeciesFromLayout()
self.map.rebuild()
self.map.refresh()
self.plot.resetCache()
self.plot.rebuild()
self.correlationTable.remove()
self.cooccurrenceCalculation.halt()
# noinspection PyCallByClass, PyTypeChecker, PyArgumentList
def about(self):
"""
Show information about this program.
:return: None.
"""
title = "About Biodiversity Analysis"
content = Dataset.license()
self.mainWindow.alert(title, content, 4)
| gpl-3.0 | -5,330,190,177,571,852,000 | 36.756364 | 97 | 0.571896 | false |
brain-research/mirage-rl-bpttv | baselines/acktr/run_atari.py | 1 | 1510 | #!/usr/bin/env python
import os, logging, gym
from baselines import logger
from baselines.common import set_global_seeds
from baselines import bench
from baselines.acktr.acktr_disc import learn
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.acktr.policies import CnnPolicy
def train(env_id, num_timesteps, seed, num_cpu):
def make_env(rank):
def _thunk():
env = make_atari(env_id)
env.seed(seed + rank)
env = bench.Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
gym.logger.setLevel(logging.WARN)
return wrap_deepmind(env)
return _thunk
set_global_seeds(seed)
env = SubprocVecEnv([make_env(i) for i in range(num_cpu)])
policy_fn = CnnPolicy
learn(policy_fn, env, seed, total_timesteps=int(num_timesteps * 1.1), nprocs=num_cpu)
env.close()
def main():
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num-timesteps', type=int, default=int(10e6))
args = parser.parse_args()
logger.configure()
train(args.env, num_timesteps=args.num_timesteps, seed=args.seed, num_cpu=32)
if __name__ == '__main__':
main()
| mit | 1,472,891,303,127,896,000 | 38.736842 | 100 | 0.691391 | false |
dwavesystems/dimod | dimod/core/sampler.py | 1 | 9510 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The :class:`.Sampler` abstract base class (see :mod:`abc`) helps you create new
dimod samplers.
Any new dimod sampler must define a subclass of :class:`.Sampler` that implements
abstract properties :attr:`~.Sampler.parameters` and :attr:`~.Sampler.properties`
and one of the abstract methods :meth:`~.Sampler.sample`, :meth:`~.Sampler.sample_ising`,
or :meth:`~.Sampler.sample_qubo`. The :class:`.Sampler` class provides the complementary
methods as mixins and ensures consistent responses.
Implemented sample methods must accept, and warn on, unknown keyword arguments
`**kwargs`. This means that all implemented sample methods must have the
`**kwargs` parameter. :meth:`~.Sampler.remove_unknown_kwargs` is a convenience
method provided for this purpose.
For example, the following steps show how to easily create a dimod sampler. It is
sufficient to implement a single method (in this example the :meth:`sample_ising` method)
to create a dimod sampler with the :class:`.Sampler` class.
.. testcode::
class LinearIsingSampler(dimod.Sampler):
def sample_ising(self, h, J, **kwargs):
kwargs = self.remove_unknown_kwargs(**kwargs)
sample = linear_ising(h, J)
energy = dimod.ising_energy(sample, h, J)
return dimod.SampleSet.from_samples([sample], vartype='SPIN', energy=[energy])
@property
def properties(self):
return dict()
@property
def parameters(self):
return dict()
For this example, the implemented sampler :meth:`~.Sampler.sample_ising` can be based on
a simple placeholder function, which returns a sample that minimizes the linear terms:
.. testcode::
def linear_ising(h, J):
sample = {}
for v in h:
if h[v] < 0:
sample[v] = +1
else:
sample[v] = -1
return sample
The :class:`.Sampler` ABC provides the other sample methods "for free"
as mixins.
>>> sampler = LinearIsingSampler()
...
... # Implemented by class LinearIsingSampler:
>>> response = sampler.sample_ising({'a': -1}, {})
...
... # Mixins provided by Sampler class:
>>> response = sampler.sample_qubo({('a', 'a'): 1})
>>> response = sampler.sample(dimod.BinaryQuadraticModel.from_ising({'a': -1}, {}))
Below is a more complex version of the same sampler, where the :attr:`properties` and
:attr:`parameters` properties return non-empty dicts.
.. testcode::
class FancyLinearIsingSampler(dimod.Sampler):
def __init__(self):
self._properties = {'description': 'a simple sampler that only considers the linear terms'}
self._parameters = {'verbose': []}
def sample_ising(self, h, J, verbose=False, **kwargs):
kwargs = self.remove_unknown_kwargs(**kwargs)
sample = linear_ising(h, J)
energy = dimod.ising_energy(sample, h, J)
if verbose:
print(sample)
            return dimod.SampleSet.from_samples([sample], vartype='SPIN', energy=[energy])
@property
def properties(self):
return self._properties
@property
def parameters(self):
return self._parameters
"""
import abc
import warnings
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dimod.exceptions import InvalidSampler, SamplerUnknownArgWarning
from dimod.meta import SamplerABCMeta, samplemixinmethod
from dimod.vartypes import Vartype
__all__ = ['Sampler']
class Sampler(metaclass=SamplerABCMeta):
"""Abstract base class for dimod samplers.
Provides all methods :meth:`~.Sampler.sample`, :meth:`~.Sampler.sample_ising`,
:meth:`~.Sampler.sample_qubo` assuming at least one is implemented.
Also includes utility method :meth:`~.Sampler.remove_unknown_kwargs`, which
may be used in sample methods to handle unknown kwargs.
"""
@abc.abstractproperty # for python2 compatibility
def parameters(self):
"""dict: A dict where keys are the keyword parameters accepted by the sampler
        methods and values are lists of the properties relevant to each parameter.
"""
pass
@abc.abstractproperty # for python2 compatibility
def properties(self):
"""dict: A dict containing any additional information about the sampler.
"""
pass
@samplemixinmethod
def sample(self, bqm, **parameters):
"""Sample from a binary quadratic model.
This method is inherited from the :class:`.Sampler` base class.
Converts the binary quadratic model to either Ising or QUBO format and
then invokes an implemented sampling method (one of
:meth:`.sample_ising` or :meth:`.sample_qubo`).
Args:
:obj:`.BinaryQuadraticModel`:
A binary quadratic model.
**kwargs:
See the implemented sampling for additional keyword definitions.
Unknown keywords are accepted but a warning will be raised.
Returns:
:obj:`.SampleSet`
See also:
:meth:`.sample_ising`, :meth:`.sample_qubo`
"""
# we try to use the matching sample method if possible
if bqm.vartype is Vartype.SPIN:
if not getattr(self.sample_ising, '__issamplemixin__', False):
# sample_ising is implemented
h, J, offset = bqm.to_ising()
sampleset = self.sample_ising(h, J, **parameters)
else:
Q, offset = bqm.to_qubo()
sampleset = self.sample_qubo(Q, **parameters)
elif bqm.vartype is Vartype.BINARY:
if not getattr(self.sample_qubo, '__issamplemixin__', False):
# sample_qubo is implemented
Q, offset = bqm.to_qubo()
sampleset = self.sample_qubo(Q, **parameters)
else:
h, J, offset = bqm.to_ising()
sampleset = self.sample_ising(h, J, **parameters)
else:
raise RuntimeError("binary quadratic model has an unknown vartype")
# if the vartype already matches this will just adjust the offset
return sampleset.change_vartype(bqm.vartype, energy_offset=offset)
@samplemixinmethod
def sample_ising(self, h, J, **parameters):
"""Sample from an Ising model using the implemented sample method.
This method is inherited from the :class:`.Sampler` base class.
Converts the Ising model into a :obj:`.BinaryQuadraticModel` and then
calls :meth:`.sample`.
Args:
h (dict/list):
Linear biases of the Ising problem. If a dict, should be of the
form `{v: bias, ...}` where is a spin-valued variable and `bias`
is its associated bias. If a list, it is treated as a list of
biases where the indices are the variable labels.
J (dict[(variable, variable), bias]):
Quadratic biases of the Ising problem.
**kwargs:
See the implemented sampling for additional keyword definitions.
Returns:
:obj:`.SampleSet`
See also:
:meth:`.sample`, :meth:`.sample_qubo`
"""
bqm = BinaryQuadraticModel.from_ising(h, J)
return self.sample(bqm, **parameters)
@samplemixinmethod
def sample_qubo(self, Q, **parameters):
"""Sample from a QUBO using the implemented sample method.
This method is inherited from the :class:`.Sampler` base class.
Converts the QUBO into a :obj:`.BinaryQuadraticModel` and then
calls :meth:`.sample`.
Args:
Q (dict):
Coefficients of a quadratic unconstrained binary optimization
(QUBO) problem. Should be a dict of the form `{(u, v): bias, ...}`
where `u`, `v`, are binary-valued variables and `bias` is their
associated coefficient.
**kwargs:
See the implemented sampling for additional keyword definitions.
Returns:
:obj:`.SampleSet`
See also:
:meth:`.sample`, :meth:`.sample_ising`
"""
bqm = BinaryQuadraticModel.from_qubo(Q)
return self.sample(bqm, **parameters)
def remove_unknown_kwargs(self, **kwargs):
"""Check that all `kwargs` are accepted by the sampler. If a
keyword is unknown, a warning is raised and the argument is removed.
Args:
**kwargs:
Keyword arguments to be validated.
Returns:
dict: Updated `kwargs`
"""
for kw in [k for k in kwargs if k not in self.parameters]:
msg = "Ignoring unknown kwarg: {!r}".format(kw)
warnings.warn(msg, SamplerUnknownArgWarning, stacklevel=3)
kwargs.pop(kw)
return kwargs
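# Behaviour sketch for remove_unknown_kwargs (illustrative; LinearIsingSampler is
# the toy sampler from the module docstring, not defined in this file):
#
#     sampler = LinearIsingSampler()
#     sampler.remove_unknown_kwargs(num_reads=10)
#     # warns with SamplerUnknownArgWarning about 'num_reads' and returns {}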
| apache-2.0 | -2,800,460,866,468,964,000 | 34.485075 | 103 | 0.625973 | false |
dontnod/weblate | weblate/trans/tests/test_edit.py | 1 | 24404 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Test for translation views."""
from __future__ import unicode_literals
import time
from django.urls import reverse
from weblate.trans.models import Change
from weblate.trans.tests.test_views import ViewTestCase
from weblate.utils.hash import hash_to_checksum
from weblate.utils.state import STATE_FUZZY, STATE_TRANSLATED
class EditTest(ViewTestCase):
"""Test for manipulating translation."""
has_plurals = True
source = 'Hello, world!\n'
target = 'Nazdar svete!\n'
second_target = 'Ahoj svete!\n'
already_translated = 0
def setUp(self):
super(EditTest, self).setUp()
self.translation = self.get_translation()
self.translate_url = reverse('translate', kwargs=self.kw_translation)
def test_edit(self):
response = self.edit_unit(self.source, self.target)
# We should get to second message
self.assert_redirects_offset(response, self.translate_url, 2)
unit = self.get_unit(source=self.source)
self.assertEqual(unit.target, self.target)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(unit.state, STATE_TRANSLATED)
self.assert_backend(self.already_translated + 1)
# Test that second edit with no change does not break anything
response = self.edit_unit(self.source, self.target)
# We should get to second message
self.assert_redirects_offset(response, self.translate_url, 2)
unit = self.get_unit(source=self.source)
self.assertEqual(unit.target, self.target)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(unit.state, STATE_TRANSLATED)
self.assert_backend(self.already_translated + 1)
# Test that third edit still works
response = self.edit_unit(self.source, self.second_target)
# We should get to second message
self.assert_redirects_offset(response, self.translate_url, 2)
unit = self.get_unit(source=self.source)
self.assertEqual(unit.target, self.second_target)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(unit.state, STATE_TRANSLATED)
self.assert_backend(self.already_translated + 1)
def test_plurals(self):
"""Test plural editing."""
if not self.has_plurals:
return
response = self.edit_unit(
'Orangutan',
'Opice má %d banán.\n',
target_1='Opice má %d banány.\n',
target_2='Opice má %d banánů.\n',
)
# We should get to second message
self.assert_redirects_offset(response, self.translate_url, 2)
# Check translations
unit = self.get_unit('Orangutan')
plurals = unit.get_target_plurals()
self.assertEqual(len(plurals), 3)
self.assertEqual(
plurals[0],
'Opice má %d banán.\n',
)
self.assertEqual(
plurals[1],
'Opice má %d banány.\n',
)
self.assertEqual(
plurals[2],
'Opice má %d banánů.\n',
)
def test_fuzzy(self):
"""Test for fuzzy flag handling."""
unit = self.get_unit(source=self.source)
self.assertNotEqual(unit.state, STATE_FUZZY)
self.edit_unit(self.source, self.target, fuzzy='yes', review='10')
unit = self.get_unit(source=self.source)
self.assertEqual(unit.state, STATE_FUZZY)
self.assertEqual(unit.target, self.target)
self.assertFalse(unit.has_failing_check)
self.edit_unit(self.source, self.target)
unit = self.get_unit(source=self.source)
self.assertEqual(unit.state, STATE_TRANSLATED)
self.assertEqual(unit.target, self.target)
self.assertFalse(unit.has_failing_check)
self.edit_unit(self.source, self.target, fuzzy='yes')
unit = self.get_unit(source=self.source)
self.assertEqual(unit.state, STATE_FUZZY)
self.assertEqual(unit.target, self.target)
# Should have was translated check
self.assertTrue(unit.has_failing_check)
class EditValidationTest(ViewTestCase):
def edit(self, **kwargs):
"""Editing with no specific params."""
unit = self.get_unit()
params = {'checksum': unit.checksum}
params.update(kwargs)
return self.client.post(
unit.translation.get_translate_url(),
params,
follow=True
)
def test_edit_invalid(self):
"""Editing with invalid params."""
response = self.edit()
self.assertContains(response, 'Missing translated string!')
def test_suggest_invalid(self):
"""Suggesting with invalid params."""
response = self.edit(suggest='1')
self.assertContains(response, 'Missing translated string!')
def test_edit_spam(self):
"""Editing with spam trap."""
response = self.edit(content='1')
self.assertContains(response, 'po/cs.po, string 2')
def test_merge(self):
"""Merging with invalid parameter."""
unit = self.get_unit()
response = self.client.get(
unit.translation.get_translate_url(),
{'checksum': unit.checksum, 'merge': 'invalid'},
follow=True,
)
self.assertContains(response, 'Invalid merge request!')
def test_merge_lang(self):
"""Merging across languages."""
unit = self.get_unit()
trans = self.component.translation_set.exclude(
language_code='cs'
)[0]
other = trans.unit_set.get(content_hash=unit.content_hash)
response = self.client.get(
unit.translation.get_translate_url(),
{'checksum': unit.checksum, 'merge': other.pk},
follow=True,
)
self.assertContains(response, 'Invalid merge request!')
def test_revert(self):
unit = self.get_unit()
# Try the merge
response = self.client.get(
unit.translation.get_translate_url(),
{'checksum': unit.checksum, 'revert': 'invalid'},
follow=True,
)
self.assertContains(response, 'Invalid revert request!')
# Try the merge
response = self.client.get(
unit.translation.get_translate_url(),
{'checksum': unit.checksum, 'revert': -1},
follow=True,
)
self.assertContains(response, 'Invalid revert request!')
class EditResourceTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_android()
class EditResourceSourceTest(ViewTestCase):
"""Source strings (template) editing."""
has_plurals = False
def __init__(self, *args, **kwargs):
self._language_code = 'en'
super(EditResourceSourceTest, self).__init__(*args, **kwargs)
def test_edit(self):
translate_url = reverse(
'translate',
kwargs={
'project': 'test',
'component': 'test',
'lang': 'en'
}
)
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assert_redirects_offset(response, translate_url, 2)
unit = self.get_unit('Nazdar svete!\n')
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(unit.state, STATE_TRANSLATED)
self.assert_backend(4)
def test_edit_revert(self):
self._language_code = 'cs'
translation = self.component.translation_set.get(
language_code='cs'
)
# Edit translation
self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
self._language_code = 'en'
unit = translation.unit_set.get(context='hello')
self.assertEqual(unit.state, STATE_TRANSLATED)
# Edit source
self.edit_unit(
'Hello, world!\n',
'Hello, universe!\n'
)
unit = translation.unit_set.get(context='hello')
self.assertEqual(unit.state, STATE_FUZZY)
# Revert source
self.edit_unit(
'Hello, universe!\n',
'Hello, world!\n'
)
unit = translation.unit_set.get(context='hello')
self.assertEqual(unit.state, STATE_TRANSLATED)
def get_translation(self, language=None):
return self.component.translation_set.get(
language_code=self._language_code
)
def create_component(self):
return self.create_android()
class EditBranchTest(EditTest):
def create_component(self):
return self.create_po_branch()
class EditMercurialTest(EditTest):
def create_component(self):
return self.create_po_mercurial()
class EditPoMonoTest(EditTest):
def create_component(self):
return self.create_po_mono()
def test_new_unit(self):
def add(key):
return self.client.post(
reverse(
'new-unit',
kwargs={
'project': 'test',
'component': 'test',
'lang': 'en',
}
),
{'key': key, 'value_0': 'Source string'},
follow=True,
)
response = add('key')
self.assertEqual(response.status_code, 403)
self.make_manager()
response = add('key')
self.assertContains(
response, 'New string has been added'
)
response = add('key')
self.assertContains(
response, 'Translation with this key seem to already exist'
)
response = add('')
self.assertContains(
response, 'Error in parameter key'
)
class EditIphoneTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_iphone()
class EditJSONTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_json()
class EditJoomlaTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_joomla()
class EditRubyYAMLTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_ruby_yaml()
class EditDTDTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_dtd()
class EditJSONMonoTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_json_mono()
class EditJavaTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_java()
class EditAppStoreTest(EditTest):
has_plurals = False
source = 'Weblate - continuous localization'
target = 'Weblate - průběžná lokalizace'
second_target = 'Weblate - průběžný překlad'
already_translated = 2
def create_component(self):
return self.create_appstore()
class EditXliffComplexTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_xliff('complex')
def test_invalid_xml(self):
self.edit_unit('Hello, world!\n', 'Nazdar & svete!\n')
self.assert_backend(1)
class EditXliffResnameTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_xliff('only-resname')
class EditXliffTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_xliff()
class EditXliffMonoTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_xliff_mono()
class EditLinkTest(EditTest):
def create_component(self):
return self.create_link()
class EditTSTest(EditTest):
def create_component(self):
return self.create_ts()
class EditTSMonoTest(EditTest):
has_plurals = False
def create_component(self):
return self.create_ts_mono()
class ZenViewTest(ViewTestCase):
def test_zen(self):
response = self.client.get(
reverse('zen', kwargs=self.kw_translation)
)
self.assertContains(
response,
'Thank you for using Weblate.'
)
self.assertContains(
response,
'Orangutan has %d bananas'
)
self.assertContains(
response,
'The translation has come to an end.'
)
def test_zen_invalid(self):
response = self.client.get(
reverse('zen', kwargs=self.kw_translation),
{'type': 'nonexisting-type'},
follow=True
)
self.assertContains(response, 'Please choose a valid filter type.')
def test_load_zen(self):
response = self.client.get(
reverse('load_zen', kwargs=self.kw_translation)
)
self.assertContains(
response,
'Thank you for using Weblate.'
)
self.assertContains(
response,
'Orangutan has %d bananas'
)
self.assertContains(
response,
'The translation has come to an end.'
)
def test_load_zen_offset(self):
response = self.client.get(
reverse('load_zen', kwargs=self.kw_translation),
{'offset': '2'}
)
self.assertNotContains(
response,
'Hello, world'
)
self.assertContains(
response,
'Orangutan has %d bananas'
)
response = self.client.get(
reverse('load_zen', kwargs=self.kw_translation),
{'offset': 'bug'}
)
self.assertContains(
response,
'Hello, world'
)
def test_save_zen(self):
unit = self.get_unit()
params = {
'checksum': unit.checksum,
'contentsum': hash_to_checksum(unit.content_hash),
'translationsum': hash_to_checksum(unit.get_target_hash()),
'target_0': 'Zen translation',
'review': '20',
}
response = self.client.post(
reverse('save_zen', kwargs=self.kw_translation),
params
)
self.assertContains(
response,
'Following fixups were applied to translation: '
'Trailing and leading whitespace'
)
def test_save_zen_lock(self):
self.component.locked = True
self.component.save()
unit = self.get_unit()
params = {
'checksum': unit.checksum,
'contentsum': hash_to_checksum(unit.content_hash),
'translationsum': hash_to_checksum(unit.get_target_hash()),
'target_0': 'Zen translation',
'review': '20',
}
response = self.client.post(
reverse('save_zen', kwargs=self.kw_translation),
params
)
self.assertContains(
response, 'Insufficient privileges for saving translations.'
)
class EditComplexTest(ViewTestCase):
"""Test for complex manipulating translation."""
def setUp(self):
super(EditComplexTest, self).setUp()
self.translation = self.get_translation()
self.translate_url = reverse('translate', kwargs=self.kw_translation)
def test_merge(self):
# Translate unit to have something to start with
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
unit = self.get_unit()
# Try the merge
response = self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'merge': unit.id}
)
self.assert_backend(1)
# We should stay on same message
self.assert_redirects_offset(
response, self.translate_url, unit.position + 1
)
# Test error handling
unit2 = self.translation.unit_set.get(
source='Thank you for using Weblate.'
)
response = self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'merge': unit2.id}
)
self.assertContains(response, 'Invalid merge request!')
def test_revert(self):
source = 'Hello, world!\n'
target = 'Nazdar svete!\n'
target_2 = 'Hei maailma!\n'
self.edit_unit(
source,
target
)
# Ensure other edit gets different timestamp
time.sleep(1)
self.edit_unit(
source,
target_2
)
unit = self.get_unit()
changes = Change.objects.content().filter(unit=unit).order()
self.assertEqual(changes[1].target, target)
self.assertEqual(changes[0].target, target_2)
self.assert_backend(1)
# revert it
self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'revert': changes[1].id}
)
unit = self.get_unit()
self.assertEqual(unit.target, target_2)
# check that we cannot revert to string from another translation
self.edit_unit(
'Thank you for using Weblate.',
'Kiitoksia Weblaten kaytosta.'
)
unit2 = self.get_unit(
source='Thank you for using Weblate.'
)
change = Change.objects.filter(unit=unit2).order()[0]
response = self.client.get(
self.translate_url,
{'checksum': unit.checksum, 'revert': change.id}
)
self.assertContains(response, 'Invalid revert request!')
self.assert_backend(2)
def test_edit_fixup(self):
# Save with failing check
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!'
)
# We should get to second message
self.assert_redirects_offset(response, self.translate_url, 2)
unit = self.get_unit()
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertFalse(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(len(unit.active_checks()), 0)
self.assertEqual(unit.translation.stats.allchecks, 0)
self.assert_backend(1)
def test_edit_check(self):
# Save with failing check
response = self.edit_unit(
'Hello, world!\n',
'Hello, world!\n',
)
# We should stay on current message
self.assert_redirects_offset(response, self.translate_url, 1)
unit = self.get_unit()
self.assertEqual(unit.target, 'Hello, world!\n')
self.assertTrue(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 1)
self.assertEqual(len(unit.active_checks()), 1)
self.assertEqual(unit.translation.stats.allchecks, 1)
# Ignore check
check_id = unit.active_checks()[0].id
response = self.client.post(
reverse('js-ignore-check', kwargs={'check_id': check_id})
)
self.assertContains(response, 'ok')
# Should have one less failing check
unit = self.get_unit()
self.assertFalse(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 1)
self.assertEqual(len(unit.active_checks()), 0)
self.assertEqual(unit.translation.stats.allchecks, 0)
# Ignore check for all languages
ignore_url = reverse(
'js-ignore-check-source',
kwargs={'check_id': check_id, 'pk': unit.source_info.pk}
)
response = self.client.post(ignore_url)
self.assertEqual(response.status_code, 403)
self.user.is_superuser = True
self.user.save()
response = self.client.post(ignore_url)
self.assertContains(response, 'ok')
# Should have one less check
unit = self.get_unit()
self.assertFalse(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(len(unit.active_checks()), 0)
self.assertEqual(unit.translation.stats.allchecks, 0)
# Save with no failing checks
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should stay on current message
self.assert_redirects_offset(response, self.translate_url, 2)
unit = self.get_unit()
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertFalse(unit.has_failing_check)
self.assertEqual(len(unit.checks()), 0)
self.assertEqual(unit.translation.stats.allchecks, 0)
self.assert_backend(1)
def test_commit_push(self):
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assert_redirects_offset(response, self.translate_url, 2)
self.assertTrue(self.translation.needs_commit())
self.assertTrue(self.component.needs_commit())
self.assertTrue(self.component.project.needs_commit())
self.translation.commit_pending('test', self.user)
self.assertFalse(self.translation.needs_commit())
self.assertFalse(self.component.needs_commit())
self.assertFalse(self.component.project.needs_commit())
self.assertTrue(self.translation.repo_needs_push())
self.assertTrue(self.component.repo_needs_push())
self.assertTrue(self.component.project.repo_needs_push())
self.translation.do_push(self.get_request())
self.assertFalse(self.translation.repo_needs_push())
self.assertFalse(self.component.repo_needs_push())
self.assertFalse(self.component.project.repo_needs_push())
def test_edit_locked(self):
self.component.locked = True
self.component.save()
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
# We should get to second message
self.assertContains(
response,
'This translation is currently locked for updates.'
)
self.assert_backend(0)
def test_edit_changed_source(self):
# We use invalid contentsum here
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
contentsum='aaa',
)
# We should get an error message
self.assertContains(
response,
'Source string has been changed meanwhile'
)
self.assert_backend(0)
def test_edit_changed_translation(self):
# We use invalid translationsum here
response = self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n',
translationsum='aaa',
)
# We should get an error message
self.assertContains(
response,
'Translation of the string has been changed meanwhile'
)
self.assert_backend(0)
def test_edit_view(self):
url = self.get_unit('Hello, world!\n').get_absolute_url()
response = self.client.get(url)
form = response.context['form']
params = {}
for field in form.fields.keys():
params[field] = form[field].value()
params['target_0'] = 'Nazdar svete!\n'
response = self.client.post(url, params)
unit = self.get_unit()
self.assertEqual(unit.target, 'Nazdar svete!\n')
self.assertEqual(unit.state, STATE_TRANSLATED)
self.assert_backend(1)
| gpl-3.0 | 5,844,940,577,075,464,000 | 30.57772 | 77 | 0.590491 | false |
daanwierstra/pybrain | pybrain/rl/tasks/pomdp/pomdp.py | 1 | 1502 | __author__ = 'Tom Schaul, [email protected]'
from scipy import ndarray
from pybrain.rl.tasks import EpisodicTask
from pybrain.utilities import Named, drawIndex
class POMDPTask(EpisodicTask, Named):
""" Partially observable episodic MDP (with discrete actions)
Has actions that can be performed, and observations in every state.
By default, the observation is a vector, and the actions are integers.
"""
# number of observations
observations = 4
# number of possible actions
actions = 4
# maximal number of steps before the episode is stopped
maxSteps = None
# the lower bound on the reward value
minReward = 0
def __init__(self, **args):
self.setArgs(**args)
self.steps = 0
@property
def indim(self):
return self.actions
@property
def outdim(self):
return self.observations
def reset(self):
self.steps = 0
EpisodicTask.reset(self)
def isFinished(self):
if self.maxSteps != None:
return self.steps >= self.maxSteps
return False
def performAction(self, action):
""" POMDP tasks, as they have discrete actions, can me used by providing either an index,
or an array with a 1-in-n coding (which can be stochastic). """
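        # Illustrative call patterns (assumed, not part of the original file):
        #     task.performAction(2)                          # plain action index
        #     task.performAction(array([0., 0.2, 0.8, 0.]))  # stochastic 1-in-n,
        #                                                    # resolved via drawIndex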
if type(action) == ndarray:
action = drawIndex(action, tolerant = True)
self.steps += 1
EpisodicTask.performAction(self, action) | bsd-3-clause | -4,077,918,592,508,438,500 | 27.358491 | 97 | 0.629161 | false |
starcroce/PyAlgoDataStructure | linked_list/linked_list_to_queue.py | 1 | 1314 | import MyDoubleLinkedList
class DoubleLinkedListStack:
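    # Note (added comment): despite the "Stack" in its name, this class behaves
    # as a FIFO queue; push() inserts at the head of the underlying doubly linked
    # list and pop() removes from its tail.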
def __init__(self):
self.front = None
self.rear = None
self.content = None
def push(self, val):
# push to empty queue
if self.content is None:
self.content = MyDoubleLinkedList.LinkedList(val)
self.front = self.content.head
self.rear = self.content.tail
else:
self.content.insert_before(self.content.head, val)
self.front = self.content.head
def pop(self):
if self.is_empty() is True:
print 'Pop from empty queue'
return
self.content.remove(self.rear)
self.rear = self.content.tail
def is_empty(self):
return self.content is None
def print_queue(self):
if self.is_empty():
print 'None'
else:
curr = self.front
while curr != self.rear:
print str(curr.val) + ' ->',
curr = curr.next
print str(curr.val)
def main():
my_queue = DoubleLinkedListStack()
my_queue.print_queue()
for i in range(10):
my_queue.push(i)
my_queue.print_queue()
for i in range(5):
my_queue.pop()
my_queue.print_queue()
if __name__ == '__main__':
main()
| gpl-2.0 | 7,239,916,938,824,079,000 | 22.464286 | 62 | 0.541857 | false |
aditigupta96/DealBazaar | welcome.py | 1 | 26070 | import os
import couchdb
import uuid
import requests
from datetime import datetime
from flask import Flask, jsonify, session, render_template, request, redirect, g, url_for, flash
# from .models import User
from datetime import datetime
from couchdb.mapping import Document, TextField, DateTimeField, ListField, FloatField, IntegerField, ViewField
from werkzeug.utils import secure_filename
from werkzeug import FileStorage
from flask_uploads import (UploadSet, configure_uploads, IMAGES, UploadNotAllowed)
# from cloudant.view import View
from tokens import generate_confirmation_token, confirm_token
from flask_mail import Mail
from emails import send_email
# UPLOADED_PHOTOS_DEST = 'uploads'
GOOGLE_GEOCODE_URL = 'https://maps.googleapis.com/maps/api/geocode/json?place_id={0}&key={1}'
GOOGLE_API_KEY = 'AIzaSyDVE9osSCgxkIPp4LGEp1xwhmGrMVxNpnc'
GOOGLE_DISTANCE_URL = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={0},{1}&destinations={2},{3}&key={4}'
cloudant_data = {
"username": "052ca863-0f20-49a8-9813-330b0813683a-bluemix",
"password": "68e8bdaa4739229b83095bf31b9c8256d5790022a184e8cdfefec270ea2be740",
"host": "052ca863-0f20-49a8-9813-330b0813683a-bluemix.cloudant.com",
"port": '443',
}
DATABASE_URL = "https://052ca863-0f20-49a8-9813-330b0813683a-bluemix.cloudant.com/bazaardata/"
app = Flask(__name__)
app.config.from_object(__name__)
# app.config.from_envvar('DEALBAZAAR_SETTINGS', silent=True)
app.secret_key = os.urandom(24)
mail = Mail(app)
app.config.update(
DEBUG = True,
SECURITY_PASSWORD_SALT = 'random',
BCRYPT_LOG_ROUNDS = 13,
MAIL_SERVER = 'smtp.gmail.com',
MAIL_PORT = 587,
MAIL_USE_TLS = True,
MAIL_USE_SSL = False,
MAIL_USERNAME = os.environ['DEALBAZAAR_USERNAME'],
MAIL_PASSWORD = os.environ['DEALBAZAAR_PASSWORD'],
MAIL_DEFAULT_SENDER = '[email protected]'
)
mail = Mail(app)
# uploaded_photos = UploadSet('photos', IMAGES)
# configure_uploads(app, uploaded_photos)
class User(Document):
doc_type = 'user'
name = TextField()
email = TextField()
password = TextField()
contact = IntegerField()
college = TextField()
city = TextField()
address = TextField()
confirmed = IntegerField(default=0)
createdate = DateTimeField(default=datetime.now)
latitude = TextField()
longitude = TextField()
place_id = TextField()
@classmethod
def get_user(cls,email):
db = get_db()
user = db.get(email,None)
if user is None:
return None
return cls.wrap(user)
def confirm(self):
db = get_db()
self.confirmed = 1
self.store(db)
def calculate_geocode(self):
place_id = self.place_id
data = requests.get(GOOGLE_GEOCODE_URL.format(self.place_id, GOOGLE_API_KEY))
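        # Assumes the Geocoding API call succeeds and returns at least one result;
        # only the first result's location is used. Illustrative response shape:
        #   {"results": [{"geometry": {"location": {"lat": 28.6, "lng": 77.2}}}]}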
self.latitude = str(data.json()['results'][0]['geometry']['location']['lat'])
self.longitude = str(data.json()['results'][0]['geometry']['location']['lng'])
def update(self, contact=None, password=None, city = None, college=None, address=None, placeid=None):
db = get_db()
if contact and contact != "":
self.contact = contact
if city and city != "":
self.city = city
if college and college != "":
self.college = college
if password and password != "":
self.password = password
if address and address != "" and placeid != "":
self.address = address
self.place_id = placeid
self.calculate_geocode()
self.store(db)
class Item(Document):
doc_type = TextField(default='item')
name = TextField()
item_type = TextField()
description = TextField()
original_price = FloatField()
mrp = FloatField()
date = DateTimeField(default=datetime.now)
user = TextField()
filename = TextField()
sold = IntegerField(default=0)
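    # The CouchDB design document '_design/items' (views: all-items, byDate,
    # byUser, byItemType, byItemName) is assumed to exist in the 'bazaardata'
    # database; the classmethods below only read from those views.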
@classmethod
def all(cls,db):
return cls.view(db,'_design/items/_view/all-items')
def confirmSold(self,id):
db = get_db()
self.sold = 1
self.store(db)
@classmethod
def by_date(cls,limit = None):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byDate',
descending=True,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
if limit is not None:
return items[0:limit]
return items
@classmethod
def by_user(cls,email):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byUser',
key=email,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
@classmethod
def by_item_type(cls,item_type):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byItemType',
key=item_type,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
@classmethod
def by_item_name(cls,name):
db = get_db()
item_obj = cls.view(
db,
'_design/items/_view/byItemName',
key=name,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
@classmethod
def get_item(cls,id):
db = get_db()
item = db.get(id,None)
if item is None:
return None
return cls.wrap(item)
def calculate_distance(self, customer_id):
customer = User.get_user(customer_id)
seller = User.get_user(self.user)
data = requests.get(GOOGLE_DISTANCE_URL.format(customer.latitude,
customer.longitude, seller.latitude,
seller.longitude, GOOGLE_API_KEY))
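        # Assumes a successful Distance Matrix response; rows[0].elements[0] is
        # expected to look like (values illustrative):
        #   {'distance': {'text': '4.5 km', 'value': 4500},
        #    'duration': {'text': '12 mins', 'value': 720}}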
distance_text = str(data.json()['rows'][0]['elements'][0]['distance']['text'])
distance_value = int(data.json()['rows'][0]['elements'][0]['distance']['value'])
time = str(data.json()['rows'][0]['elements'][0]['duration']['text'])
distance = [distance_text, distance_value, time]
return distance
class Bid(Document):
doc_type = TextField(default='bid')
amount = FloatField()
user = TextField()
item = TextField()
created = DateTimeField()
@classmethod
def get_bid(cls,id):
db = get_db()
bid = db.get(id,None)
if bid is None:
return None
return cls.wrap(bid)
@classmethod
def get_by_item(cls,db,item_id):
# print '_design/bids/_view/get-bids'+item_id
bids = []
bids_obj = cls.view(
db,
'_design/bids/_view/get-bids',
key=item_id,
include_docs=True
)
for row in bids_obj:
bids.append(cls.wrap(row))
return bids
class Purchased(Document):
doc_type = TextField(default='purchase')
item_id = TextField()
buyer = TextField()
seller = TextField()
date = DateTimeField()
@classmethod
def by_user(cls,buyer):
db = get_db()
item_obj = cls.view(
db,
'_design/purchased/_view/get_byUser',
key=buyer,
include_docs=True
)
items = []
for item in item_obj:
items.append(cls.wrap(item))
return items
def get_db():
if not hasattr(g, 'db'):
server = couchdb.Server("https://"+cloudant_data['username']+':'+cloudant_data['password']
+'@'+cloudant_data['host']+':'+cloudant_data['port'])
try:
g.db = server.create('bazaardata')
except:
g.db = server['bazaardata']
return g.db
# @app.teardown_appcontext
# def close_db(error):
# if hasattr(g, 'db')
@app.before_request
def before_request():
g.user = None
if 'user' in session:
g.user = session['user']
# @app.route('/')
# def Welcome():
# return render_template('signup.html')
@app.route('/signup', methods=['GET', 'POST'])
def signup():
if request.method == 'POST':
user = User()
form_data = request.form
print form_data
if form_data.get('name'):
user.name = form_data.get('name',None)
else:
flash('Name field is required', category = "error")
return render_template('signup.html')
if form_data.get('email'):
email = form_data.get('email',None)
if User.get_user(email) is None:
user.email = email
else:
flash("User already exists", category='error')
return render_template('signup.html')
else:
flash('Email field is required', category = "error")
return render_template('signup.html')
if form_data.get('password'):
user.password = form_data.get('password',None)
else:
flash('Password field is required', category = "error")
return render_template('signup.html')
if form_data.get('contact'):
if len(form_data.get('contact')) == 10 and int(form_data.get('contact')) > 0:
user.contact = form_data.get('contact',None)
else:
flash('Invalid Mobile Number', category = "error")
return render_template('signup.html')
else:
flash('Contact field is required', category = "error")
return render_template('signup.html')
if form_data.get('college'):
user.college = form_data.get('college',None)
else:
flash('College field is required', category = "error")
return render_template('signup.html')
if form_data.get('city'):
user.city = form_data.get('city',None)
else:
flash('City field is required', category = "error")
return render_template('signup.html')
if form_data.get('address', None):
user.address = form_data.get('address',None)
else:
flash('Address field is required', category = "error")
return render_template('signup.html')
# print "place ", form_data.get('placeid')
user.place_id = form_data.get('placeid')
# print user
user.confirmed = 0
user.calculate_geocode()
db = get_db()
db[user.email] = user._data
token = generate_confirmation_token(user.email)
confirm_url = url_for('confirm_email', token=token, _external=True)
html = render_template('activate.html', confirm_url=confirm_url)
subject = "Please confirm your email"
#print user.email
send_email(user.email, subject, html)
        flash('A confirmation link has been sent to your email id. Please confirm it before logging in.', category = "error")
return redirect(url_for('login'))
return render_template('signup.html')
@app.route('/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
session.pop('user', None)
email = request.form['email']
# db = get_db()
user = User.get_user(email)
if user is not None:
if not user.confirmed:
flash('Please confirm your account first...!!!', category="error")
elif request.form['password'] == user.password:
session['user'] = user._data
return redirect(url_for('after_login'))
else:
flash('Invalid password', category="error")
else:
flash('Invalid email', category="error")
return render_template('login.html')
# if request.form['password'] == 'password':
# session['user'] = request.form['email']
# return redirect(url_for('after_login'))
return render_template('login.html')
@app.route('/home')
def after_login():
if g.user:
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('home1.html', recent_items = recent_items)
return redirect(url_for('login'))
@app.route('/confirm/<token>')
def confirm_email(token):
    email = None
    try:
# print token
email = confirm_token(token)
# print "email ",email
except:
flash('The confirmation link is invalid or has expired.', category='error')
if email:
user = User.get_user(email)
if user.confirmed:
return 'Account already confirmed. Please login.'
else:
user.confirm()
else:
flash("Unexpected error", category="error")
return redirect(url_for('login'))
@app.route('/posted_items')
def posted_items():
if g.user:
user_items = Item.by_user(g.user['email'])
for i in user_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
#print i.src
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('posted_items.html', items = user_items, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/sell', methods=['GET', 'POST'])
def post_item():
if g.user:
if request.method == 'POST':
item = Item()
form_data = request.form
if request.files.get('photo'):
photo = request.files.get('photo')
else:
flash('Image is required', category = "error")
return render_template('upload1.html')
if form_data.get('item_name'):
item.name = form_data.get('item_name',None)
else:
flash('Item Name is required', category = "error")
return render_template('upload1.html')
if form_data.get('description'):
                if 25 <= len(form_data.get('description')) <= 250:
item.description = form_data.get('description',None)
else:
flash('Description length should be between 25-250 characters.', category = "error")
return render_template('upload1.html')
else:
flash('Description is required', category = "error")
return render_template('upload1.html')
if form_data.get('item_type'):
item.item_type = form_data.get('item_type', None).lower()
else:
flash('Item type is required', category = "error")
return render_template('upload1.html')
if int(form_data.get('original_price')) > 0:
#print "adadad"
item.original_price = form_data.get('original_price',None)
else:
#print "errrrrr"
flash('Invalid price', category = "error")
return render_template('upload1.html')
if int(form_data.get('mrp')) > 0:
#print "adadad"
item.mrp = form_data.get('mrp',None)
else:
#print "errrrrr"
flash('Invalid MRP.', category = "error")
return render_template('upload1.html')
item.user = g.user.get('email', None)
#item.date = datetime.datetime.now
db = get_db()
# try:
# filename = uploaded_photos.save(photo)
# except UploadNotAllowed:
# flash("The upload was not allowed")
# else:
# item.filename = filename
item.id = uuid.uuid4().hex
item.store(db)
db.put_attachment(item,photo,filename=str(item.name)+'.jpg',content_type='image/jpeg')
flash('Your item has been posted.', category = "error")
return redirect(url_for('after_login'))
return render_template('upload1.html')
else:
return redirect(url_for('login'))
@app.route('/view/', methods=['GET', 'POST'])
def view():
if g.user:
if request.method == 'POST':
query_text = request.form.get('search')
query_text = query_text.lower()
item_type_filter = Item.by_item_type(query_text) + Item.by_item_name(query_text)
for i in item_type_filter:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
print item_type_filter
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('search.html', items = item_type_filter, recent_items=recent_items)
else:
db = get_db()
it = Item.all(db)
for i in it:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
#print i.src
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('search.html', items = it, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/view/<id>', methods=['GET', 'POST'])
def item_details(id=None):
if request.method == 'POST':
owner = Item.get_item(id).user
if g.user['email'] == owner:
flash("You cannot place bid for this item.", category='error')
return redirect('/view/'+id)
else:
bid = Bid()
if int(request.form.get('amount')) > 0:
bid.amount = request.form.get('amount')
else:
flash('Invalid Bid', category = "error")
return redirect('/view/'+id)
bid.item = id
bid.user = g.user['email']
db = get_db()
bid.id = uuid.uuid4().hex
bid.store(db)
flash('Your bid has been placed successfully..!!!', category='error')
return redirect('/view/'+id)
else:
if(id):
db = get_db()
item = Item.get_item(id)
items = item._data
src = DATABASE_URL + id + '/' + item.name + '.jpg/'
distance = item.calculate_distance(g.user['email'])
return render_template('item_description.html', item=items, src=src, distance=distance)
@app.route('/view/<id>/bid')
def view_bids(id=None):
if g.user:
db = get_db()
bids = Bid.get_by_item(db,id)
for bid in bids:
x = User.get_user(bid.user)
bid.name = x.name
item = Item.get_item(id)
items = item._data
src = DATABASE_URL + id + '/' + item.name + '.jpg/'
flash('Buyer details have been sent to your emailid.', category='error')
return render_template('view_bids1.html',bids=bids,src=src,item=items)
else:
return redirect(url_for('login'))
@app.route('/view/<id>/bid/<bid_id>/accept', methods=['GET'])
def accept_bid(id=None, bid_id=None):
if g.user:
buyer_email = Bid.get_bid(bid_id).user
seller_email = Item.get_item(id).user
buyer = User.get_user(buyer_email)
seller = User.get_user(seller_email)
db = get_db()
item = Item.get_item(id)
items = item._data
src = DATABASE_URL + id + '/' + item.name + '.jpg/'
html = render_template('seller.html', name=buyer.name, email=buyer_email, contact=buyer.contact,
college=buyer.college, city=buyer.city, address=buyer.address,
item=items, src=src )
subject = "Buyer details"
send_email(seller_email, subject, html)
html1 = render_template('buyer.html', name=seller.name, email=seller_email, contact=seller.contact,
college=seller.college, city=seller.city, address=seller.address,
item=items, src=src)
subject1 = "Seller details"
send_email(buyer_email, subject1, html1)
item.confirmSold(id)
purchase = Purchased()
purchase.buyer = buyer_email
purchase.item_id = id
purchase.seller = seller.name
purchase.date = datetime.now()
db = get_db()
purchase.id = uuid.uuid4().hex
purchase.store(db)
print purchase
flash("Confirmation Email is sent to your email id.", category='error')
return redirect(url_for('view_bids', id=id))
return redirect(url_for('login'))
@app.route('/sold_items')
def sold_items():
if g.user:
user_items = Item.by_user(g.user['email'])
sold_items = []
for i in user_items:
if i.sold == 1:
sold_items.append(i)
for i in sold_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('sold_items.html', sold_items = sold_items, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/purchased_items')
def purchased_items():
if g.user:
purchase = Purchased.by_user(g.user['email'])
print "purchase",purchase
if len(purchase)>0:
purchased_items = []
            for i in purchase:
                item_id = i.item_id
                item = Item.get_item(item_id)
                if item:
                    item.seller = i.seller
                    item.sold_date = i.date.date()
                    purchased_items.append(item)
            for i in purchased_items:
                i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
            #print purchased_items
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('purchased_items.html', items = purchased_items, recent_items=recent_items)
else:
purchased_items = []
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('purchased_items.html', items = purchased_items, recent_items=recent_items)
return redirect(url_for('login'))
@app.route('/views/<filter>', methods=['GET', 'POST'])
def filter_byLocation(filter=None):
if g.user:
db = get_db()
it = Item.all(db)
items = []
for i in it:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
i.distance = i.calculate_distance(g.user['email'])
items.append(i)
items.sort(key = lambda x : x.distance[1])
recent_items = Item.by_date(4)
for i in recent_items:
i.src = DATABASE_URL + i.id + '/' + i.name + '.jpg/'
return render_template('search.html', items = items, recent_items=recent_items)
@app.route('/logout', methods=['GET'])
def logout():
if g.user:
session.pop('user', None)
flash('You have been successfully logged out.', category="error")
return render_template('login.html')
@app.route('/settings', methods=['GET', 'POST'])
def update():
if g.user:
if request.method == "POST":
form_data = request.form
#print form_data.get('placeid') == ""
email = g.user.get('email', None)
user = User.get_user(email)
#call user update function here
user.update(form_data.get('contact', None), form_data.get('password', None),
form_data.get('city', None), form_data.get('college', None),
form_data.get('address', None), form_data.get('placeid', None))
user_data = {}
user_data['name'] = user.name
user_data['email'] = user.email
user_data['city'] = user.city
user_data['college'] = user.college
user_data['address'] = user.address
user_data['contact'] = user.contact
flash("Account details have been updated.", category="error")
return render_template('profile.html', data = user_data)
else:
email = g.user.get('email', None)
user = User.get_user(email)
user_data = {}
user_data['name'] = user.name
user_data['email'] = user.email
user_data['city'] = user.city
user_data['college'] = user.college
user_data['address'] = user.address
user_data['contact'] = user.contact
return render_template('profile.html' , data = user_data)
else:
return redirect(url_for('login'))
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port), debug=True)
| apache-2.0 | -2,056,090,969,468,917,500 | 30.909425 | 125 | 0.535405 | false |
jucimarjr/IPC_2017-1 | lista1.5/lista1.5_questao15.py | 1 | 1739 | # ----------------------------------------------------------
# Introduction to Computer Programming - IPC
# Amazonas State University - UEA
# Prof. Jucimar Jr
#
# Carlos Eduardo Tapudima de Oliveira 1715310030
# Frederico Victor Alfaia Rodrigues 1515200030
# Joelson Pereira Lima 1715310060
# Lucas Gabriel Silveira Duarte 1715310053
# Reinaldo da Silva Vargas 1715310054
# Walter Nobre da Silva Conceição 1715310057
#
# 15 - João received his salary and needs to pay two overdue bills.
# João will have to pay a 2% fine on each bill. Write a program that
# calculates and shows how much of João's salary will be left.
# ----------------------------------------------------------
salary = float(input('Entre com seu salario: ')) # Read the salary
bill01 = float(input('Entre com o valor da primeira conta: ')) # Read the value of the first bill
bill02 = float(input('Entre com o valor da segunda conta: ')) # Read the value of the second bill
bill01 = bill01 * 1.02 # Apply the 2% fine
bill02 = bill02 * 1.02 # Apply the 2% fine
bills_acomulator = bill01+bill02 # Compute the total amount of bills to pay
remaining_salary = salary - bills_acomulator # Subtract the bills from the salary
print('O valor da primeira conta com 2% de multa é: ', bill01) # Show the first bill with the fine applied
print('O valor da segunda conta com 2% de multa é: ',bill02) # Show the second bill with the fine applied
print('O valor total de contas a pagar é: ',bills_acomulator) # Show the sum of the two bills
print('O valor que resta do salário após o pagamento é: ', round(remaining_salary,2)) # Show the remaining salary
| apache-2.0 | 6,655,789,381,350,949,000 | 60.214286 | 114 | 0.670945 | false |
jtpereyda/boofuzz | boofuzz/pgraph/graph.py | 1 | 18926 | #
# pGRAPH
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import copy
import pydot
from builtins import object
from future.utils import listvalues
class Graph(object):
"""
@todo: Add support for clusters
@todo: Potentially swap node list with a node dictionary for increased performance
"""
id = None
clusters = []
edges = {}
nodes = {}
def __init__(self, graph_id=None):
self.id = graph_id
self.clusters = []
self.edges = {}
self.nodes = {}
def add_cluster(self, cluster):
"""
Add a pgraph cluster to the graph.
@type cluster: pGRAPH Cluster
@param cluster: Cluster to add to graph
"""
self.clusters.append(cluster)
return self
def add_edge(self, graph_edge, prevent_dups=True):
"""
Add a pgraph edge to the graph. Ensures a node exists for both the source and destination of the edge.
@type graph_edge: pGRAPH Edge
@param graph_edge: Edge to add to graph
@type prevent_dups: Boolean
@param prevent_dups: (Optional, Def=True) Flag controlling whether or not the addition of duplicate edges is ok
"""
if prevent_dups:
if graph_edge.id in self.edges:
return self
# ensure the source and destination nodes exist.
if self.find_node("id", graph_edge.src) is not None and self.find_node("id", graph_edge.dst) is not None:
self.edges[graph_edge.id] = graph_edge
return self
def add_graph(self, other_graph):
"""
Alias of graph_cat(). Concatenate the other graph into the current one.
@todo: Add support for clusters
@see: graph_cat()
@type other_graph: pgraph.Graph
@param other_graph: Graph to concatenate into this one.
"""
return self.graph_cat(other_graph)
def add_node(self, node):
"""
Add a pgraph node to the graph. Ensures a node with the same id does not already exist in the graph.
@type node: pGRAPH Node
@param node: Node to add to graph
"""
node.number = len(self.nodes)
if node.id not in self.nodes:
self.nodes[node.id] = node
return self
def del_cluster(self, cluster_id):
"""
Remove a cluster from the graph.
@type cluster_id: Mixed
@param cluster_id: Identifier of cluster to remove from graph
"""
for cluster in self.clusters:
if cluster.id == cluster_id:
self.clusters.remove(cluster)
break
return self
def del_edge(self, graph_id=None, src=None, dst=None):
"""
Remove an edge from the graph. There are two ways to call this routine, with an edge id::
graph.del_edge(id)
or by specifying the edge source and destination::
graph.del_edge(src=source, dst=destination)
@type graph_id: Mixed
@param graph_id: (Optional) Identifier of edge to remove from graph
@type src: Mixed
@param src: (Optional) Source of edge to remove from graph
@type dst: Mixed
@param dst: (Optional) Destination of edge to remove from graph
"""
if not graph_id:
graph_id = (src << 32) + dst # pytype: disable=unsupported-operands
if graph_id in self.edges:
del self.edges[graph_id]
return self
def del_graph(self, other_graph):
"""
Alias of graph_sub(). Remove the elements shared between the current graph and other graph from the current
graph.
@todo: Add support for clusters
@see: graph_sub()
@type other_graph: pgraph.Graph
@param other_graph: Graph to diff/remove against
"""
return self.graph_sub(other_graph)
def del_node(self, node_id):
"""
Remove a node from the graph.
@type node_id: Mixed
@param node_id: Identifier of node to remove from graph
"""
if node_id in self.nodes:
del self.nodes[node_id]
return self
def edges_from(self, edge_id):
"""
Enumerate the edges from the specified node.
@type edge_id: Mixed
@param edge_id: Identifier of node to enumerate edges from
@rtype: list
@return: List of edges from the specified node
"""
return [edge_value for edge_value in listvalues(self.edges) if edge_value.src == edge_id]
def edges_to(self, edge_id):
"""
Enumerate the edges to the specified node.
@type edge_id: Mixed
@param edge_id: Identifier of node to enumerate edges to
@rtype: list
@return: List of edges to the specified node
"""
return [edge_value for edge_value in listvalues(self.edges) if edge_value.dst == edge_id]
def find_cluster(self, attribute, value):
"""
Find and return the cluster with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Cluster, if attribute / value pair is matched. None otherwise.
"""
for cluster in self.clusters:
if hasattr(cluster, attribute):
if getattr(cluster, attribute) == value:
return cluster
return None
def find_cluster_by_node(self, attribute, value):
"""
Find and return the cluster that contains the node with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Cluster, if node with attribute / value pair is matched. None otherwise.
"""
for cluster in self.clusters:
for node in cluster:
if hasattr(node, attribute):
if getattr(node, attribute) == value:
return cluster
return None
def find_edge(self, attribute, value):
"""
Find and return the edge with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: Mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Edge, if attribute / value pair is matched. None otherwise.
"""
# if the attribute to search for is the id, simply return the edge from the internal hash.
if attribute == "id" and value in self.edges:
return self.edges[value]
# step through all the edges looking for the given attribute/value pair.
else:
# TODO: Verify that this actually works? Was broken when I got here ;-P
for node_edge in listvalues(self.edges):
if hasattr(node_edge, attribute):
if getattr(node_edge, attribute) == value:
return node_edge
return None
def find_node(self, attribute, value):
"""
Find and return the node with the specified attribute / value pair.
@type attribute: str
@param attribute: Attribute name we are looking for
@type value: mixed
@param value: Value of attribute we are looking for
@rtype: Mixed
@return: Node, if attribute / value pair is matched. None otherwise.
"""
# if the attribute to search for is the id, simply return the node from the internal hash.
if attribute == "id" and value in self.nodes:
return self.nodes[value]
# step through all the nodes looking for the given attribute/value pair.
else:
for node in listvalues(self.nodes):
if hasattr(node, attribute):
if getattr(node, attribute) == value:
return node
return None
def graph_cat(self, other_graph):
"""
Concatenate the other graph into the current one.
@todo: Add support for clusters
@type other_graph: pgraph.Graph
@param other_graph: Graph to concatenate into this one.
"""
for other_node in listvalues(other_graph.nodes):
self.add_node(other_node)
for other_edge in listvalues(other_graph.edges):
self.add_edge(other_edge)
return self
def graph_down(self, from_node_id, max_depth=-1):
"""
Create a new graph, looking down, from the specified node id to the specified depth.
@type from_node_id: pgraph.node
@param from_node_id: Node to use as start of down graph
@type max_depth: Integer
@param max_depth: (Optional, Def=-1) Number of levels to include in down graph (-1 for infinite)
@rtype: pgraph.Graph
@return: Down graph around specified node.
"""
down_graph = Graph()
from_node = self.find_node("id", from_node_id)
if not from_node:
print("unable to resolve node {:08x}".format(from_node_id))
raise Exception
levels_to_process = []
current_depth = 1
levels_to_process.append([from_node])
for level in levels_to_process:
next_level = []
# noinspection PyChainedComparisons
if current_depth > max_depth and max_depth != -1:
break
for node in level:
down_graph.add_node(copy.copy(node))
for edge in self.edges_from(node.id):
to_add = self.find_node("id", edge.dst)
if not down_graph.find_node("id", edge.dst):
next_level.append(to_add)
down_graph.add_node(copy.copy(to_add))
down_graph.add_edge(copy.copy(edge))
if next_level:
levels_to_process.append(next_level)
current_depth += 1
return down_graph
def graph_intersect(self, other_graph):
"""
Remove all elements from the current graph that do not exist in the other graph.
@todo: Add support for clusters
@type other_graph: pgraph.Graph
@param other_graph: Graph to intersect with
"""
for node in listvalues(self.nodes):
if not other_graph.find_node("id", node.id):
self.del_node(node.id)
for edge in listvalues(self.edges):
if not other_graph.find_edge("id", edge.id):
self.del_edge(edge.id)
return self
def graph_proximity(self, center_node_id, max_depth_up=2, max_depth_down=2):
"""
Create a proximity graph centered around the specified node.
@type center_node_id: pgraph.node
@param center_node_id: Node to use as center of proximity graph
@type max_depth_up: Integer
@param max_depth_up: (Optional, Def=2) Number of upward levels to include in proximity graph
@type max_depth_down: Integer
@param max_depth_down: (Optional, Def=2) Number of downward levels to include in proximity graph
@rtype: pgraph.Graph
@return: Proximity graph around specified node.
"""
prox_graph = self.graph_down(center_node_id, max_depth_down)
prox_graph.add_graph(self.graph_up(center_node_id, max_depth_up))
return prox_graph
def graph_sub(self, other_graph):
"""
Remove the elements shared between the current graph and other graph from the current
graph.
@todo: Add support for clusters
@type other_graph: pgraph.Graph
@param other_graph: Graph to diff/remove against
"""
for other_node in listvalues(other_graph.nodes):
self.del_node(other_node.id)
for other_edge in listvalues(other_graph.edges):
self.del_edge(None, other_edge.src, other_edge.dst)
return self
def graph_up(self, from_node_id, max_depth=-1):
"""
Create a new graph, looking up, from the specified node id to the specified depth.
@type from_node_id: pgraph.node
@param from_node_id: Node to use as start of up graph
@type max_depth: Integer
@param max_depth: (Optional, Def=-1) Number of levels to include in up graph (-1 for infinite)
@rtype: pgraph.Graph
@return: Up graph to the specified node.
"""
up_graph = Graph()
from_node = self.find_node("id", from_node_id)
levels_to_process = []
current_depth = 1
levels_to_process.append([from_node])
for level in levels_to_process:
next_level = []
# noinspection PyChainedComparisons
if current_depth > max_depth and max_depth != -1:
break
for node in level:
up_graph.add_node(copy.copy(node))
for edge in self.edges_to(node.id):
to_add = self.find_node("id", edge.src)
if not up_graph.find_node("id", edge.src):
next_level.append(to_add)
up_graph.add_node(copy.copy(to_add))
up_graph.add_edge(copy.copy(edge))
if next_level:
levels_to_process.append(next_level)
current_depth += 1
return up_graph
def render_graph_gml(self):
"""
Render the GML graph description.
:returns: GML graph description.
:rtype: str
"""
gml = 'Creator "pGRAPH - Pedram Amini <[email protected]>"\n'
gml += "directed 1\n"
# open the graph tag.
gml += "graph [\n"
# add the nodes to the GML definition.
for node in listvalues(self.nodes):
gml += node.render_node_gml()
# add the edges to the GML definition.
for edge in listvalues(self.edges):
gml += edge.render_edge_gml(self)
# close the graph tag.
gml += "\n]\n"
"""
TODO: Complete cluster rendering
# if clusters exist.
if len(self.clusters):
# open the rootcluster tag.
gml += 'rootcluster [\n'
# add the clusters to the GML definition.
for cluster in self.clusters:
gml += cluster.render()
# add the clusterless nodes to the GML definition.
for node in self.nodes:
if not self.find_cluster_by_node("id", node.id):
gml += ' vertex "%d"\n' % node.id
# close the rootcluster tag.
gml += ']\n'
"""
return gml
def render_graph_graphviz(self):
"""
Render the graphviz graph structure.
Example to create a png:
.. code-block::
with open('somefile.png', 'wb') as file:
file.write(session.render_graph_graphviz().create_png())
:returns: Pydot object representing entire graph
:rtype: pydot.Dot
"""
dot_graph = pydot.Dot()
for node in listvalues(self.nodes):
dot_graph.add_node(node.render_node_graphviz())
for edge in listvalues(self.edges):
dot_graph.add_edge(edge.render_edge_graphviz())
return dot_graph
def render_graph_udraw(self):
"""
Render the uDraw graph description.
:returns: uDraw graph description.
:rtype: str
"""
udraw = "["
# render each of the nodes in the graph.
# the individual nodes will handle their own edge rendering.
for node in listvalues(self.nodes):
udraw += node.render_node_udraw(self)
udraw += ","
        # trim the extraneous comma and close the graph.
udraw = udraw[0:-1] + "\n]"
return udraw
def render_graph_udraw_update(self):
"""
Render the uDraw graph update description.
:returns: uDraw graph description.
:rtype: str
"""
udraw = "["
for node in listvalues(self.nodes):
udraw += node.render_node_udraw_update()
udraw += ","
for edge in listvalues(self.edges):
udraw += edge.render_edge_udraw_update()
udraw += ","
        # trim the extraneous comma and close the graph.
udraw = udraw[0:-1] + "]"
return udraw
def update_node_id(self, current_id, new_id):
"""
Simply updating the id attribute of a node will sever the edges to / from the given node. This routine will
correctly update the edges as well.
@type current_id: long
@param current_id: Current ID of node whose ID we want to update
@type new_id: long
@param new_id: New ID to update to.
"""
if current_id not in self.nodes:
return
# update the node.
node = self.nodes[current_id]
del self.nodes[current_id]
node.id = new_id
self.nodes[node.id] = node
# update the edges.
for edge in [edge for edge in listvalues(self.edges) if current_id in (edge.src, edge.dst)]:
del self.edges[edge.id]
if edge.src == current_id:
edge.src = new_id
if edge.dst == current_id:
edge.dst = new_id
edge.id = (edge.src << 32) + edge.dst
self.edges[edge.id] = edge
def sorted_nodes(self):
"""
Return a list of the nodes within the graph, sorted by id.
@rtype: List
@return: List of nodes, sorted by id.
"""
node_keys = list(self.nodes)
node_keys.sort()
return [self.nodes[key] for key in node_keys]
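# Minimal usage sketch (illustrative only, not part of the original module).
# The stand-in node/edge classes below are hypothetical; they merely provide
# the ``id`` / ``src`` / ``dst`` attributes that Graph expects from the real
# pgraph node and edge types.
if __name__ == "__main__":
    class _StubNode(object):
        def __init__(self, node_id):
            self.id = node_id
            self.number = 0
    class _StubEdge(object):
        def __init__(self, src, dst):
            self.src = src
            self.dst = dst
            self.id = (src << 32) + dst
    g = Graph("demo")
    g.add_node(_StubNode(1))
    g.add_node(_StubNode(2))
    g.add_edge(_StubEdge(1, 2))
    # walk the structure we just built.
    print([e.id for e in g.edges_from(1)])          # -> [4294967298]
    print(len(g.graph_down(1, max_depth=1).nodes))  # -> 2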
| gpl-2.0 | -7,393,530,051,731,024,000 | 29.427653 | 119 | 0.57524 | false |
mortbauer/openfoam-extend-Breeder-other-scripting-PyFoam | PyFoam/Basics/DataStructures.py | 1 | 16332 | """Data structures in Foam-Files that can't be directly represented by Python-Structures"""
from __future__ import division
from copy import deepcopy
from collections import OrderedDict
import math
import re
# import FoamFileGenerator in the end to avoid circular dependencies
from PyFoam.ThirdParty.six import integer_types,PY3,string_types
if PY3:
def cmp(a,b):
if a<b:
return -1
elif a==b:
return 0
else:
return 1
class FoamDataType(object):
def __repr__(self):
return "'"+str(self)+"'"
def __eq__(self,other):
"""Implementation to make __cmp__ work again in Python3
Implementing this method means that these objects are not hashable.
But that is OK
"""
return self.__cmp__(other)==0
def __lt__(self,other):
"Implementation to make __cmp__ work again in Python3"
return self.__cmp__(other)<0
def __ne__(self,other):
return self.__cmp__(other)!=0
def __gt__(self,other):
return self.__cmp__(other)>0
def __ge__(self,other):
return self.__cmp__(other)>=0
def __le__(self,other):
return self.__cmp__(other)<=0
class Field(FoamDataType):
def __init__(self,val,name=None):
self.val=val
self.name=name
if type(val) in[list,UnparsedList,BinaryList]:
self.uniform=False
elif self.name==None:
self.uniform=True
else:
raise TypeError("Type",type(val),"of value",val,"can not be used to determine uniformity")
def __str__(self):
result=""
if self.uniform:
result+="uniform "
else:
result+="nonuniform "
if self.name:
result+=self.name+" "
result+=str(
PyFoam.Basics.FoamFileGenerator.FoamFileGenerator(
self.val,
longListThreshold=-1,
useFixedType=False
))
return result
def __cmp__(self,other):
if other==None or type(other)!=Field:
return 1
if self.uniform!=other.uniform:
return cmp(self.uniform,other.uniform)
elif self.name!=other.name:
return cmp(self.name,other.name)
else:
return cmp(self.val,other.val)
def __getitem__(self,key):
assert(not self.uniform)
return self.val[key]
def __setitem__(self,key,value):
assert(not self.uniform)
self.val[key]=value
def isUniform(self):
return self.uniform
def isBinary(self):
return type(self.val)==BinaryList
def binaryString(self):
return "nonuniform "+self.name+" <BINARY DATA>"
def value(self):
return self.val
def setUniform(self,data):
self.val=data
self.uniform=True
self.name=None
class Dimension(FoamDataType):
def __init__(self,*dims):
assert(len(dims)==7)
self.dims=list(dims)
def __str__(self):
result="[ "
for v in self.dims:
result+=str(v)+" "
result+="]"
return result
def __cmp__(self,other):
if other==None:
return 1
return cmp(self.dims,other.dims)
def __getitem__(self,key):
return self.dims[key]
def __setitem__(self,key,value):
self.dims[key]=value
class FixedLength(FoamDataType):
def __init__(self,vals):
self.vals=vals[:]
def __str__(self):
return "("+" ".join(["%g"%v for v in self.vals])+")"
def __cmp__(self,other):
if other==None or not issubclass(type(other),FixedLength):
return 1
return cmp(self.vals,other.vals)
def __getitem__(self,key):
return self.vals[key]
def __setitem__(self,key,value):
self.vals[key]=value
def __len__(self):
return len(self.vals)
class Vector(FixedLength):
def __init__(self,x,y,z):
FixedLength.__init__(self,[x,y,z])
def __add__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]+y[0],x[1]+y[1],x[2]+y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]+y,x[1]+y,x[2]+y)
else:
return NotImplemented
def __radd__(self,y):
x=self
if type(y) in integer_types+(float,):
return Vector(x[0]+y,x[1]+y,x[2]+y)
else:
return NotImplemented
def __sub__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]-y[0],x[1]-y[1],x[2]-y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]-y,x[1]-y,x[2]-y)
else:
return NotImplemented
def __rsub__(self,y):
x=self
if type(y) in integer_types+(float,):
return Vector(y-x[0],y-x[1],y-x[2])
else:
return NotImplemented
def __mul__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]*y[0],x[1]*y[1],x[2]*y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]*y,x[1]*y,x[2]*y)
else:
return NotImplemented
def __rmul__(self,y):
x=self
if type(y) in integer_types+(float,):
return Vector(y*x[0],y*x[1],y*x[2])
else:
return NotImplemented
def __div__(self,y):
x=self
if type(y)==Vector:
return Vector(x[0]/y[0],x[1]/y[1],x[2]/y[2])
elif type(y) in integer_types+(float,):
return Vector(x[0]/y,x[1]/y,x[2]/y)
else:
return NotImplemented
def __truediv__(self,y):
return self.__div__(y)
def __xor__(self,y):
x=self
if type(y)==Vector:
return Vector(x[1]*y[2]-x[2]*y[1],
x[2]*y[0]-x[0]*y[2],
x[0]*y[1]-x[1]*y[0])
else:
return NotImplemented
def __abs__(self):
x=self
return math.sqrt(x[0]*x[0]+x[1]*x[1]+x[2]*x[2])
def __neg__(self):
x=self
return Vector(-x[0],-x[1],-x[2])
def __pos__(self):
x=self
return Vector( x[0], x[1], x[2])
class Tensor(FixedLength):
def __init__(self,v1,v2,v3,v4,v5,v6,v7,v8,v9):
FixedLength.__init__(self,[v1,v2,v3,v4,v5,v6,v7,v8,v9])
class SymmTensor(FixedLength):
def __init__(self,v1,v2,v3,v4,v5,v6):
FixedLength.__init__(self,[v1,v2,v3,v4,v5,v6])
class BoolProxy(object):
"""Wraps a boolean parsed from a file. Optionally stores a textual
representation
"""
TrueStrings=["on",
"yes",
"true",
# "y" # this breaks parsing certain files
]
FalseStrings=[
"off",
"no",
"false",
# "n", # this breaks parsing certain files
"none",
"invalid"
]
def __init__(self,val=None,textual=None):
if val==None and textual==None:
raise TypeError("'BoolProxy' initialized without values")
elif val==None:
if textual in BoolProxy.TrueStrings:
self.val=True
elif textual in BoolProxy.FalseStrings:
self.val=False
else:
raise TypeError(str(textual)+" not in "+str(BoolProxy.TrueStrings)
+" or "+str(BoolProxy.TrueStrings))
else:
if val not in [True,False]:
raise TypeError(str(val)+" is not a boolean")
self.val=val
self.textual=textual
if self.textual:
if self.val:
if self.textual not in BoolProxy.TrueStrings:
raise TypeError(self.textual+" not in "
+str(BoolProxy.TrueStrings))
else:
if self.textual not in BoolProxy.FalseStrings:
raise TypeError(self.textual+" not in "
+str(BoolProxy.FalseStrings))
def __nonzero__(self):
return self.val
# for Python 3
def __bool__(self):
return self.val
def __str__(self):
if self.textual==None:
if self.val:
return "yes"
else:
return "no"
else:
return self.textual
def __repr__(self):
return self.__str__()
def __eq__(self,o):
if type(o) in [bool,BoolProxy]:
return self.val==o
elif isinstance(o,string_types):
if self.textual==o:
return True
else:
try:
return self.val==BoolProxy(textual=o)
except TypeError:
return False
else:
# raise TypeError("Can't compare BoolProxy with "+str(type(o)))
return self.val==o
def __ne__(self,o):
if type(o) in [bool,BoolProxy]:
return self.val!=o
elif isinstance(o,string_types):
if self.textual!=o:
return True
else:
try:
return self.val!=BoolProxy(textual=o)
except TypeError:
return False
else:
raise TypeError("Can't compare BoolProxy with "+str(type(o)))
class DictRedirection(object):
"""This class is in charge of handling redirections to other directories"""
def __init__(self,fullCopy,reference,name):
self._fullCopy=fullCopy
self._reference=reference
self._name=name
def useAsRedirect(self):
self._fullCopy=None
def getContent(self):
result=self._fullCopy
self._fullCopy=None
return result
def __call__(self):
return self._reference
def __str__(self):
return "$"+self._name
def __float__(self):
return float(self._reference)
def keys(self):
if self._fullCopy:
return self._fullCopy.keys()
else:
return self._reference.keys()
class DictProxy(dict):
"""A class that acts like a dictionary, but preserves the order
of the entries. Used to beautify the output"""
def __init__(self):
dict.__init__(self)
self._order=[]
self._decoration={}
self._regex=[]
self._redirects=[]
def isRegexp(self,key):
if type(key)==str:
if key[0]=='"' and key[-1]=='"':
return True
return False
def __setitem__(self,key,value):
if self.isRegexp(key):
exp=re.compile(key[1:-1])
self._regex=[(key,exp,value)]+self._regex
dict.__setitem__(self,key,value)
else:
dict.__setitem__(self,key,value)
if key not in self._order:
self._order.append(key)
def __getitem__(self,key):
try:
return dict.__getitem__(self,key)
except KeyError:
for k,e,v in self._regex:
if e.match(key):
return v
for r in self._redirects:
try:
return r()[key]
except KeyError:
pass
raise KeyError(key)
def __delitem__(self,key):
dict.__delitem__(self,key)
self._order.remove(key)
if key in self._decoration:
del self._decoration[key]
def __deepcopy__(self,memo):
new=DictProxy()
for k in self._order:
if type(k)==DictRedirection:
new.addRedirection(k)
else:
try:
new[k]=deepcopy(self[k],memo)
except KeyError:
new[k]=deepcopy(self.getRegexpValue(k),memo)
return new
def __contains__(self,key):
if dict.__contains__(self,key):
return True
else:
for k,e,v in self._regex:
if e.match(key):
return True
for r in self._redirects:
if key in r():
return True
return False
def __enforceString(self,v,toString):
if not isinstance(v,string_types) and toString:
r=str(v)
if isinstance(v,(list,dict)):
r='"'+r+'"'
return r
else:
return v
def update(self,other=None,toString=False,**kwargs):
"""Emulate the regular update of dict"""
if other:
if hasattr(other,"keys"):
for k in other.keys():
self[k]=self.__enforceString(other[k],toString)
else:
for k,v in other:
self[k]=self.__enforceString(v,toString)
for k in kwargs:
self[k]=self.__enforceString(kwargs[k],toString)
def keys(self):
result=[x for x in self._order if x not in self._redirects]
for r in self._redirects:
for k in r.keys():
if not k in result:
result.append(k)
return result
def __iter__(self):
s=set()
for k in self._order:
if k not in self._redirects:
s.add(k)
yield k
for r in self._redirects:
for k in r.keys():
if not k in s:
s.add(k)
yield k
def __str__(self):
first=True
result="{"
for k in self.keys():
v=self[k]
if first:
first=False
else:
result+=", "
result+="%s: %s" % (repr(k),repr(v))
result+="}"
return result
def iteritems(self):
lst=[]
for k in self:
lst.append((k,self[k]))
return lst
# needed for python 3. Should be a generator, but ...
def items(self):
return self.iteritems()
def addDecoration(self,key,text):
if key in self:
if key not in self._decoration:
self._decoration[key]=""
self._decoration[key]+=text
def getDecoration(self,key):
if key in self._decoration:
return " \t"+self._decoration[key]
else:
return ""
def getRegexpValue(self,key):
for k,e,v in self._regex:
if k==key:
return v
raise KeyError(key)
def addRedirection(self,redir):
self._order.append(redir)
redir.useAsRedirect()
self._redirects.append(redir)
class TupleProxy(list):
"""Enables Tuples to be manipulated"""
def __init__(self,tup=()):
list.__init__(self,tup)
class Unparsed(object):
"""A class that encapsulates an unparsed string"""
def __init__(self,data):
self.data=data
def __str__(self):
return self.data
def __hash__(self):
return hash(self.data)
def __lt__(self,other):
return self.data<other.data
class BinaryBlob(Unparsed):
"""Represents a part of the file with binary data in it"""
def __init__(self,data):
Unparsed.__init__(self,data)
class Codestream(str):
"""A class that encapsulates an codestream string"""
def __str__(self):
return "#{" + str.__str__(self) + "#}"
class UnparsedList(object):
"""A class that encapsulates a list that was not parsed for
performance reasons"""
def __init__(self,lngth,data):
self.data=data
self.length=lngth
def __len__(self):
return self.length
def __cmp__(self,other):
return cmp(self.data,other.data)
def __eq__(self,other):
return self.data==other.data
def __lt__(self,other):
return self.data<other.data
class BinaryList(UnparsedList):
"""A class that represents a list that is saved as binary data"""
def __init__(self,lngth,data):
UnparsedList.__init__(self,lngth,data)
def makePrimitiveString(val):
"""Make strings of types that might get written to a directory"""
if isinstance(val,(Dimension,FixedLength,BoolProxy)):
return str(val)
else:
return val
# Moved to the end to avoid circular dependencies
import PyFoam.Basics.FoamFileGenerator
# Should work with Python3 and Python2
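# A small illustrative demo of the helper types defined above (a sketch added
# for clarity, not part of the PyFoam API; it assumes this module's own
# imports resolve).
if __name__ == "__main__":
    v = Vector(1., 2., 3.)
    print(v + Vector(1., 0., 0.))                    # element-wise -> (2 2 3)
    print(v * 2)                                     # scalar multiply -> (2 4 6)
    print(Vector(1., 0., 0.) ^ Vector(0., 1., 0.))   # cross product -> (0 0 1)
    print(abs(Vector(3., 4., 0.)))                   # magnitude -> 5.0
    d = DictProxy()
    d["solver"] = "PCG"
    d['"relTol.*"'] = 0.01                           # quoted keys act as regex fallbacks
    print(d["relTol0"])                              # matched by the regex -> 0.01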
| gpl-2.0 | -4,579,020,472,537,835,500 | 25.995041 | 102 | 0.515246 | false |
jtraver/dev | python3/selenium/apihelper.py | 1 | 1826 | #!/usr/bin/env python3
"""Cheap and simple API helper
This program is part of "Dive Into Python", a free Python book for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
"""
__author__ = "Mark Pilgrim ([email protected])"
__version__ = "$Revision: 1.3 $"
__date__ = "$Date: 2004/05/05 21:57:19 $"
__copyright__ = "Copyright (c) 2001 Mark Pilgrim"
__license__ = "Python"
# While this is a good example script to teach about introspection,
# in real life it has been superseded by PyDoc, which is part of the
# standard library in Python 2.1 and later.
#
# Your IDE may already import the "help" function from pydoc
# automatically on startup; if not, do this:
#
# >>> from pydoc import help
#
# The help function in this module takes the object itself to get
# help on, but PyDoc can also take a string, like this:
#
# >>> help("string") # gets help on the string module
# >>> help("apihelper.help") # gets help on the function below
# >>> help() # enters an interactive help mode
#
# PyDoc can also act as an HTTP server to dynamically produce
# HTML-formatted documentation of any module in your path.
# That's wicked cool. Read more about PyDoc here:
# http://www.onlamp.com/pub/a/python/2001/04/18/pydoc.html
def info(object, spacing=10, collapse=1):
"""Print methods and doc strings.
Takes module, class, list, dictionary, or string."""
methodList = [e for e in dir(object) if callable(getattr(object, e))]
processFunc = collapse and (lambda s: " ".join(s.split())) or (lambda s: s)
print("\n".join(["\n%s\n\t%s" %
(method.ljust(spacing),
processFunc(str(getattr(object, method).__doc__)))
for method in methodList]))
if __name__ == "__main__":
print(help.__doc__)
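    # A short, hypothetical demonstration of the helper itself: introspect the
    # built-in list type and print its callable attributes with their docs.
    info(list, spacing=14)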
| mit | 5,549,527,252,694,956,000 | 34.803922 | 79 | 0.664294 | false |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/resources/types/domain_category.py | 1 | 3952 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.resources',
marshal='google.ads.googleads.v7',
manifest={
'DomainCategory',
},
)
class DomainCategory(proto.Message):
r"""A category generated automatically by crawling a domain. If a
campaign uses the DynamicSearchAdsSetting, then domain
categories will be generated for the domain. The categories can
be targeted using WebpageConditionInfo. See:
https://support.google.com/google-ads/answer/2471185
Attributes:
resource_name (str):
Output only. The resource name of the domain category.
Domain category resource names have the form:
``customers/{customer_id}/domainCategories/{campaign_id}~{category_base64}~{language_code}``
campaign (str):
Output only. The campaign this category is
recommended for.
category (str):
Output only. Recommended category for the
website domain. e.g. if you have a website about
electronics, the categories could be "cameras",
"televisions", etc.
language_code (str):
Output only. The language code specifying the
language of the website. e.g. "en" for English.
The language can be specified in the
DynamicSearchAdsSetting required for dynamic
search ads. This is the language of the pages
from your website that you want Google Ads to
find, create ads for, and match searches with.
domain (str):
Output only. The domain for the website. The
domain can be specified in the
DynamicSearchAdsSetting required for dynamic
search ads.
coverage_fraction (float):
Output only. Fraction of pages on your site
that this category matches.
category_rank (int):
Output only. The position of this category in
the set of categories. Lower numbers indicate a
better match for the domain. null indicates not
recommended.
has_children (bool):
Output only. Indicates whether this category
has sub-categories.
recommended_cpc_bid_micros (int):
Output only. The recommended cost per click
for the category.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
campaign = proto.Field(
proto.STRING,
number=10,
optional=True,
)
category = proto.Field(
proto.STRING,
number=11,
optional=True,
)
language_code = proto.Field(
proto.STRING,
number=12,
optional=True,
)
domain = proto.Field(
proto.STRING,
number=13,
optional=True,
)
coverage_fraction = proto.Field(
proto.DOUBLE,
number=14,
optional=True,
)
category_rank = proto.Field(
proto.INT64,
number=15,
optional=True,
)
has_children = proto.Field(
proto.BOOL,
number=16,
optional=True,
)
recommended_cpc_bid_micros = proto.Field(
proto.INT64,
number=17,
optional=True,
)
__all__ = tuple(sorted(__protobuf__.manifest))
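# Hypothetical construction example (not part of the generated code). Resource
# messages like this are normally returned by the API rather than built by
# hand, but proto-plus lets them be constructed directly, e.g. for tests.
if __name__ == "__main__":
    category = DomainCategory(
        resource_name="customers/123/domainCategories/456~Y2FtZXJhcw~en",
        category="cameras",
        language_code="en",
        coverage_fraction=0.12,
    )
    print(category)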
| apache-2.0 | 2,978,796,001,538,029,000 | 30.870968 | 104 | 0.620445 | false |
spawnedc/MeCanBlog | dbindexer/lookups.py | 1 | 8808 | from django.db import models
from djangotoolbox.fields import ListField
from copy import deepcopy
import re
regex = type(re.compile(''))
class LookupDoesNotExist(Exception):
pass
class LookupBase(type):
def __new__(cls, name, bases, attrs):
new_cls = type.__new__(cls, name, bases, attrs)
if not isinstance(new_cls.lookup_types, (list, tuple)):
new_cls.lookup_types = (new_cls.lookup_types, )
return new_cls
class ExtraFieldLookup(object):
'''Default is to behave like an exact filter on an ExtraField.'''
__metaclass__ = LookupBase
lookup_types = 'exact'
def __init__(self, model=None, field_name=None, lookup_def=None,
new_lookup='exact', field_to_add=models.CharField(
max_length=500, editable=False, null=True)):
self.field_to_add = field_to_add
self.new_lookup = new_lookup
self.contribute(model, field_name, lookup_def)
def contribute(self, model, field_name, lookup_def):
self.model = model
self.field_name = field_name
self.lookup_def = lookup_def
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name, self.lookup_types[0])
def convert_lookup(self, value, lookup_type):
        # TODO: can value be a list or tuple? (for the 'in' lookup, yes)
if isinstance(value, (tuple, list)):
value = [self._convert_lookup(val, lookup_type)[1] for val in value]
else:
_, value = self._convert_lookup(value, lookup_type)
return self.new_lookup, value
def _convert_lookup(self, value, lookup_type):
return lookup_type, value
def convert_value(self, value):
if value is not None:
if isinstance(value, (tuple, list)):
value = [self._convert_value(val) for val in value]
else:
value = self._convert_value(value)
return value
def _convert_value(self, value):
return value
def matches_filter(self, model, field_name, lookup_type, value):
return self.model == model and lookup_type in self.lookup_types \
and field_name == self.field_name
@classmethod
def matches_lookup_def(cls, lookup_def):
if lookup_def in cls.lookup_types:
return True
return False
def get_field_to_add(self, field_to_index):
field_to_add = deepcopy(self.field_to_add)
if isinstance(field_to_index, ListField):
field_to_add = ListField(field_to_add, editable=False, null=True)
return field_to_add
class DateLookup(ExtraFieldLookup):
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'exact',
'field_to_add': models.IntegerField(editable=False, null=True)}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value
class Day(DateLookup):
lookup_types = 'day'
def _convert_value(self, value):
return value.day
class Month(DateLookup):
lookup_types = 'month'
def _convert_value(self, value):
return value.month
class Year(DateLookup):
lookup_types = 'year'
def _convert_value(self, value):
return value.year
class Weekday(DateLookup):
lookup_types = 'week_day'
def _convert_value(self, value):
return value.isoweekday()
class Contains(ExtraFieldLookup):
lookup_types = 'contains'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith',
'field_to_add': ListField(models.CharField(500),
editable=False, null=True)
}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def get_field_to_add(self, field_to_index):
# always return a ListField of CharFields even in the case of
# field_to_index being a ListField itself!
return deepcopy(self.field_to_add)
def convert_value(self, value):
new_value = []
if isinstance(value, (tuple, list)):
for val in value:
new_value.extend(self.contains_indexer(val))
else:
new_value = self.contains_indexer(value)
return new_value
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value
def contains_indexer(self, value):
# In indexing mode we add all postfixes ('o', 'lo', ..., 'hello')
result = []
if value:
result.extend([value[count:] for count in range(len(value))])
return result
class Icontains(Contains):
lookup_types = 'icontains'
def convert_value(self, value):
return [val.lower() for val in Contains.convert_value(self, value)]
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
class Iexact(ExtraFieldLookup):
lookup_types = 'iexact'
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
def _convert_value(self, value):
return value.lower()
class Istartswith(ExtraFieldLookup):
lookup_types = 'istartswith'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith'}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value.lower()
def _convert_value(self, value):
return value.lower()
class Endswith(ExtraFieldLookup):
lookup_types = 'endswith'
def __init__(self, *args, **kwargs):
defaults = {'new_lookup': 'startswith'}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value[::-1]
def _convert_value(self, value):
return value[::-1]
class Iendswith(Endswith):
lookup_types = 'iendswith'
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, value[::-1].lower()
def _convert_value(self, value):
return value[::-1].lower()
class RegexLookup(ExtraFieldLookup):
lookup_types = ('regex', 'iregex')
def __init__(self, *args, **kwargs):
defaults = {'field_to_add': models.NullBooleanField(editable=False,
null=True)
}
defaults.update(kwargs)
ExtraFieldLookup.__init__(self, *args, **defaults)
def contribute(self, model, field_name, lookup_def):
ExtraFieldLookup.contribute(self, model, field_name, lookup_def)
if isinstance(lookup_def, regex):
self.lookup_def = re.compile(lookup_def.pattern, re.S | re.U |
(lookup_def.flags & re.I))
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name,
self.lookup_def.pattern.encode('hex'))
def is_icase(self):
return self.lookup_def.flags & re.I
def _convert_lookup(self, value, lookup_type):
return self.new_lookup, True
def _convert_value(self, value):
if self.lookup_def.match(value):
return True
return False
def matches_filter(self, model, field_name, lookup_type, value):
return self.model == model and lookup_type == \
'%sregex' % ('i' if self.is_icase() else '') and \
value == self.lookup_def.pattern and field_name == self.field_name
@classmethod
def matches_lookup_def(cls, lookup_def):
if isinstance(lookup_def, regex):
return True
return False
class StandardLookup(ExtraFieldLookup):
''' Creates a copy of the field_to_index in order to allow querying for
standard lookup_types on a JOINed property. '''
# TODO: database backend can specify standardLookups
lookup_types = ('exact', 'gt', 'gte', 'lt', 'lte', 'in', 'range', 'isnull')
@property
def index_name(self):
return 'idxf_%s_l_%s' % (self.field_name, 'standard')
def convert_lookup(self, value, lookup_type):
return lookup_type, value
def get_field_to_add(self, field_to_index):
field_to_add = deepcopy(field_to_index)
if isinstance(field_to_add, (models.DateTimeField,
models.DateField, models.TimeField)):
field_to_add.auto_now_add = field_to_add.auto_now = False
return field_to_add
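# Illustrative sketch (added for clarity, not part of dbindexer): the suffix
# trick behind Contains/Icontains. Indexing stores every suffix of a value so
# that a later 'contains' filter can be rewritten as a 'startswith' lookup.
# Assumes Django is importable, since the field defaults above instantiate
# model fields at import time.
if __name__ == "__main__":
    demo = Contains()                                # no model/field needed for this helper
    print(demo.contains_indexer("hello"))            # ['hello', 'ello', 'llo', 'lo', 'o']
    print(demo.convert_lookup("ell", "contains"))    # ('startswith', 'ell')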
| bsd-3-clause | 6,995,313,191,681,633,000 | 32.618321 | 83 | 0.592643 | false |
googleads/google-ads-python | google/ads/googleads/v8/services/types/geo_target_constant_service.py | 1 | 5566 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.resources.types import (
geo_target_constant as gagr_geo_target_constant,
)
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={
"GetGeoTargetConstantRequest",
"SuggestGeoTargetConstantsRequest",
"SuggestGeoTargetConstantsResponse",
"GeoTargetConstantSuggestion",
},
)
class GetGeoTargetConstantRequest(proto.Message):
r"""Request message for
[GeoTargetConstantService.GetGeoTargetConstant][google.ads.googleads.v8.services.GeoTargetConstantService.GetGeoTargetConstant].
Attributes:
resource_name (str):
Required. The resource name of the geo target
constant to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
class SuggestGeoTargetConstantsRequest(proto.Message):
r"""Request message for
[GeoTargetConstantService.SuggestGeoTargetConstants][google.ads.googleads.v8.services.GeoTargetConstantService.SuggestGeoTargetConstants].
Attributes:
locale (str):
If possible, returned geo targets are
translated using this locale. If not, en is used
by default. This is also used as a hint for
returned geo targets.
country_code (str):
Returned geo targets are restricted to this
country code.
location_names (google.ads.googleads.v8.services.types.SuggestGeoTargetConstantsRequest.LocationNames):
The location names to search by. At most 25
names can be set.
geo_targets (google.ads.googleads.v8.services.types.SuggestGeoTargetConstantsRequest.GeoTargets):
The geo target constant resource names to
filter by.
"""
class LocationNames(proto.Message):
r"""A list of location names.
Attributes:
names (Sequence[str]):
A list of location names.
"""
names = proto.RepeatedField(proto.STRING, number=2,)
class GeoTargets(proto.Message):
r"""A list of geo target constant resource names.
Attributes:
geo_target_constants (Sequence[str]):
A list of geo target constant resource names.
"""
geo_target_constants = proto.RepeatedField(proto.STRING, number=2,)
locale = proto.Field(proto.STRING, number=6, optional=True,)
country_code = proto.Field(proto.STRING, number=7, optional=True,)
location_names = proto.Field(
proto.MESSAGE, number=1, oneof="query", message=LocationNames,
)
geo_targets = proto.Field(
proto.MESSAGE, number=2, oneof="query", message=GeoTargets,
)
class SuggestGeoTargetConstantsResponse(proto.Message):
r"""Response message for
[GeoTargetConstantService.SuggestGeoTargetConstants][google.ads.googleads.v8.services.GeoTargetConstantService.SuggestGeoTargetConstants].
Attributes:
geo_target_constant_suggestions (Sequence[google.ads.googleads.v8.services.types.GeoTargetConstantSuggestion]):
Geo target constant suggestions.
"""
geo_target_constant_suggestions = proto.RepeatedField(
proto.MESSAGE, number=1, message="GeoTargetConstantSuggestion",
)
class GeoTargetConstantSuggestion(proto.Message):
r"""A geo target constant suggestion.
Attributes:
locale (str):
The language this GeoTargetConstantSuggestion
is currently translated to. It affects the name
of geo target fields. For example, if locale=en,
then name=Spain. If locale=es, then name=España.
The default locale will be returned if no
translation exists for the locale in the
request.
reach (int):
Approximate user population that will be
targeted, rounded to the nearest 100.
search_term (str):
If the request searched by location name,
this is the location name that matched the geo
target.
geo_target_constant (google.ads.googleads.v8.resources.types.GeoTargetConstant):
The GeoTargetConstant result.
geo_target_constant_parents (Sequence[google.ads.googleads.v8.resources.types.GeoTargetConstant]):
The list of parents of the geo target
constant.
"""
locale = proto.Field(proto.STRING, number=6, optional=True,)
reach = proto.Field(proto.INT64, number=7, optional=True,)
search_term = proto.Field(proto.STRING, number=8, optional=True,)
geo_target_constant = proto.Field(
proto.MESSAGE,
number=4,
message=gagr_geo_target_constant.GeoTargetConstant,
)
geo_target_constant_parents = proto.RepeatedField(
proto.MESSAGE,
number=5,
message=gagr_geo_target_constant.GeoTargetConstant,
)
__all__ = tuple(sorted(__protobuf__.manifest))
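# Hypothetical request-construction example (not part of the generated code):
# ask for geo target suggestions matching the location name "Paris", limited
# to France and localized to French.
if __name__ == "__main__":
    request = SuggestGeoTargetConstantsRequest(
        locale="fr",
        country_code="FR",
        location_names=SuggestGeoTargetConstantsRequest.LocationNames(
            names=["Paris"]
        ),
    )
    print(request)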
| apache-2.0 | -4,300,285,635,369,971,000 | 35.854305 | 142 | 0.679245 | false |
novirael/os-simulation | memory/simulation.py | 1 | 1520 | from copy import copy
from random import randint
from memory import (
FirstInFirstOutAlgorithm,
TheOptimalAlgorithm,
LastRecentlyUsedAlgorithm,
ApproximalLastRecentlyUsedAlgorithm,
RandomAlgorithm
)
PAGE_SIZE = 100
FRAMES = 10
NUM_REQUESTS = 1000
def test(page_size, frames_size, num_requests, draw=False):
summary = {}
query = [randint(1, page_size+1) for _ in range(num_requests)]
algorithms = [
FirstInFirstOutAlgorithm(copy(query), frames_size),
TheOptimalAlgorithm(copy(query), frames_size),
LastRecentlyUsedAlgorithm(copy(query), frames_size),
ApproximalLastRecentlyUsedAlgorithm(copy(query), frames_size),
RandomAlgorithm(copy(query), frames_size)
]
for alg in algorithms:
alg.execute()
if draw:
print 'Page faults for {title}: {faults}/{requests}'.format(
title=alg.title,
faults=alg.page_faults,
requests=num_requests
)
summary[alg.title] = alg.page_faults
return summary
def statistic(frames, times=50):
stat = {}
for i in range(times):
results = test(PAGE_SIZE, frames, NUM_REQUESTS)
if not stat:
stat = copy(results)
else:
for alg, result in results.iteritems():
stat[alg] += result
print stat
if __name__ == "__main__":
# test(PAGE_SIZE, FRAMES, NUM_REQUESTS, draw=True)
for frames in [10, 20, 30, 40]:
statistic(frames)
| mit | -3,993,133,811,072,129,000 | 24.333333 | 72 | 0.617105 | false |
mattsep/TDSE | src/animate.py | 1 | 1444 | import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# animation of the probability density of the wavefunction over the course
# of time
def probabilityDensity(x, t, V, psi):
# convert to the probability density
Nt = len(t)
rho = sp.real(sp.conjugate(psi)*psi)
# set the first frame properties and grab the line handles
fig, ax = plt.subplots()
line1, line2, line3, line4 = ax.plot(x, rho[:,1], 'k',
x, sp.real(psi[:,1]), 'b:',
x, sp.imag(psi[:,1]), 'r:',
x, V, 'm--',
linewidth=2.0)
ax.set_xlabel("Position")
ax.set_ylabel("Probability Density")
ax.set_ylim([-rho.max(), rho.max()])
ax.set_xlim([min(x), max(x)])
# the animation function, to be called repeatedly
def animate(i):
# set the new data each frame
line1.set_ydata(rho[:,i])
line2.set_ydata(sp.real(psi[:,i]))
line3.set_ydata(sp.imag(psi[:,i]))
return line1, line2, line3
# the initialization function, useful when blit=True
def init():
line1.set_ydata(sp.ma.array(x, mask=True))
line2.set_ydata(sp.ma.array(x, mask=True))
line3.set_ydata(sp.ma.array(x, mask=True))
return line1, line2, line3
# perform the animation
ani = animation.FuncAnimation(fig, animate, sp.arange(1,Nt),
init_func=init, interval=25, blit=True)
plt.show()
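# Illustrative call with hypothetical data (not produced by a TDSE solver):
# a frozen Gaussian wave packet, just to show the expected array shapes --
# psi holds one spatial column per time step.
if __name__ == "__main__":
    x = sp.linspace(-10, 10, 200)
    t = sp.linspace(0, 1, 50)
    V = sp.zeros_like(x)
    packet = sp.exp(-x**2 / 2) * sp.exp(1j * x)
    psi = sp.tile(packet[:, None], (1, len(t)))
    probabilityDensity(x, t, V, psi)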
| gpl-3.0 | 1,779,452,634,299,674,600 | 31.088889 | 74 | 0.607341 | false |
ticklemepierce/osf.io | api_tests/nodes/views/test_node_children_list.py | 1 | 22121 | # -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
from framework.auth.core import Auth
from website.models import Node, NodeLog
from website.util import permissions
from website.util.sanitize import strip_html
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase, fake
from tests.factories import (
NodeFactory,
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
RetractedRegistrationFactory
)
class TestNodeChildrenList(ApiTestCase):
def setUp(self):
super(TestNodeChildrenList, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory()
self.project.add_contributor(self.user, permissions=[permissions.READ, permissions.WRITE])
self.project.save()
self.component = NodeFactory(parent=self.project, creator=self.user)
self.pointer = ProjectFactory()
self.project.add_pointer(self.pointer, auth=Auth(self.user), save=True)
self.private_project_url = '/{}nodes/{}/children/'.format(API_BASE, self.project._id)
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_project.save()
self.public_component = NodeFactory(parent=self.public_project, creator=self.user, is_public=True)
self.public_project_url = '/{}nodes/{}/children/'.format(API_BASE, self.public_project._id)
self.user_two = AuthUserFactory()
def test_node_children_list_does_not_include_pointers(self):
res = self.app.get(self.private_project_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_return_public_node_children_list_logged_out(self):
res = self.app.get(self.public_project_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['id'], self.public_component._id)
def test_return_public_node_children_list_logged_in(self):
res = self.app.get(self.public_project_url, auth=self.user_two.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['id'], self.public_component._id)
def test_return_private_node_children_list_logged_out(self):
res = self.app.get(self.private_project_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert 'detail' in res.json['errors'][0]
def test_return_private_node_children_list_logged_in_contributor(self):
res = self.app.get(self.private_project_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(len(res.json['data']), 1)
assert_equal(res.json['data'][0]['id'], self.component._id)
def test_return_private_node_children_list_logged_in_non_contributor(self):
res = self.app.get(self.private_project_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert 'detail' in res.json['errors'][0]
def test_node_children_list_does_not_include_unauthorized_projects(self):
private_component = NodeFactory(parent=self.project)
res = self.app.get(self.private_project_url, auth=self.user.auth)
assert_equal(len(res.json['data']), 1)
def test_node_children_list_does_not_include_deleted(self):
child_project = NodeFactory(parent=self.public_project, creator=self.user)
child_project.save()
res = self.app.get(self.public_project_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
ids = [node['id'] for node in res.json['data']]
assert_in(child_project._id, ids)
assert_equal(2, len(ids))
child_project.is_deleted = True
child_project.save()
res = self.app.get(self.public_project_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
ids = [node['id'] for node in res.json['data']]
assert_not_in(child_project._id, ids)
assert_equal(1, len(ids))
def test_node_children_list_does_not_include_node_links(self):
pointed_to = ProjectFactory(is_public=True)
self.public_project.add_pointer(pointed_to, auth=Auth(self.public_project.creator))
res = self.app.get(self.public_project_url, auth=self.user.auth)
ids = [node['id'] for node in res.json['data']]
assert_in(self.public_component._id, ids) # sanity check
assert_equal(len(ids), len([e for e in self.public_project.nodes if e.primary]))
assert_not_in(pointed_to._id, ids)
def test_cannot_access_retracted_children(self):
registration = RegistrationFactory(creator=self.user, project=self.public_project)
retraction = RetractedRegistrationFactory(registration=registration, user=self.user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
class TestNodeChildrenListFiltering(ApiTestCase):
def test_node_child_filtering(self):
user = AuthUserFactory()
project = ProjectFactory(creator=user)
title1, title2 = fake.bs(), fake.bs()
component = NodeFactory(title=title1, parent=project)
component2 = NodeFactory(title=title2, parent=project)
url = '/{}nodes/{}/children/?filter[title]={}'.format(
API_BASE,
project._id,
title1
)
res = self.app.get(url, auth=user.auth)
ids = [node['id'] for node in res.json['data']]
assert_in(component._id, ids)
assert_not_in(component2._id, ids)
class TestNodeChildCreate(ApiTestCase):
def setUp(self):
super(TestNodeChildCreate, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True)
self.url = '/{}nodes/{}/children/'.format(API_BASE, self.project._id)
self.child = {
'data': {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
}
def test_creates_child_logged_out_user(self):
res = self.app.post_json_api(self.url, self.child, expect_errors=True)
assert_equal(res.status_code, 401)
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_creates_child_logged_in_owner(self):
res = self.app.post_json_api(self.url, self.child, auth=self.user.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['title'], self.child['data']['attributes']['title'])
assert_equal(res.json['data']['attributes']['description'], self.child['data']['attributes']['description'])
assert_equal(res.json['data']['attributes']['category'], self.child['data']['attributes']['category'])
self.project.reload()
assert_equal(res.json['data']['id'], self.project.nodes[0]._id)
assert_equal(self.project.nodes[0].logs[0].action, NodeLog.PROJECT_CREATED)
def test_creates_child_logged_in_write_contributor(self):
self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], auth=Auth(self.user), save=True)
res = self.app.post_json_api(self.url, self.child, auth=self.user_two.auth)
assert_equal(res.status_code, 201)
assert_equal(res.json['data']['attributes']['title'], self.child['data']['attributes']['title'])
assert_equal(res.json['data']['attributes']['description'], self.child['data']['attributes']['description'])
assert_equal(res.json['data']['attributes']['category'], self.child['data']['attributes']['category'])
self.project.reload()
child_id = res.json['data']['id']
assert_equal(child_id, self.project.nodes[0]._id)
assert_equal(Node.load(child_id).logs[0].action, NodeLog.PROJECT_CREATED)
def test_creates_child_logged_in_read_contributor(self):
self.project.add_contributor(self.user_two, permissions=[permissions.READ], auth=Auth(self.user), save=True)
res = self.app.post_json_api(self.url, self.child, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_creates_child_logged_in_non_contributor(self):
res = self.app.post_json_api(self.url, self.child, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_creates_child_creates_child_and_sanitizes_html_logged_in_owner(self):
title = '<em>Cool</em> <strong>Project</strong>'
description = 'An <script>alert("even cooler")</script> child'
res = self.app.post_json_api(self.url, {
'data': {
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}
}, auth=self.user.auth)
child_id = res.json['data']['id']
assert_equal(res.status_code, 201)
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['data']['attributes']['title'], strip_html(title))
assert_equal(res.json['data']['attributes']['description'], strip_html(description))
assert_equal(res.json['data']['attributes']['category'], 'project')
self.project.reload()
child_id = res.json['data']['id']
assert_equal(child_id, self.project.nodes[0]._id)
assert_equal(Node.load(child_id).logs[0].action, NodeLog.PROJECT_CREATED)
def test_cannot_create_child_on_a_registration(self):
registration = RegistrationFactory(project=self.project, creator=self.user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = self.app.post_json_api(url, {
'data': {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 403)
def test_creates_child_no_type(self):
child = {
'data': {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = self.app.post_json_api(self.url, child, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/type')
def test_creates_child_incorrect_type(self):
child = {
'data': {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
}
res = self.app.post_json_api(self.url, child, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 409)
assert_equal(res.json['errors'][0]['detail'], 'Resource identifier does not match server endpoint.')
def test_creates_child_properties_not_nested(self):
child = {
'data': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}
res = self.app.post_json_api(self.url, child, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/attributes.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/attributes')
class TestNodeChildrenBulkCreate(ApiTestCase):
def setUp(self):
super(TestNodeChildrenBulkCreate, self).setUp()
self.user = AuthUserFactory()
self.user_two = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=True)
self.url = '/{}nodes/{}/children/'.format(API_BASE, self.project._id)
self.child = {
'type': 'nodes',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project'
}
}
self.child_two = {
'type': 'nodes',
'attributes': {
'title': 'second child',
'description': 'this is my hypothesis',
'category': 'hypothesis'
}
}
def test_bulk_children_create_blank_request(self):
res = self.app.post_json_api(self.url, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
def test_bulk_creates_children_limits(self):
res = self.app.post_json_api(self.url, {'data': [self.child] * 11},
auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Bulk operation limit is 10, got 11.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data')
def test_bulk_creates_children_logged_out_user(self):
res = self.app.post_json_api(self.url, {'data': [self.child, self.child_two]}, expect_errors=True, bulk=True)
assert_equal(res.status_code, 401)
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_bulk_creates_children_logged_in_owner(self):
res = self.app.post_json_api(self.url, {'data': [self.child, self.child_two]}, auth=self.user.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.json['data'][0]['attributes']['title'], self.child['attributes']['title'])
assert_equal(res.json['data'][0]['attributes']['description'], self.child['attributes']['description'])
assert_equal(res.json['data'][0]['attributes']['category'], self.child['attributes']['category'])
assert_equal(res.json['data'][1]['attributes']['title'], self.child_two['attributes']['title'])
assert_equal(res.json['data'][1]['attributes']['description'], self.child_two['attributes']['description'])
assert_equal(res.json['data'][1]['attributes']['category'], self.child_two['attributes']['category'])
self.project.reload()
assert_equal(res.json['data'][0]['id'], self.project.nodes[0]._id)
assert_equal(res.json['data'][1]['id'], self.project.nodes[1]._id)
assert_equal(self.project.nodes[0].logs[0].action, NodeLog.PROJECT_CREATED)
assert_equal(self.project.nodes[1].logs[0].action, NodeLog.PROJECT_CREATED)
def test_bulk_creates_children_child_logged_in_write_contributor(self):
self.project.add_contributor(self.user_two, permissions=[permissions.READ, permissions.WRITE], auth=Auth(self.user), save=True)
res = self.app.post_json_api(self.url, {'data': [self.child, self.child_two]}, auth=self.user_two.auth, bulk=True)
assert_equal(res.status_code, 201)
assert_equal(res.json['data'][0]['attributes']['title'], self.child['attributes']['title'])
assert_equal(res.json['data'][0]['attributes']['description'], self.child['attributes']['description'])
assert_equal(res.json['data'][0]['attributes']['category'], self.child['attributes']['category'])
assert_equal(res.json['data'][1]['attributes']['title'], self.child_two['attributes']['title'])
assert_equal(res.json['data'][1]['attributes']['description'], self.child_two['attributes']['description'])
assert_equal(res.json['data'][1]['attributes']['category'], self.child_two['attributes']['category'])
self.project.reload()
child_id = res.json['data'][0]['id']
child_two_id = res.json['data'][1]['id']
assert_equal(child_id, self.project.nodes[0]._id)
assert_equal(child_two_id, self.project.nodes[1]._id)
assert_equal(Node.load(child_id).logs[0].action, NodeLog.PROJECT_CREATED)
assert_equal(self.project.nodes[1].logs[0].action, NodeLog.PROJECT_CREATED)
def test_bulk_creates_children_logged_in_read_contributor(self):
self.project.add_contributor(self.user_two, permissions=[permissions.READ], auth=Auth(self.user), save=True)
res = self.app.post_json_api(self.url, {'data': [self.child, self.child_two]}, auth=self.user_two.auth,
expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_bulk_creates_children_logged_in_non_contributor(self):
res = self.app.post_json_api(self.url, {'data': [self.child, self.child_two]},
auth=self.user_two.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_bulk_creates_children_and_sanitizes_html_logged_in_owner(self):
title = '<em>Cool</em> <strong>Project</strong>'
description = 'An <script>alert("even cooler")</script> child'
res = self.app.post_json_api(self.url, {
'data': [{
'type': 'nodes',
'attributes': {
'title': title,
'description': description,
'category': 'project',
'public': True
}
}]
}, auth=self.user.auth, bulk=True)
child_id = res.json['data'][0]['id']
assert_equal(res.status_code, 201)
url = '/{}nodes/{}/'.format(API_BASE, child_id)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.json['data']['attributes']['title'], strip_html(title))
assert_equal(res.json['data']['attributes']['description'], strip_html(description))
assert_equal(res.json['data']['attributes']['category'], 'project')
self.project.reload()
child_id = res.json['data']['id']
assert_equal(child_id, self.project.nodes[0]._id)
assert_equal(Node.load(child_id).logs[0].action, NodeLog.PROJECT_CREATED)
def test_cannot_bulk_create_children_on_a_registration(self):
registration = RegistrationFactory(project=self.project, creator=self.user)
url = '/{}nodes/{}/children/'.format(API_BASE, registration._id)
res = self.app.post_json_api(url, {
'data': [self.child_two, {
'type': 'nodes',
'attributes': {
'title': fake.catch_phrase(),
'description': fake.bs(),
'category': 'project',
'public': True,
}
}]
}, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 403)
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_bulk_creates_children_no_type(self):
child = {
'data': [self.child_two, {
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = self.app.post_json_api(self.url, child, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'This field may not be null.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/1/type')
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_bulk_creates_children_incorrect_type(self):
child = {
'data': [self.child_two, {
'type': 'Wrong type.',
'attributes': {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}
}]
}
res = self.app.post_json_api(self.url, child, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 409)
assert_equal(res.json['errors'][0]['detail'], 'Resource identifier does not match server endpoint.')
self.project.reload()
assert_equal(len(self.project.nodes), 0)
def test_bulk_creates_children_properties_not_nested(self):
child = {
'data': [self.child_two, {
'title': 'child',
'description': 'this is a child project',
'category': 'project',
}]
}
res = self.app.post_json_api(self.url, child, auth=self.user.auth, expect_errors=True, bulk=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], 'Request must include /data/attributes.')
assert_equal(res.json['errors'][0]['source']['pointer'], '/data/attributes')
self.project.reload()
assert_equal(len(self.project.nodes), 0) | apache-2.0 | 2,755,910,195,690,702,300 | 43.155689 | 135 | 0.59717 | false |
liumengjun/django-static-precompiler | static_precompiler/templatetags/base.py | 1 | 1478 | import inspect
try:
# Django>=1.9
from django.template import library
except ImportError:
# Django<1.9
from django.template import base as library
def container_tag(register, name=None):
def dec(func):
params, varargs, varkw, defaults = inspect.getargspec(func)
params = params[1:]
tag_name = name or func.__name__
class InlineCompileNode(library.TagHelperNode):
def __init__(self, nodelist, *args):
super(InlineCompileNode, self).__init__(*args)
self.nodelist = nodelist
def render(self, context):
args, kwargs = self.get_resolved_arguments(context)
return func(self.nodelist, *args, **kwargs)
def compile_func(parser, token):
takes_context = True
bits = token.split_contents()[1:]
args, kwargs = library.parse_bits(parser, bits, params, varargs, varkw,
defaults, takes_context, tag_name)
nodelist = parser.parse(('end' + tag_name,))
parser.delete_first_token()
try:
# Django<1.9
return InlineCompileNode(nodelist, takes_context, args, kwargs)
except TypeError:
# Django>=1.9
return InlineCompileNode(nodelist, func, takes_context, args, kwargs)
register.tag(tag_name, compile_func)
return func
return dec
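# --- Illustrative usage sketch (added, not part of the original module) ---
# A hedged example of the kind of block tag container_tag() is meant to build.
# "register" is assumed to be the Library instance of the calling templatetags
# module, and "spamtag"/"render_spam" are made-up names, not an API of this
# package:
#
#   from django.template import Library
#   register = Library()
#
#   @container_tag(register)
#   def spamtag(nodelist, context, *args, **kwargs):
#       # body between {% spamtag %} ... {% endspamtag %}, rendered lazily
#       source = nodelist.render(context)
#       return render_spam(source)   # hypothetical post-processing step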
| mit | 4,570,853,144,020,041,700 | 32.590909 | 85 | 0.567659 | false |
JustinSGray/Kona | src/kona/linalg/matrices/preconds/nested.py | 1 | 1272 | from kona.linalg.matrices.common import IdentityMatrix
from kona.linalg.matrices.hessian import ReducedKKTMatrix
class NestedKKTPreconditioner(ReducedKKTMatrix):
"""
This object preconditions the KKT system by doing approximate solutions
of the 2nd order adjoints using the PDE preconditioner.
    The approximate products computed with these approximate adjoints are then
    used in a nested Krylov solver to produce an inverse estimate.
"""
def _linear_solve(self, rhs_vec, solution, rel_tol=1e-8):
self.dRdU.linearize(self.at_design, self.at_state)
self.dRdU.precond(rhs_vec, solution)
def _adjoint_solve(self, rhs_vec, solution, rel_tol=1e-8):
self.dRdU.linearize(self.at_design, self.at_state)
self.dRdU.T.precond(rhs_vec, solution)
def solve(self, rhs, solution, rel_tol=None):
# make sure we have a krylov solver
if self.krylov is None:
raise AttributeError('krylov solver not set')
# set tolerance
if isinstance(rel_tol, float):
self.krylov.rel_tol = rel_tol
# define the preconditioner
eye = IdentityMatrix()
precond = eye.product
# trigger the solution
self.krylov.solve(self.product, rhs, solution, precond)
| lgpl-3.0 | 340,065,703,489,517,200 | 36.411765 | 77 | 0.683962 | false |
DeepSOIC/Lattice | latticeBaseFeature.py | 1 | 11471 | #***************************************************************************
#* *
#* Copyright (c) 2015 - Victor Titov (DeepSOIC) *
#* <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
__title__="Base feature module for lattice object of lattice workbench for FreeCAD"
__author__ = "DeepSOIC"
__url__ = ""
import FreeCAD as App
import Part
from latticeCommon import *
import latticeCompoundExplorer as LCE
import latticeMarkers
import latticeExecuter
def getDefLatticeFaceColor():
return (1.0, 0.7019608020782471, 0.0, 0.0) #orange
def getDefShapeColor():
clr = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/View").GetUnsigned("DefaultShapeColor")
#convert color in int to color in tuple of 4 floats.
#This is probably implemented already somewhere, but I couldn't find, so I rolled my own --DeepSOIC
# clr in hex looks like this: 0xRRGGBBOO (r,g,b,o = red, green, blue, opacity)
o = clr & 0x000000FFL
b = (clr >> 8) & 0x000000FFL
g = (clr >> 16) & 0x000000FFL
r = (clr >> 24) & 0x000000FFL
return (r/255.0, g/255.0, b/255.0, (255-o)/255.0)
def makeLatticeFeature(name, AppClass, ViewClass):
'''makeLatticeFeature(name, AppClass, ViewClass = None): makes a document object for a LatticeFeature-derived object.'''
obj = FreeCAD.ActiveDocument.addObject("Part::FeaturePython",name)
AppClass(obj)
if ViewClass:
vp = ViewClass(obj.ViewObject)
else:
vp = ViewProviderLatticeFeature(obj.ViewObject)
return obj
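# --- Illustrative sketch (added, not part of the original module) ---
# A hedged example of how a derived feature could plug into makeLatticeFeature();
# "LatticePolarArray" and makeLatticePolarArray() are made-up names used only to
# show the derivedInit()/derivedExecute() contract defined further below.
#
#   class LatticePolarArray(LatticeFeature):
#       def derivedInit(self, obj):
#           obj.addProperty("App::PropertyInteger", "Count", "Lattice", "number of placements")
#           obj.Count = 4
#       def derivedExecute(self, obj):
#           step = 360.0 / obj.Count
#           return [App.Placement(App.Vector(), App.Rotation(App.Vector(0, 0, 1), i * step))
#                   for i in range(obj.Count)]
#
#   def makeLatticePolarArray(name='PolarArray'):
#       return makeLatticeFeature(name, LatticePolarArray, ViewProviderLatticeFeature)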
def isObjectLattice(documentObject):
    '''isObjectLattice(documentObject): returns True if the object is to be treated as a lattice (an array of placements) when operating on it, and False if it is to be treated as a regular shape.'''
ret = False
if hasattr(documentObject,"isLattice"):
if 'On' in documentObject.isLattice:
ret = True
return ret
def getMarkerSizeEstimate(ListOfPlacements):
'''getMarkerSizeEstimate(ListOfPlacements): computes the default marker size for the array of placements'''
if len(ListOfPlacements) == 0:
return 1.0
pathLength = 0
for i in range(1, len(ListOfPlacements)):
pathLength += (ListOfPlacements[i].Base - ListOfPlacements[i-1].Base).Length
sz = pathLength/len(ListOfPlacements)/2.0
#FIXME: make hierarchy-aware
if sz < DistConfusion*10:
sz = 1.0
return sz
class LatticeFeature():
"Base object for lattice objects (arrays of placements)"
def __init__(self,obj):
# please, don't override. Override derivedInit instead.
self.Type = "latticeFeature"
prop = "NumElements"
obj.addProperty("App::PropertyInteger",prop,"Lattice","Info: number of placements in the array")
obj.setEditorMode(prop, 1) # set read-only
obj.addProperty("App::PropertyLength","MarkerSize","Lattice","Size of placement markers (set to zero for automatic).")
obj.addProperty("App::PropertyEnumeration","MarkerShape","Lattice","Choose the preferred shape of placement markers.")
obj.MarkerShape = ["tetra-orimarker","paperplane-orimarker"]
obj.MarkerShape = "paperplane-orimarker" #TODO: setting for choosing the default
obj.addProperty("App::PropertyEnumeration","isLattice","Lattice","Sets whether this object should be treated as a lattice by further operations")
obj.isLattice = ['Auto-Off','Auto-On','Force-Off','Force-On']
        # Auto-On and Auto-Off can be modified when recomputing. Force values are going to stay.
#Hidden properties affecting some standard behaviours
prop = "SingleByDesign"
obj.addProperty("App::PropertyBool",prop,"Lattice","Makes the element be populated into object's Placement property")
obj.setEditorMode(prop, 2) # set hidden
self.derivedInit(obj)
obj.Proxy = self
def derivedInit(self, obj):
'''for overriding by derived classes'''
pass
def execute(self,obj):
# please, don't override. Override derivedExecute instead.
plms = self.derivedExecute(obj)
if plms is not None:
obj.NumElements = len(plms)
shapes = []
markerSize = obj.MarkerSize
if markerSize < DistConfusion:
markerSize = getMarkerSizeEstimate(plms)
marker = latticeMarkers.getPlacementMarker(scale= markerSize, markerID= obj.MarkerShape)
#FIXME: make hierarchy-aware
if obj.SingleByDesign:
if len(plms) != 1:
                latticeExecuter.warning(obj,"Multiple placements are being fed, but object is single by design. Only first placement will be used...")
obj.Shape = marker.copy()
obj.Placement = plms[0]
else:
for plm in plms:
sh = marker.copy()
sh.Placement = plm
shapes.append(sh)
if len(shapes) == 0:
obj.Shape = latticeMarkers.getNullShapeShape(markerSize)
raise ValueError('Lattice object is null') #Feeding empty compounds to FreeCAD seems to cause rendering issues, otherwise it would have been a good idea to output nothing.
sh = Part.makeCompound(shapes)
obj.Shape = sh
if obj.isLattice == 'Auto-Off':
obj.isLattice = 'Auto-On'
else:
# DerivedExecute didn't return anything. Thus we assume it
# has assigned the shape, and thus we don't do anything.
# Moreover, we assume that it is no longer a lattice object, so:
if obj.isLattice == 'Auto-On':
obj.isLattice = 'Auto-Off'
obj.NumElements = len(obj.Shape.childShapes(False,False))
return
def derivedExecute(self,obj):
'''For overriding by derived class. If this returns a list of placements,
        it's going to be used to build the shape. If it returns None, it is assumed that
derivedExecute has already assigned the shape, and no further actions are needed.
Moreover, None is a signal that the object is not a lattice array, and it will
morph into a non-lattice if isLattice is set to auto'''
return []
def verifyIntegrity(self):
if self.__init__.__func__ is not LatticeFeature.__init__.__func__:
FreeCAD.Console.PrintError("__init__() of lattice object is overridden. Please don't! Fix it!\n")
if self.execute.__func__ is not LatticeFeature.execute.__func__:
FreeCAD.Console.PrintError("execute() of lattice object is overridden. Please don't! Fix it!\n")
def onChanged(self, obj, prop): #prop is a string - name of the property
if prop == 'isLattice':
if obj.ViewObject is not None:
try:
if isObjectLattice(obj):
#obj.ViewObject.DisplayMode = 'Shaded'
obj.ViewObject.ShapeColor = getDefLatticeFaceColor()
obj.ViewObject.Lighting = 'One side'
else:
#obj.ViewObject.DisplayMode = 'Flat Lines'
obj.ViewObject.ShapeColor = getDefShapeColor()
except App.Base.FreeCADError as err:
#these errors pop up while loading project file, apparently because
# viewprovider is up already, but the shape vis mesh wasn't yet
# created. It is safe to ignore them, as DisplayMode is eventually
# restored to the correct values.
#Proper way of dealing with it would have been by testing for
# isRestoring(??), but I failed to find the way to do it.
#--DeepSOIC
pass
class ViewProviderLatticeFeature:
"A View Provider for base lattice object"
def __init__(self,vobj):
'''Don't override. Override derivedInit, please!'''
vobj.Proxy = self
prop = "DontUnhideOnDelete"
vobj.addProperty("App::PropertyBool",prop,"Lattice","Makes the element be populated into object's Placement property")
vobj.setEditorMode(prop, 2) # set hidden
self.derivedInit(vobj)
def derivedInit(self,vobj):
pass
def verifyIntegrity(self):
if self.__init__.__func__ is not ViewProviderLatticeFeature.__init__.__func__:
FreeCAD.Console.PrintError("__init__() of lattice object view provider is overridden. Please don't! Fix it!\n")
def getIcon(self):
return getIconPath("Lattice.svg")
def attach(self, vobj):
self.ViewObject = vobj
self.Object = vobj.Object
def setEdit(self,vobj,mode):
return False
def unsetEdit(self,vobj,mode):
return
def __getstate__(self):
return None
def __setstate__(self,state):
return None
def claimChildren(self):
self.Object.Proxy.verifyIntegrity()
self.verifyIntegrity()
return []
def onDelete(self, feature, subelements): # subelements is a tuple of strings
try:
if hasattr(self.ViewObject,"DontUnhideOnDelete") and self.ViewObject.DontUnhideOnDelete:
pass
else:
children = self.claimChildren()
if children and len(children) > 0:
marker = latticeMarkers
for child in children:
child.ViewObject.show()
except Exception as err:
# catch all exceptions, because we don't want to prevent deletion if something goes wrong
FreeCAD.Console.PrintError("Error in onDelete: " + err.message)
return True
| lgpl-2.1 | -9,035,229,541,461,682,000 | 42.954023 | 191 | 0.575277 | false |
jgoppert/pymola | test/xml_test.py | 1 | 4050 | #!/usr/bin/env python
"""
Test XML backend
"""
import os
import sys
import time
import unittest
import pymoca.parser as mo_parser
from pymoca.backends.xml import analysis, generator, sim_scipy
from pymoca.backends.xml import parser as xml_parser
# get matplotlib from analysis, since logic for plotting
# without display already handled there
from pymoca.backends.xml.analysis import plt
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = os.path.join(TEST_DIR, 'models')
GENERATED_DIR = os.path.join(TEST_DIR, 'generated')
class XmlTest(unittest.TestCase):
"""
Xml tests
"""
def setUp(self):
pass
def tearDown(self):
pass
@staticmethod
def flush():
sys.stdout.flush()
sys.stdout.flush()
time.sleep(0.01)
def test_noise(self):
# compile to ModelicaXML
with open(os.path.join(MODEL_DIR, 'Noise.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
model_xml = generator.generate(ast_tree, 'Noise')
# save xml model to disk
with open(os.path.join(GENERATED_DIR, 'Noise.xml'), 'w') as f:
f.write(model_xml)
# load xml model
model = xml_parser.parse(model_xml, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 1, 'dt': 0.001, 'verbose': True})
# plot
analysis.plot(data, fields=['x', 'm'])
plt.draw()
plt.pause(0.1)
plt.close()
def test_simple_circuit(self):
# compile to ModelicaXML
with open(os.path.join(MODEL_DIR, 'SimpleCircuit.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
model_xml = generator.generate(ast_tree, 'SimpleCircuit')
# save xml model to disk
with open(os.path.join(GENERATED_DIR, 'SimpleCircuit.xml'), 'w') as f:
f.write(model_xml)
# load xml model
model = xml_parser.parse(model_xml, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 1, 'dt': 0.001, 'verbose': True})
# plot
analysis.plot(data, fields=['x', 'c', 'm'])
plt.draw()
plt.pause(0.1)
plt.close()
def test_bouncing_ball(self):
# generate
with open(os.path.join(MODEL_DIR, 'BouncingBall.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
generator.generate(ast_tree, 'BouncingBall')
# parse
example_file = os.path.join(
MODEL_DIR, 'bouncing-ball.xml')
model = xml_parser.parse_file(example_file, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
model_ode.prop['x']['start'] = 1
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 3.5, 'dt': 0.01, 'verbose': True})
# plot
analysis.plot(data, linewidth=0.5, marker='.', markersize=0.5)
plt.draw()
plt.pause(0.1)
plt.close()
# simulate in soft real-time
do_realtime = False
if do_realtime:
print('\nsoft-realtime simulation')
time_start = time.time()
def realtime_callback(t, x, y, m, p, c):
t_real = time.time() - time_start
lag = t_real - t
if abs(lag) > 0.1:
print("real: {:10f} > sim: {:10f}, lag: {:10f}".format(t_real, t, lag))
elif lag < 0:
time.sleep(-lag)
data = sim_scipy.sim(model_ode, {'tf': 3.5, 'dt': 0.01, 'verbose': True},
user_callback=realtime_callback)
# plt.gca().set_ylim(-2, 2)
self.flush()
| bsd-3-clause | -4,475,261,726,014,995,000 | 27.321678 | 91 | 0.550617 | false |
intel-ctrlsys/actsys | actsys/control/diagnostics/mock_diagnostics/mock_diagnostics.py | 1 | 6050 | #
#Copyright (c) 2017 Intel Corp.
#
"""
Interface for all diagnostic test plugins.
"""
from control.console_log.mock_console_log.ipmi_mock import MockConsoleLog
from control.diagnostics.diagnostics import Diagnostics
from control.plugin import DeclarePlugin
@DeclarePlugin('mock', 100)
class MockDiagnostics(Diagnostics):
"""This class controls launching the inband diagnostic tests
This needs the input of a file """
mock_provision = False
Test_Status = {}
def __init__(self, **kwargs):
Diagnostics.__init__(self, **kwargs)
self.reboot_true = False
self.img = kwargs['diag_image']
self.old_image = None
self.kargs = kwargs['test_name']
self.old_kargs = None
self.console_log = None
self.device = None
self.bmc = None
self.device_name = None
self.plugin_manager = kwargs['plugin_manager']
self.resource_manager = None
self.provisioner = None
self.power_manager = None
def _verify_provisioning(self, device, img):
self.old_image = self.device.get("image")
self.old_kargs = self.device.get("provisioner_kernel_args")
if self.mock_provision is True:
self.provisioner.add(self.device)
self.provisioner.set_image(self.device, img)
try:
device_list = self.provisioner.list()
img_list = self.provisioner.list_images()
except Exception as ex:
raise Exception(
"Error: Failed to read data from provisioner because {0}. No tests will be run.".format(str(ex)))
if device not in device_list or img not in img_list:
raise Exception(
"Error: Device does not exist in provisioner, provision device to continue")
else:
self.old_image = self.device.get("image")
self.old_kargs = self.device.get("provisioner_kernel_args")
def _provision_image(self, img, args):
try:
self.provisioner.set_image(self.device, img)
self.provisioner.set_kernel_args(self.device, args)
except Exception as ex:
raise Exception("Failed to set image {0} or test {1}. Provisioner returned error {2}. "
"Cannot run diagnostics. ".format(img, args, str(ex)))
def _set_node_state(self, state):
result = self.power_manager.set_device_power_state(state)
if result[self.device_name] is not True:
raise Exception("Failed to power {0} node during provisioning "
"diagnostic image. No tests will be run.".format(state))
def launch_diags(self, device, bmc):
"""launches the diagnostic tests"""
self.device = device
result_list = dict()
self.bmc = bmc
self.device_name = self.device.get("hostname")
if self.device.get("provisioner") is None or self.device.get("resource_controller") is None or \
self.device.get("device_power_control") is None:
raise Exception("You are missing the provisioner or resource_controller or device_power_control key in your"
" config file. Please edit the file and try again.")
self.provisioner = self.plugin_manager.create_instance('provisioner', self.device.get("provisioner"))
self.resource_manager = self.plugin_manager.create_instance('resource_control',
self.device.get("resource_controller"))
power_options = self._pack_options()
self.power_manager = self.plugin_manager.create_instance('power_control',
self.device.get("device_power_control"),
**power_options)
if self.device.get("provisioner") in "mock":
self.mock_provision = True
self._verify_provisioning(self.device_name, self.img)
MockDiagnostics.Test_Status[self.device_name] = 'Running'
# Step 1: Remove node from resource pool
dev_l = list()
dev_l.append(self.device_name)
current_state = self.resource_manager.check_nodes_state(dev_l)[1]
if "idle" in current_state:
result = self.resource_manager.remove_nodes_from_resource_pool(dev_l)
if result[0] != 0:
raise Exception(
"Cannot remove node from resource pool for running diagnostics since {0}".format(result[1]))
else:
raise Exception("Cannot remove node from resource pool. {}".format(current_state))
# start console log
self.console_log = MockConsoleLog(self.device_name, '127.0.0.1', 'user', 'password')
console_log_returned, result = self.console_log.start_log_capture('End of Diagnostics', 'Return Code: ')
result_list[self.device_name] = result
# Step 2: Provision diagnostic image
self._provision_image(self.img, self.kargs)
self._set_node_state('Off')
self._set_node_state('On')
# Step 3: Run tests and parse log for completion
# Step 4: Provision node back to old image
if not self.reboot_true:
self._provision_image(self.old_image, self.old_kargs)
self._set_node_state('Off')
self._set_node_state('On')
# Step 5: Add node back to resource pool
result = self.resource_manager.add_nodes_to_resource_pool(dev_l)
if result[0] != 0:
raise Exception("Failed to add node back to resource pool")
return result_list
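    # --- Illustrative usage sketch (added, not part of the original module) ---
    # A hedged example of driving launch_diags() from a test; the plugin_manager,
    # device and bmc objects are assumed fixtures, and every literal below is a
    # made-up placeholder rather than a value this class requires.
    #
    #   diag = MockDiagnostics(diag_image='diag.img', test_name='dgemm',
    #                          plugin_manager=plugin_manager)
    #   device = {'hostname': 'node01', 'provisioner': 'mock',
    #             'resource_controller': 'mock', 'device_power_control': 'mock',
    #             'image': 'prod.img', 'provisioner_kernel_args': 'quiet'}
    #   results = diag.launch_diags(device, bmc)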
def _pack_options(self):
"""Return the node power control options based on the node_name and
configuration object."""
options = {}
dev_l = list()
dev_l.append(self.device)
options['device_list'] = dev_l
options['bmc_list'] = [self.bmc]
options['plugin_manager'] = self.plugin_manager
return options
| apache-2.0 | 1,918,063,083,031,206,000 | 42.52518 | 120 | 0.600496 | false |
PyCon/pycon | symposion/proposals/migrations/0012_auto_20180921_1053.py | 1 | 1034 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0011_proposal_created_updated'),
]
operations = [
migrations.AlterField(
model_name='proposalbase',
name='additional_notes',
            field=models.TextField(help_text='Anything else you would like to share with the committee:<br> <b>Please do not submit any personally identifiable information.</b> The initial round of reviews is anonymous, and this field will be visible to reviewers.<br> Speaker public speaking experience.<br> Speaker subject matter experience.<br> Have the speaker(s) given this presentation before elsewhere?<br> Links to recordings, slides, blog posts, code, or other material. <br> Specific needs or special requests \u2014 accessibility, audio (will you need to play pre-recorded sound?), or restrictions on when your talk can be scheduled.', blank=True),
),
]
| bsd-3-clause | -448,724,437,121,979,970 | 53.421053 | 658 | 0.713733 | false |
5StevenWu/Coursepy | L05/表达式的yield的用途.py | 1 | 1842 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Final version: this can now find every file that contains the keyword
'''
grep -rl 'python' /root
'''
import os
def init(func):
def wrapper(*args,**kwargs):
res = func(*args,**kwargs)
next(res)
return res
return wrapper
@init     # decorator that primes the generator (advances it to the first yield)
def search(target):
    '''Collect the absolute paths of all files under the given search path'''
    # rewritten as a coroutine (generator)
while True:
search_path = yield
g=os.walk(search_path)
for par_dir,_,files in g:
for file in files:
file_abs_path=r'%s\%s'%(par_dir,file)
# print('file_abs_path is ==>: ',file_abs_path)
target.send(file_abs_path)
#g=search()
#d=r'D:\code\py\py3\Coursepy'
#g.send(d)
@init
def opener(target):
while True:
file_abs_path=yield
# print('opener==>: ',file_abs_path)
with open(file_abs_path,encoding='utf-8') as f:
target.send((file_abs_path,f))
# pass
#o=opener()
#o.__next__
#o.send('/2.py')
#g=search(opener())   # pass the opener coroutine into search so files get opened directly inside search
#g.send(d)  # test: send a directory to drive the file-opening step
@init
def cat(target):
    '''Iterate over the lines of every opened file'''
while True:
file_abs_path,f=yield
for line in f:
#print(line)
# print('file_abs_path & line : ',file_abs_path,line)
target.send((file_abs_path,line))
@init
def grep(target,pattern):
while True:
file_abs_path,line=yield
if pattern in line:
target.send(file_abs_path)
@init
def printer():
while True:
file_abs_path=yield
print(file_abs_path)
# send the search path into the pipeline
xx=r'D:\code\py\py3\Coursepy\L05\a\b\b'
x=r'D:\code\py\py3\Coursepy\L05\a'
gg=search(opener(cat(grep(printer(),'python'))))
#print(gg)
gg.send(x) | apache-2.0 | 4,955,264,629,553,978,000 | 21.689189 | 64 | 0.567938 | false |
asi1024/Othbot | main.py | 1 | 3481 | #!/usr/local/bin/python
import Image
import autopy
import os
import time
startX, spanX, startY, spanY = 30, 150, 130, 40
pX, pY = 180, 225
size, span = 30, 56
boxs = size ** 2
starts = spanX * spanY
# draw
def draw_start(im):
for i in range(spanY):
for j in range(spanX):
im.putpixel ((startX + j, startY + i), (0, 0, 0))
def draw_board(im):
for row in range(8):
for col in range(8):
x, y = pX + col * span, pY + row * span
for i in range(size):
for j in range(size):
im.putpixel ((x + j, y + i), (0, 0, 0))
def draw():
os.system("mkdir -p tmp")
os.system("screencapture tmp/sc.png")
im = Image.open("tmp/sc.png")
draw_start(im)
draw_board(im)
im.show()
#take
def take_start(im):
R, G, B = 0, 0, 0
for i in range(spanY):
for j in range(spanX):
r, g, b = im.getpixel ((startX + j, startY + i))
R, G, B = R + r, G + g, B + b
R, G, B = R / starts, G / starts, B / starts
return (R + G + B > 430)
def take_box(im, row, col):
x, y = pX + col * span, pY + row * span
R, G, B = 0, 0, 0
for i in range(size):
for j in range(size):
r, g, b = im.getpixel ((x + j, y + i))
R, G, B = R + r, G + g, B + b
R, G, B = R / boxs, G / boxs, B / boxs
if G - B > 10:
return 0
elif B > 200:
return 3
elif B > 105:
return 4
elif R > 53:
return 2
else:
return 1
def output_data(dat):
for ary in dat:
for i in ary:
print i,
print ""
def board_data(im):
dat = [[0 for i in range(8)] for j in range(8)]
for i in range(8):
for j in range(8):
dat[i][j] = take_box(im, i, j)
return dat
def run():
os.system("mkdir -p tmp")
os.system("screencapture tmp/sc.png")
im = Image.open("tmp/sc.png")
if (take_start(im)):
autopy.mouse.move(startX + spanX / 2, startY + spanY / 2)
time.sleep(2)
autopy.mouse.click()
return
board = board_data(im)
if board[3][3] == 0:
autopy.mouse.move(300, 500)
autopy.mouse.click()
return
flag1, flag2 = 0, 0
for ary in board:
for i in ary:
if i == 4:
flag1 += 1
if i == 2:
flag2 += 1
if flag1 >= 2 or flag2 >= 2:
time.sleep(5)
print "waiting..."
return
if True:
f = open('tmp/input', 'w')
for ary in board:
for i in ary:
if i == 0:
f.write(".")
elif i <= 2:
f.write("x")
elif i <= 4:
f.write("o")
f.write("\n")
f.close()
os.system("./a.out < tmp/input > tmp/output")
x, y = 0, 0
for line in open('tmp/output', 'r'):
items = line.split(' ')
y = int(items[0])
x = int(items[1])
xx = pX + span / 2 + span * x
yy = pY + span / 2 + span * y
autopy.mouse.move(xx, yy)
os.system("screencapture tmp/sc.png")
im = Image.open("tmp/sc.png")
board2 = board_data(im)
if board == board2:
autopy.mouse.click()
time.sleep(1)
def main():
# draw()
# return
os.system("g++-4.9 --std=c++11 src/othello.cpp")
while True:
time.sleep(1)
run()
main()
| mit | 5,038,916,443,684,973,000 | 24.043165 | 65 | 0.460213 | false |
kdyq007/cmdb-api | lib/ci.py | 1 | 27854 | # -*- coding:utf-8 -*-
import uuid
import time
import datetime
import json
from flask import current_app
from flask import abort
from sqlalchemy import or_
from extensions import db
from extensions import rd
from models.ci import CI
from models.ci_relation import CIRelation
from models.ci_type import CITypeAttribute
from models.ci_type import CITypeCache
from models.ci_type import CITypeSpecCache
from models.history import CIAttributeHistory
from models.attribute import CIAttributeCache
from lib.const import TableMap
from lib.const import type_map
from lib.value import AttributeValueManager
from lib.history import CIAttributeHistoryManger
from lib.history import CIRelationHistoryManager
from lib.query_sql import QUERY_HOSTS_NUM_BY_PRODUCT
from lib.query_sql import QUERY_HOSTS_NUM_BY_BU
from lib.query_sql import QUERY_HOSTS_NUM_BY_PROJECT
from lib.query_sql import QUERY_CIS_BY_IDS
from lib.query_sql import QUERY_CIS_BY_VALUE_TABLE
from tasks.cmdb import ci_cache
from tasks.cmdb import ci_delete
class CIManager(object):
""" manage CI interface
"""
def __init__(self):
pass
def get_ci_by_id(self, ci_id, ret_key="name",
fields=None, need_children=True, use_master=False):
"""@params: `ret_key` is one of 'name', 'id', 'alias'
`fields` is list of attribute name/alias/id
"""
ci = CI.query.get(ci_id) or \
abort(404, "CI {0} is not existed".format(ci_id))
res = dict()
if need_children:
children = self.get_children(ci_id, ret_key=ret_key) # one floor
res.update(children)
ci_type = CITypeCache.get(ci.type_id)
res["ci_type"] = ci_type.type_name
uniq_key = CIAttributeCache.get(ci_type.uniq_id)
if not fields: # fields are all attributes
attr_ids = db.session.query(CITypeAttribute.attr_id).filter_by(
type_id=ci.type_id)
fields = [CIAttributeCache.get(_.attr_id).attr_name
for _ in attr_ids]
if uniq_key.attr_name not in fields:
fields.append(uniq_key.attr_name)
if fields:
value_manager = AttributeValueManager()
_res = value_manager._get_attr_values(
fields, ci_id,
ret_key=ret_key, uniq_key=uniq_key, use_master=use_master)
res.update(_res)
res['_type'] = ci_type.type_id
res['_id'] = ci_id
return res
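    # --- Illustrative usage sketch (added, not part of the original module) ---
    # A hedged example of reading a single CI with a restricted field list; the
    # ci_id and attribute names are made up for illustration only.
    #
    #   manager = CIManager()
    #   ci = manager.get_ci_by_id(42, ret_key="alias",
    #                             fields=["hostname", "private_ip"],
    #                             need_children=False)
    #   hostname = ci.get("hostname")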
def get_ci_by_ids(self, ci_id_list, ret_key="name", fields=None):
result = list()
for ci_id in ci_id_list:
res = self.get_ci_by_id(ci_id, ret_key=ret_key, fields=fields)
result.append(res)
return result
def get_children(self, ci_id, ret_key='name', relation_type="contain"):
second_cis = db.session.query(CIRelation.second_ci_id).filter(
CIRelation.first_ci_id == ci_id).filter(or_(
CIRelation.relation_type == relation_type,
CIRelation.relation_type == "deploy"))
second_ci_ids = (second_ci.second_ci_id for second_ci in second_cis)
ci_types = {}
for ci_id in second_ci_ids:
type_id = db.session.query(CI.type_id).filter(
CI.ci_id == ci_id).first().type_id
if type_id not in ci_types:
ci_types[type_id] = [ci_id]
else:
ci_types[type_id].append(ci_id)
res = {}
for type_id in ci_types:
ci_type = CITypeCache.get(type_id)
children = get_cis_by_ids(map(str, ci_types.get(type_id)),
ret_key=ret_key)
res[ci_type.type_name] = children
return res
def get_cis_by_type(self, type_id, ret_key="name", fields="",
page=1, per_page=None):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
cis = db.session.query(CI.ci_id).filter(CI.type_id == type_id)
numfound = cis.count()
cis = cis.offset((page - 1) * per_page).limit(per_page)
res = list()
ci_ids = [str(ci.ci_id) for ci in cis]
if ci_ids:
res = get_cis_by_ids(ci_ids, ret_key, fields)
return numfound, page, res
def ci_is_exist(self, ci_type, unique_key, unique):
table = TableMap(attr_name=unique_key.attr_name).table
unique = db.session.query(table).filter(
table.attr_id == unique_key.attr_id).filter(
table.value == unique).first()
if unique:
return db.session.query(CI).filter(
CI.ci_id == unique.ci_id).first()
def _delete_ci_by_id(self, ci_id):
db.session.query(CI.ci_id).filter(CI.ci_id == ci_id).delete()
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error("delete ci is error, {0}".format(str(e)))
def add(self, ci_type_name, exist_policy="replace",
_no_attribute_policy="ignore", **ci_dict):
ci_existed = False
ci_type = CITypeCache.get(ci_type_name) or \
abort(404, "CIType {0} is not existed".format(ci_type_name))
unique_key = CIAttributeCache.get(ci_type.uniq_id) \
            or abort(400, 'illegal unique attribute')
unique = ci_dict.get(unique_key.attr_name) \
or abort(400, '{0} missing'.format(unique_key.attr_name))
old_ci = self.ci_is_exist(ci_type, unique_key, unique)
if old_ci is not None:
ci_existed = True
if exist_policy == 'reject':
return abort(400, 'CI is existed')
if old_ci.type_id != ci_type.type_id: # update ci_type
old_ci.type_id = ci_type.type_id
db.session.add(old_ci)
db.session.flush()
ci = old_ci
else:
if exist_policy == 'need':
return abort(404, 'CI {0} not exist'.format(unique))
ci = CI()
ci.type_id = ci_type.type_id
_uuid = uuid.uuid4().hex
ci.uuid = _uuid
ci.created_time = datetime.datetime.now()
db.session.add(ci)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error('add CI error: {0}'.format(str(e)))
return abort(400, 'add CI error')
value_manager = AttributeValueManager()
histories = list()
for p, v in ci_dict.items():
ret, res = value_manager.add_attr_value(
p, v, ci.ci_id, ci_type,
_no_attribute_policy=_no_attribute_policy,
ci_existed=ci_existed)
if not ret:
db.session.rollback()
if not ci_existed:
self.delete(ci.ci_id)
current_app.logger.info(res)
return abort(400, res)
if res is not None:
histories.append(res)
try:
db.session.commit()
except Exception as e:
current_app.logger.error(str(e))
db.session.rollback()
if not ci_existed: # only add
self.delete(ci.ci_id)
return abort(400, "add CI error")
his_manager = CIAttributeHistoryManger()
his_manager.add(ci.ci_id, histories)
ci_cache.apply_async([ci.ci_id], queue="cmdb_async")
return ci.ci_id
def update_unique_value(self, ci_id, args):
ci = self.get_ci_by_id(ci_id, need_children=False)
unique_key = ci.get("unique")
attr = CIAttributeCache.get(unique_key)
table_key = "index_{0}".format(attr.value_type) \
if attr.is_index else attr.value_type
value_table = type_map.get("table").get(table_key)
v = args.get(unique_key)
if value_table and v:
item = db.session.query(value_table).filter(
value_table.ci_id == ci_id).filter(
value_table.attr_id == attr.attr_id).first()
if item:
converter = type_map.get("converter").get(attr.value_type)
try:
item.value = converter(v)
except:
return abort(400, "value is illegal")
db.session.add(item)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error(str(e))
return abort(400, "update unique failed")
ci_cache.apply_async([ci_id], queue="cmdb_async")
def delete(self, ci_id):
ci = db.session.query(CI).filter(CI.ci_id == ci_id).first()
if ci is not None:
attrs = db.session.query(CITypeAttribute.attr_id).filter(
CITypeAttribute.type_id == ci.type_id).all()
attr_names = []
for attr in attrs:
attr_names.append(CIAttributeCache.get(attr.attr_id).attr_name)
attr_names = set(attr_names)
for attr_name in attr_names:
Table = TableMap(attr_name=attr_name).table
db.session.query(Table).filter(Table.ci_id == ci_id).delete()
db.session.query(CIRelation).filter(
CIRelation.first_ci_id == ci_id).delete()
db.session.query(CIRelation).filter(
CIRelation.second_ci_id == ci_id).delete()
# db.session.query(CIAttributeHistory).filter(
# CIAttributeHistory.ci_id == ci_id).delete()
db.session.flush()
db.session.delete(ci)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error("delete CI error, {0}".format(str(e)))
return abort(400, "delete CI error, {0}".format(str(e)))
# todo: write history
ci_delete.apply_async([ci.ci_id], queue="cmdb_async")
return ci_id
return abort(404, "CI {0} not found".format(ci_id))
def add_heartbeat(self, ci_type, unique):
ci_type = CITypeCache.get(ci_type)
if not ci_type:
return 'error'
uniq_key = CIAttributeCache.get(ci_type.uniq_id)
Table = TableMap(attr_name=uniq_key.attr_name).table
ci_id = db.session.query(Table.ci_id).filter(
Table.attr_id == uniq_key.attr_id).filter(
Table.value == unique).first()
if ci_id is None:
return 'error'
ci = db.session.query(CI).filter(CI.ci_id == ci_id.ci_id).first()
if ci is None:
return 'error'
ci.heartbeat = datetime.datetime.now()
db.session.add(ci)
db.session.commit()
return "ok"
def get_heartbeat(self, page, type_id, agent_status=None):
query = db.session.query(CI.ci_id, CI.heartbeat)
expire = datetime.datetime.now() - datetime.timedelta(minutes=72)
if type_id:
query = query.filter(CI.type_id == type_id)
else:
query = query.filter(db.or_(CI.type_id == 7, CI.type_id == 8))
if agent_status == -1:
query = query.filter(CI.heartbeat == None)
elif agent_status == 0:
query = query.filter(CI.heartbeat <= expire)
elif agent_status == 1:
query = query.filter(CI.heartbeat > expire)
numfound = query.count()
per_page_count = current_app.config.get("DEFAULT_PAGE_COUNT")
cis = query.offset((page - 1) * per_page_count).limit(
per_page_count).all()
ci_ids = [ci.ci_id for ci in cis]
heartbeat_dict = {}
for ci in cis:
if agent_status is not None:
heartbeat_dict[ci.ci_id] = agent_status
else:
if ci.heartbeat is None:
heartbeat_dict[ci.ci_id] = -1
elif ci.heartbeat <= expire:
heartbeat_dict[ci.ci_id] = 0
else:
heartbeat_dict[ci.ci_id] = 1
current_app.logger.debug(heartbeat_dict)
ci_ids = map(str, ci_ids)
res = get_cis_by_ids(ci_ids, fields=["hostname", "private_ip"])
result = [(i.get("hostname"), i.get("private_ip")[0], i.get("ci_type"),
heartbeat_dict.get(i.get("_id"))) for i in res
if i.get("private_ip")]
return numfound, result
class CIRelationManager(object):
"""
manage relation between CIs
"""
def __init__(self):
pass
@property
def relation_types(self):
""" all CIType relation types
"""
from lib.const import CI_RELATION_TYPES
return CI_RELATION_TYPES
def get_second_cis(self, first_ci, relation_type="contain",
page=1, per_page=None, **kwargs):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
second_cis = db.session.query(
CI.ci_id).join(
CIRelation, CIRelation.second_ci_id == CI.ci_id).filter(
CIRelation.first_ci_id == first_ci).filter(
CIRelation.relation_type == relation_type)
if kwargs: # special for devices
second_cis = self._query_wrap_for_device(second_cis, **kwargs)
numfound = second_cis.count()
second_cis = second_cis.offset(
(page - 1) * per_page).limit(per_page).all()
ci_ids = [str(son.ci_id) for son in second_cis]
total = len(ci_ids)
result = get_cis_by_ids(ci_ids)
return numfound, total, result
def get_grandsons(self, ci_id, page=1, per_page=None, **kwargs):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
children = db.session.query(CIRelation.second_ci_id).filter(
CIRelation.first_ci_id == ci_id).subquery()
grandsons = db.session.query(CIRelation.second_ci_id).join(
children,
children.c.second_ci_id == CIRelation.first_ci_id).subquery()
grandsons = db.session.query(CI.ci_id).join(
grandsons, grandsons.c.second_ci_id == CI.ci_id)
if kwargs:
grandsons = self._query_wrap_for_device(grandsons, **kwargs)
numfound = grandsons.count()
grandsons = grandsons.offset(
(page - 1) * per_page).limit(per_page).all()
if not grandsons:
return 0, 0, []
ci_ids = [str(son.ci_id) for son in grandsons]
total = len(ci_ids)
result = get_cis_by_ids(ci_ids)
return numfound, total, result
def _sort_handler(self, sort_by, query_sql):
if sort_by.startswith("+"):
sort_type = "asc"
sort_by = sort_by[1:]
elif sort_by.startswith("-"):
sort_type = "desc"
sort_by = sort_by[1:]
else:
sort_type = "asc"
attr = CIAttributeCache.get(sort_by)
if attr is None:
return query_sql
attr_id = attr.attr_id
Table = TableMap(attr_name=sort_by).table
CI_table = query_sql.subquery()
query_sql = db.session.query(CI_table.c.ci_id, Table.value).join(
Table, Table.ci_id == CI_table.c.ci_id).filter(
Table.attr_id == attr_id).order_by(
getattr(Table.value, sort_type)())
return query_sql
def _query_wrap_for_device(self, query_sql, **kwargs):
_type = kwargs.pop("_type", False) or kwargs.pop("type", False) \
or kwargs.pop("ci_type", False)
if _type:
ci_type = CITypeCache.get(_type)
if ci_type is None:
return
query_sql = query_sql.filter(CI.type_id == ci_type.type_id)
for k, v in kwargs.iteritems():
attr = CIAttributeCache.get(k)
if attr is None:
continue
Table = TableMap(attr_name=k).table
CI_table = query_sql.subquery()
query_sql = db.session.query(CI_table.c.ci_id).join(
Table, Table.ci_id == CI_table.c.ci_id).filter(
Table.attr_id == attr.attr_id).filter(
Table.value.ilike(v.replace("*", "%")))
current_app.logger.debug(query_sql)
sort_by = kwargs.pop("sort", False)
if sort_by:
query_sql = self._sort_handler(sort_by, query_sql)
return query_sql
def get_great_grandsons(self, ci_id, page=1, per_page=None, **kwargs):
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
children = db.session.query(CIRelation.second_ci_id).filter(
CIRelation.first_ci_id == ci_id).subquery()
grandsons = db.session.query(CIRelation.second_ci_id).join(
children,
children.c.second_ci_id == CIRelation.first_ci_id).subquery()
great_grandsons = db.session.query(CIRelation.second_ci_id).join(
grandsons,
grandsons.c.second_ci_id == CIRelation.first_ci_id).subquery()
great_grandsons = db.session.query(CI.ci_id).join(
great_grandsons, great_grandsons.c.second_ci_id == CI.ci_id)
if kwargs:
great_grandsons = self._query_wrap_for_device(
great_grandsons, **kwargs)
if great_grandsons is None:
return 0, 0, []
numfound = great_grandsons.count()
great_grandsons = great_grandsons.offset(
(page - 1) * per_page).limit(per_page).all()
ci_ids = [str(son.ci_id) for son in great_grandsons]
total = len(ci_ids)
result = get_cis_by_ids(ci_ids)
return numfound, total, result
def get_first_cis(self, second_ci, relation_type="contain",
page=1, per_page=None):
"""only for CI Type
"""
if per_page is None:
per_page = current_app.config.get("DEFAULT_PAGE_COUNT")
first_cis = db.session.query(CIRelation.first_ci_id).filter(
CIRelation.second_ci_id == second_ci).filter(
CIRelation.relation_type == relation_type)
numfound = first_cis.count()
first_cis = first_cis.offset(
(page - 1) * per_page).limit(per_page).all()
result = []
first_ci_ids = [str(first_ci.first_ci_id) for first_ci in first_cis]
total = len(first_ci_ids)
if first_ci_ids:
result = get_cis_by_ids(first_ci_ids)
return numfound, total, result
def get_grandfather(self, ci_id, relation_type="contain"):
"""only for CI Type
"""
grandfather = db.session.query(CIRelation.first_ci_id).filter(
CIRelation.second_ci_id.in_(db.session.query(
CIRelation.first_ci_id).filter(
CIRelation.second_ci_id == ci_id).filter(
CIRelation.relation_type == relation_type))).filter(
CIRelation.relation_type == relation_type).first()
if grandfather:
return CIManager().get_ci_by_id(grandfather.first_ci_id,
need_children=False)
def add(self, first_ci, second_ci, more=None, relation_type="contain"):
ci = db.session.query(CI.ci_id).filter(CI.ci_id == first_ci).first()
if ci is None:
return abort(404, "first_ci {0} is not existed".format(first_ci))
c = db.session.query(CI.ci_id).filter(CI.ci_id == second_ci).first()
if c is None:
return abort(404, "second_ci {0} is not existed".format(
second_ci))
existed = db.session.query(CIRelation.cr_id).filter(
CIRelation.first_ci_id == first_ci).filter(
CIRelation.second_ci_id == second_ci).first()
if existed is not None:
return existed.cr_id
cr = CIRelation()
cr.first_ci_id = first_ci
cr.second_ci_id = second_ci
if more is not None:
cr.more = more
cr.relation_type = relation_type
db.session.add(cr)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
current_app.logger.error("add CIRelation is error, {0}".format(
str(e)))
return abort(400, "add CIRelation is error, {0}".format(str(e)))
# write history
his_manager = CIRelationHistoryManager()
his_manager.add(cr.cr_id, cr.first_ci_id, cr.second_ci_id,
relation_type, operate_type="add")
return cr.cr_id
def delete(self, cr_id):
cr = db.session.query(CIRelation).filter(
CIRelation.cr_id == cr_id).first()
        if cr is not None:
            # read the attributes only after confirming the relation exists
            cr_id = cr.cr_id
            first_ci = cr.first_ci_id
            second_ci = cr.second_ci_id
            db.session.delete(cr)
try:
db.session.commit()
except Exception as e:
db.session.rollback()
                current_app.logger.error(
                    "failed to delete CIRelation, {0}".format(str(e)))
                return abort(
                    400, "failed to delete CIRelation, {0}".format(str(e)))
his_manager = CIRelationHistoryManager()
his_manager.add(cr_id, first_ci, second_ci,
cr.relation_type, operate_type="delete")
return True
return abort(404, "CI relation is not existed")
    def delete_2(self, first_ci, second_ci):
        cr = db.session.query(CIRelation).filter(
            CIRelation.first_ci_id == first_ci).filter(
            CIRelation.second_ci_id == second_ci).first()
        if cr is None:
            return abort(404, "CI relation does not exist")
        return self.delete(cr.cr_id)
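    # --- Illustrative usage sketch (editor's addition, not original code) ---
    # Hypothetical helper showing how the add()/delete() pair above is
    # typically used; the CI ids are placeholders and must refer to existing
    # CI records for add() to succeed.
    def _example_relation_roundtrip(self, first_ci_id, second_ci_id):
        """Create a `contain` relation between two CIs, then remove it."""
        cr_id = self.add(first_ci_id, second_ci_id, relation_type="contain")
        return self.delete(cr_id)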
class HostNumStatis(object):
def __init__(self):
pass
def get_hosts_by_project(self, project_id_list=None):
res = {}
if not project_id_list:
project = CITypeCache.get("project")
projects = db.session.query(CI.ci_id).filter(
CI.type_id == project.type_id).all()
project_id_list = (project.ci_id for project in projects)
project_id_list = map(str, project_id_list)
project_ids = ",".join(project_id_list)
nums = db.session.execute(QUERY_HOSTS_NUM_BY_PROJECT.format(
"".join(["(", project_ids, ")"]))).fetchall()
if nums:
for ci_id in project_id_list:
res[int(ci_id)] = 0
for ci_id, num in nums:
res[ci_id] = num
return res
def get_hosts_by_product(self, product_id_list=None):
res = {}
if not product_id_list:
product = CITypeCache.get("product")
products = db.session.query(CI.ci_id).filter(
CI.type_id == product.type_id).all()
product_id_list = (product.ci_id for product in products)
product_id_list = map(str, product_id_list)
product_ids = ",".join(product_id_list)
nums = db.session.execute(QUERY_HOSTS_NUM_BY_PRODUCT.format(
"".join(["(", product_ids, ")"]))).fetchall()
if nums:
for ci_id in product_id_list:
res[int(ci_id)] = 0
for ci_id, num in nums:
res[ci_id] = num
return res
def get_hosts_by_bu(self, bu_id_list=None):
res = {}
if not bu_id_list:
bu = CITypeCache.get("bu")
bus = db.session.query(CI.ci_id).filter(
CI.type_id == bu.type_id).all()
bu_id_list = (bu.ci_id for bu in bus)
bu_id_list = map(str, bu_id_list)
bu_ids = ",".join(bu_id_list)
current_app.logger.debug(QUERY_HOSTS_NUM_BY_BU.format(
"".join(["(", bu_ids, ")"])))
if not bu_ids:
return res
nums = db.session.execute(
QUERY_HOSTS_NUM_BY_BU.format(
"".join(["(", bu_ids, ")"]))).fetchall()
if nums:
for ci_id in bu_id_list:
res[int(ci_id)] = 0
for ci_id, num in nums:
res[ci_id] = num
return res
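# --- Illustrative usage sketch (editor's addition, not original code) -------
# Minimal call into the statistics helper above.  It needs an application and
# database context; the project ids are placeholders.
def _example_host_counts_by_project():
    statis = HostNumStatis()
    return statis.get_hosts_by_project(project_id_list=["1", "2"])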
def get_cis_by_ids(ci_ids, ret_key="name", fields="", value_tables=None):
""" argument ci_ids are string list of CI instance ID, eg. ['1', '2']
"""
if not ci_ids:
return []
start = time.time()
ci_id_tuple = tuple(map(int, ci_ids))
res = rd.get(ci_id_tuple)
if res is not None and None not in res and ret_key == "name":
res = map(json.loads, res)
if not fields:
return res
else:
_res = []
for d in res:
_d = dict()
_d["_id"], _d["_type"] = d.get("_id"), d.get("_type")
_d["ci_type"] = d.get("ci_type")
for field in fields:
_d[field] = d.get(field)
_res.append(_d)
current_app.logger.debug("filter time: %s" % (time.time() - start))
return _res
current_app.logger.warning("cache not hit...............")
if not fields:
_fields = ""
else:
_fields = list()
for field in fields:
attr = CIAttributeCache.get(field)
if attr is not None:
_fields.append(str(attr.attr_id))
_fields = "WHERE A.attr_id in ({0})".format(",".join(_fields))
ci_ids = ",".join(ci_ids)
if value_tables is None:
value_tables = type_map["table_name"].values()
current_app.logger.debug(value_tables)
value_sql = " UNION ".join([QUERY_CIS_BY_VALUE_TABLE.format(value_table,
ci_ids)
for value_table in value_tables])
query_sql = QUERY_CIS_BY_IDS.format(ci_ids, _fields, value_sql)
current_app.logger.debug(query_sql)
start = time.time()
hosts = db.session.execute(query_sql).fetchall()
current_app.logger.info("get cis time is: {0}".format(
time.time() - start))
ci_list = set()
res = list()
ci_dict = dict()
start = time.time()
for ci_id, type_id, attr_id, attr_name, \
attr_alias, value, value_type, is_multivalue in hosts:
if ci_id not in ci_list:
ci_dict = dict()
ci_type = CITypeSpecCache.get(type_id)
ci_dict["_id"] = ci_id
ci_dict["_type"] = type_id
ci_dict["ci_type"] = ci_type.type_name
ci_dict["ci_type_alias"] = ci_type.type_alias
ci_list.add(ci_id)
res.append(ci_dict)
if ret_key == "name":
if is_multivalue:
if isinstance(ci_dict.get(attr_name), list):
ci_dict[attr_name].append(value)
else:
ci_dict[attr_name] = [value]
else:
ci_dict[attr_name] = value
elif ret_key == "alias":
if is_multivalue:
if isinstance(ci_dict.get(attr_alias), list):
ci_dict[attr_alias].append(value)
else:
ci_dict[attr_alias] = [value]
else:
ci_dict[attr_alias] = value
elif ret_key == "id":
if is_multivalue:
if isinstance(ci_dict.get(attr_id), list):
ci_dict[attr_id].append(value)
else:
ci_dict[attr_id] = [value]
else:
ci_dict[attr_id] = value
current_app.logger.debug("result parser time is: {0}".format(
time.time() - start))
return res
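# --- Illustrative usage sketch (editor's addition, not original code) -------
# get_cis_by_ids() takes CI ids as strings plus an optional list of attribute
# names.  The attribute names below ("hostname", "ip") are placeholders; they
# must exist in CIAttributeCache for the field filter to keep them.
def _example_get_cis():
    return get_cis_by_ids(["1", "2"], ret_key="name", fields=["hostname", "ip"])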
| gpl-2.0 | 297,984,128,190,413,200 | 38.565341 | 79 | 0.539348 | false |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/fixes.py | 1 | 29568 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# XXX : copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD
from __future__ import division
import collections
from operator import itemgetter
import inspect
import warnings
import numpy as np
import scipy
from scipy import linalg, sparse
from math import ceil, log
from numpy.fft import irfft
from distutils.version import LooseVersion
from functools import partial
from .externals import six
from .externals.six.moves import copyreg, xrange
from gzip import GzipFile
###############################################################################
# Misc
class gzip_open(GzipFile): # python2.6 doesn't have context managing
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
if LooseVersion(np.__version__) < LooseVersion('1.5'):
unique = _unique
else:
unique = np.unique
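# Editor's usage sketch (not part of the original module): both the fallback
# and the native implementation return the sorted unique values together with
# the inverse mapping when return_inverse=True.
def _example_unique():
    values, inverse = unique(np.array([3, 1, 3, 2]), return_inverse=True)
    # values -> array([1, 2, 3]); values[inverse] reconstructs the input
    return values, inverse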
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if LooseVersion(np.__version__) < LooseVersion('1.6'):
bincount = _bincount
else:
bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False, invert=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
in1d = _in1d
else:
in1d = np.in1d
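# Editor's usage sketch (not part of the original module): membership test of
# the first array's elements in the second one.
def _example_in1d():
    # expected result: array([False,  True, False,  True])
    return in1d(np.array([0, 1, 2, 5]), np.array([1, 5]))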
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
digitize = _digitize
else:
digitize = np.digitize
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
if not hasattr(np, 'tril_indices'):
tril_indices = _tril_indices
else:
tril_indices = np.tril_indices
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
if LooseVersion(np.__version__) < LooseVersion('1.4'):
unravel_index = _unravel_index
else:
unravel_index = np.unravel_index
def _qr_economic_old(A, **kwargs):
"""
Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
with warnings.catch_warnings(record=True):
return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
return linalg.qr(A, mode='economic', **kwargs)
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
qr_economic = _qr_economic_old
else:
qr_economic = _qr_economic_new
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
if hasattr(np, 'count_nonzero'):
from numpy import count_nonzero
else:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
def _meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
        If False, views into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
meshgrid = _meshgrid
else:
meshgrid = np.meshgrid
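# Editor's usage sketch (not part of the original module): with 'ij' indexing
# the first output axis follows the first input, so both grids have shape (3, 2).
def _example_meshgrid():
    xx, yy = meshgrid(np.arange(3), np.arange(2), indexing='ij')
    return xx, yy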
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP
>>> print(taps[72:78]) # doctest: +SKIP
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def get_firwin2():
"""Helper to get firwin2"""
try:
from scipy.signal import firwin2
except ImportError:
firwin2 = _firwin2
return firwin2
def _filtfilt(*args, **kwargs):
"""wrap filtfilt, excluding padding arguments"""
from scipy.signal import filtfilt
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return filtfilt(*args, **kwargs)
def get_filtfilt():
"""Helper to get filtfilt from scipy"""
from scipy.signal import filtfilt
if 'padlen' in inspect.getargspec(filtfilt)[0]:
return filtfilt
return _filtfilt
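# Editor's usage sketch (not part of the original module): the wrapper hides
# the `padlen` keyword difference between scipy versions.
def _example_filtfilt():
    from scipy.signal import butter
    b, a = butter(4, 0.2)  # 4th-order low-pass filter
    x = np.random.randn(100)
    return get_filtfilt()(b, a, x)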
def _get_argrelmax():
try:
from scipy.signal import argrelmax
except ImportError:
argrelmax = _argrelmax
return argrelmax
def _argrelmax(data, axis=0, order=1, mode='clip'):
"""Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
"""
comparator = np.greater
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return np.where(results)
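# Editor's usage sketch (not part of the original module): indices 1 and 3 are
# the only points strictly greater than both neighbours, so the fallback
# returns (array([1, 3]),).
def _example_argrelmax():
    return _argrelmax(np.array([1, 2, 1, 5, 1]))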
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
if LooseVersion(np.__version__) > '1.7.1':
from numpy.linalg import matrix_rank
else:
matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
copyreg.pickle(partial, _reduce_partial)
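# Editor's usage sketch (not part of the original module): with the copyreg
# hook registered above, functools.partial objects survive a pickle
# round-trip even on Python 2.6.
def _example_pickle_partial():
    import pickle
    clamped = partial(max, 0)
    return pickle.loads(pickle.dumps(clamped))(-5)  # -> 0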
def normalize_colors(vmin, vmax, clip=False):
"""Helper to handle matplotlib API"""
import matplotlib.pyplot as plt
try:
return plt.Normalize(vmin, vmax, clip=clip)
except AttributeError:
return plt.normalize(vmin, vmax, clip=clip)
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
try:
from scipy.sparse import block_diag as sparse_block_diag
except Exception:
sparse_block_diag = _sparse_block_diag
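# Editor's usage sketch (not part of the original module): stacking two 2x2
# identity matrices block-diagonally yields a 4x4 sparse matrix.
def _example_sparse_block_diag():
    a = sparse.eye(2, 2, format='csr')
    b = sparse.eye(2, 2, format='csr')
    return sparse_block_diag([a, b], format='csr')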
def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False], dtype=bool)
>>> isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True], dtype=bool)
>>> isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True], dtype=bool)
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = np.core.multiarray.result_type(y, 1.)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond
if LooseVersion(np.__version__) < LooseVersion('1.7'):
isclose = _isclose
else:
isclose = np.isclose
| bsd-3-clause | -7,662,654,578,994,901,000 | 32.297297 | 79 | 0.616883 | false |
polyaxon/polyaxon | core/polyaxon/tracking/contrib/ignite.py | 1 | 1104 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.tracking import Run
try:
from ignite.contrib.handlers.polyaxon_logger import (
PolyaxonLogger as _PolyaxonLogger,
)
except ImportError:
raise PolyaxonClientException("ignite is required to use PolyaxonCallback")
class PolyaxonLogger(_PolyaxonLogger):
def __init__(self, *args, **kwargs):
self.experiment = kwargs.get("run", Run(*args, **kwargs))
# alias
PolyaxonIgniteLogger = PolyaxonLogger
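# Editor's usage sketch (not part of the original module): constructing the
# logger exposes the underlying tracking Run as `.experiment`.  This assumes
# the code runs where a Polyaxon run context can be resolved (e.g. inside a
# Polyaxon job); `log_inputs` is part of the polyaxon tracking Run API.
def _example_logger():
    plx_logger = PolyaxonLogger()
    plx_logger.experiment.log_inputs(lr=0.01)
    return plx_logger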
| apache-2.0 | -7,285,680,150,348,896,000 | 31.470588 | 79 | 0.75 | false |
sysopfb/Malware_Scripts | wannacry/decode_dll.py | 1 | 1118 | #For decoded t.wnry file from sample: ed01ebfbc9eb5bbea545af4d01bf5f1071661840480439c6e5babe8e080e41aa
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
from Crypto import Random
from Crypto.Hash import SHA
import sys
import struct
import binascii
import hashlib
def decode_rsa(privkey, data):
rsa_key = RSA.importKey(privkey)
cipher = PKCS1_v1_5.new(rsa_key)
sentinel = Random.new().read(16)
d = cipher.decrypt(data[::-1],sentinel)
return d
if __name__ == "__main__":
data = open(sys.argv[1],'rb').read()
privkey = open('privkey.der').read()
hdr = data[:8]
data = data[8:]
size = struct.unpack_from('<I', data)[0]
data = data[4:]
blob1 = data[:size]
data = data[size:]
(id, size) = struct.unpack_from('<IQ', data)
data = data[12:]
blob2 = data[:size]
data = data[size:]
if data != '':
print("More data found!")
key = decode_rsa(privkey, blob1)
aes = AES.new(key, AES.MODE_CBC, '\x00'*16)
decoded = aes.decrypt(blob2)
sha256 = hashlib.sha256(decoded).hexdigest()
open(sha256, 'wb').write(decoded)
print("Wrote decoded file to: "+sha256)
| mit | -1,457,900,662,308,675,000 | 25.619048 | 102 | 0.69678 | false |
BenjaminEHowe/library-api | library_api/implementations/enterprise.py | 1 | 15276 | import re
import requests
from ..library import NotAuthenticatedError
class library:
def __init__(self, url):
""" Initialises the library. """
self.session = requests.Session()
self.authenticated = False;
return
def login(self, userid, password):
""" Authenticates session for future use. """
# find the "formdata" that we need to submit with the login
r = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/patronlogin/')
formdata = re.search('name="t:ac" type="hidden"></input><input value="(.*?)" name="t:formdata" type="hidden">', r.text).group(1)
# log in
postData = {
'j_username': userid,
'j_password': password,
't:formdata': formdata }
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/patronlogin.loginpageform/LIVE?&t:ac=$N', postData)
if "new RedirectAfterLogin('null');" in r.text:
# if we get redirected, the login was successful!
self.authenticated = True
return True
else:
return False
def search(self, query=None, title=None, author=None, ean=None):
""" Performs a search, returning a (potentially empty) list of
items. Optionally, search only within the title or author or
EAN / ISBN-13 attributes. """
# perform the search
if query:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?qu=' + query)
elif title:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CTITLE%7C%7C%7CTitle&qu=' + title)
elif author:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CAUTHOR%7C%7C%7CAuthor&qu=' + author)
elif ean:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CISBN%7C%7C%7CISBN&qu=' + ean)
else:
raise ValueError
results = []
# interpret the results
if 'Holds:' in result.text:
# if we got to the page for a single item
            # item type, checks are ordered so the most likely check to
            # pass is done first
if '<div class="displayElementText GENRE_TERM"><a title="Electronic books" alt="Electronic books"' in result.text:
# ebooks also have ISBN so we have to check this first
itemtype = 'ebook'
elif re.search('Loan Type.{0,350}?PERIODICAL', result.text, re.DOTALL):
itemtype = 'journal'
elif '<div class="displayElementText GENRE_TERM"><a title="Electronic journals." alt="Electronic journals."' in result.text:
itemtype = 'ejournal'
elif re.search('Call Number.{0,450}?THESIS(?: -|--)', result.text, re.DOTALL):
# thesis
itemtype = 'academic_paper'
elif re.search('<div class="displayElementText TITLE">.{0,100}?\[cartographic material\]', result.text):
# map / atlas
itemtype = 'map'
elif 'Electronic access:' in result.text:
# electronic resources / cd-rom
itemtype = 'electronic'
elif re.search('<div class="displayElementText TITLE">.{0,100}?\[sound recording\]', result.text):
# sound recording
itemtype = 'audio'
elif re.search('<div class="displayElementText TITLE">.{0,100}?\[videorecording\]', result.text):
# dvd / video casette / visual materials
itemtype = 'video'
elif 'ISBN:' in result.text:
# if it's nothing else and it has an ISBN it's probably a book
itemtype = 'book'
else:
# archive, object, kit
itemtype = 'other'
# find an ID number to use
identifier = None
if itemtype == 'journal' or itemtype == 'ejournal':
try:
identifier = re.search('<div class="displayElementText ISSN_LOCAL">(\d{4}-\d{4})', result.text).group(1)
except AttributeError:
pass
elif itemtype == 'academic_paper':
identifier = re.search('Call Number.{0,4500}?THESIS(?: -|--|-)(R\d{0,6})', result.text, re.DOTALL).group(1)
else:
try:
identifier = re.search('<div class="displayElementText LOCAL_ISBN">(\d{13})', result.text).group(1)
except AttributeError:
# no ISBN-13 / EAN recorded, drop to ISBN-10
try:
identifier = re.search('<div class="displayElementText LOCAL_ISBN">(\d{10})', result.text).group(1)
except AttributeError:
pass
if identifier == None: # if we couldn't find an ISBN / ISSN
identifier = re.search("'ent://SD_ILS/\d{0,8}?/SD_ILS:(\d{0,10}?)'", result.text).group(1)
# title
fulltitle = re.search('<div class="displayElementText TITLE">(.*?)<\/div>', result.text).group(1)
try:
title = re.search('(.*?)(?: :| \/|\.)', fulltitle).group(1)
except AttributeError:
title = fulltitle # if the full title is also the normal title
if ' / ' in fulltitle:
# if the author has been embedded in the title use that
# as it's generally more accurate
author = re.search('.*? (?:\/ by|\/) (.*?)(?: ;|\.$|$)', fulltitle).group(1).split(', ')
elif 'Personal author:' in result.text:
# the personal author generally only includes the first
# author, but it's the best we've got. it also sometimes
# includes the years the author was alive, which is
# annoying
match = re.search('<div class="displayElementText PERSONAL_AUTHOR"><a title="(.*?), (.*?,|.*?\.).*?" alt=', result.text)
first = match.group(2).rstrip(',')
second = match.group(1)
author = [first + ' ' + second]
elif 'Added corporate author' in result.text:
corporate_authors = "".join(re.findall('<div class="displayElementText ADDED_CORPORATE_AUTHOR">(.*?)</div>', result.text))
author = re.findall('<a .*?>(.*?)</a>', corporate_authors)
else:
# not much else we can do other than return unknown
author = ['unknown']
results.append( {
'id': identifier,
'title': title,
'author': author,
'type': itemtype,
} )
elif 'results found' in result.text:
# if we got to a page with lots of results
number_of_results = re.search('(\d{0,7}?) results found', result.text).group(1)
#if number_of_results > 120:
# cap at 120 otherwise getting results could be slow
# number_of_results = 120
print (result.text)
while len(results) < int(number_of_results):
types = re.findall('<div class="displayElementText highlightMe UR_FORMAT"> (.*?)</div>', result.text)
for i in range(len(types)):
# title
fulltitle = re.search('<a id="detailLink' + str(i) + '" title="(.*?)"', result.text).group(1)
print (str(i))
print(fulltitle)
try:
title = re.search('(.*?)(?: :| \/|\.)', fulltitle).group(1)
except AttributeError:
                        title = fulltitle  # if the full title is also the normal title
if ' / ' in fulltitle:
# if the author has been embedded in the title use that
# as it's generally more accurate
author = re.search('.*? (?:\/ by|\/) (.*?)(?: ;|\.$|$)', fulltitle).group(1).split(', ')
else:
author = ['unknown']
# type
if types[i] == 'Thesis':
itemtype = 'academic_paper'
elif types[i] == 'Sound disc':
itemtype = 'audio'
elif types[i] == 'Book':
if '[electronic resource]' in title:
itemtype = 'ebook'
else:
itemtype = 'book'
elif types[i] == 'Electronic Resources' or types[i] == 'CD-ROM':
itemtype = 'electronic'
elif types[i] == 'Journal':
if '[electronic resource]' in title:
itemtype = 'ejournal'
else:
itemtype = 'journal'
elif types[i] == 'Maps' or types[i] == 'Atlas':
itemtype = 'map'
elif types[i] == 'Printed music':
itemtype = 'audio'
elif types[i] == 'DVD' or types[i] == 'Video casette' or types[i] == 'Visual Materials':
itemtype = 'video'
else:
itemtype = 'other'
# identifier
identifier = None
try:
identifier = re.search('<div id="hitlist' + str(i) + '_ISBN"><div class="ISBN_value">(\d{13})', result.text).group(1)
except AttributeError:
try:
identifier = re.search('<div id="hitlist' + str(i) + '_ISSN"><div class="ISSN_value">(\d\d\d\d-\d\d\d\d)', result.text).group(1)
except AttributeError:
pass
if identifier == None:
identifier = re.search('(\d{0,10})" type="hidden" id="da' + str(i) + '"', result.text).group(1)
results.append( {
'id': identifier,
'title': title,
'author': author,
'type': itemtype,
} )
if len(results) % 12 == 0: # we'll have run out of results, get more
if query:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?qu=' + query + '&rw=' + str(len(results)))
elif title:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CTITLE%7C%7C%7CTitle&qu=' + title + '&rw=' + str(len(results)))
elif author:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CAUTHOR%7C%7C%7CAuthor&qu=' + author + '&rw=' + str(len(results)))
elif ean:
result = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/results?rt=false%7C%7C%7CISBN%7C%7C%7CISBN&qu=' + ean + '&rw=' + str(len(results)))
print (result.text)
return results
def list_items(self):
""" Returns a list of items the borrower has (currently formatted
as a list of enterprise IDs). """
if not self.authenticated:
raise NotAuthenticatedError
r = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account?')
        # for some insane reason it's necessary to get the holds to get an ID to get checkouts...
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.holdsajax/true?', {'t:zoneid': 'holdsAjax'}, headers={'X-Requested-With': 'XMLHttpRequest'})
zoneid = re.search("<div class='hidden t-zone' id='(.*?)'>", r.text).group(1)
# request list of books checked out
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.finesandcheckouts/-1/-1/$B/0/true?', {'t:zoneid': zoneid}, headers={'X-Requested-With': 'XMLHttpRequest'})
books = re.findall('<span>([X\d]{10})<\\\/span>', r.text)
return books
def renew_all(self):
r = self.session.get('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account?')
        # for some insane reason it's necessary to get the holds to get an ID to get checkouts...
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.holdsajax/true?', {'t:zoneid': 'holdsAjax'}, headers={'X-Requested-With': 'XMLHttpRequest'})
zoneid = re.search("<div class='hidden t-zone' id='(.*?)'>", r.text).group(1)
# request list of books checked out
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.finesandcheckouts/-1/-1/$B/0/true?', {'t:zoneid': zoneid}, headers={'X-Requested-With': 'XMLHttpRequest'})
items = self.list_items()
numberOfItems = len(items)
formdata = re.search("<div class='t-invisible'><input value='(.*?)' name='t:formdata' type='hidden'>", r.text).group(1)
listSubmitId = re.search("<input value='submit' class='hidden' id='(.*?)'", r.text).group(1)
# renew items
postData = {
't:formdata': formdata,
't:submit': '["' + listSubmitId + '[","myCheckouts_checkoutslist_submit"]',
't:zoneid': 'checkoutItemsZone'}
for i in range(numberOfItems):
if i == 0: # special case
postData['checkbox'] = 'on'
else:
postData['checkbox_' + str(i-1)] = 'on'
r = self.session.post('https://rdg.ent.sirsidynix.net.uk/client/en_GB/main/search/account.checkouts.checkoutslist.form?pc=%7B%22checkoutsList%22%3A%22%22%7D', postData, headers={'X-Requested-With': 'XMLHttpRequest'})
renewalStatus = {}
for item in items:
# check it renewed successfully
if re.search(item + "<\\\/span><br/><span class='checkoutsRenewed'>Renewal succeeded.<\\\/span>", r.text):
renewalStatus[item] = [True]
else:
renewalStatus[item] = [False]
# fix this for "item recalled"
dueDateMatch = re.search(item + ".*?class='checkoutsDueDate'>(\d\d)\/(\d\d)\/(\d\d)<\\\/td>", r.text)
dueDate = '20' + dueDateMatch.group(3) + '-' + dueDateMatch.group(2) + '-' + dueDateMatch.group(1)
renewalStatus[item].append(dueDate)
return renewalStatus
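# --- Illustrative usage sketch (editor's addition, not original code) -------
# Minimal end-to-end use of the class above; the credentials are obviously
# placeholders and a real library card number/PIN is required for login() and
# renew_all() to succeed.
def _example_session():
    lib = library('https://rdg.ent.sirsidynix.net.uk')
    if lib.login('user id', 'password'):
        print(lib.search(title='linear algebra'))
        print(lib.renew_all())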
| mit | -5,243,826,327,986,438,000 | 53.752688 | 224 | 0.515449 | false |
kyoshino/bedrock | bedrock/releasenotes/tests/test_models.py | 1 | 6304 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from itertools import chain
from django.core.cache import caches
from django.test.utils import override_settings
from mock import call, patch
from pathlib2 import Path
from bedrock.mozorg.tests import TestCase
from bedrock.releasenotes import models
RELEASES_PATH = str(Path(__file__).parent)
release_cache = caches['release-notes']
@patch('bedrock.releasenotes.models.reverse')
class TestReleaseNotesURL(TestCase):
def test_aurora_android_releasenotes_url(self, mock_reverse):
"""
Should return the results of reverse with the correct args
"""
release = models.ProductRelease(channel='Aurora', version='42.0a2', product='Firefox for Android')
assert release.get_absolute_url() == mock_reverse.return_value
mock_reverse.assert_called_with('firefox.android.releasenotes', args=['42.0a2', 'aurora'])
def test_desktop_releasenotes_url(self, mock_reverse):
"""
Should return the results of reverse with the correct args
"""
release = models.ProductRelease(version='42.0', product='Firefox')
assert release.get_absolute_url() == mock_reverse.return_value
mock_reverse.assert_called_with('firefox.desktop.releasenotes', args=['42.0', 'release'])
@override_settings(RELEASE_NOTES_PATH=RELEASES_PATH, DEV=False)
class TestReleaseModel(TestCase):
def setUp(self):
models.ProductRelease.objects.refresh()
release_cache.clear()
def test_release_major_version(self):
rel = models.get_release('firefox', '57.0a1')
assert rel.major_version == '57'
def test_get_bug_search_url(self):
rel = models.get_release('firefox', '57.0a1')
assert '=Firefox%2057&' in rel.get_bug_search_url()
rel.bug_search_url = 'custom url'
assert 'custom url' == rel.get_bug_search_url()
def test_equivalent_release_for_product(self):
"""Based on the test files the equivalent release for 56 should be 56.0.2"""
rel = models.get_release('firefox', '56.0', 'release')
android = rel.equivalent_release_for_product('Firefox for Android')
assert android.version == '56.0.2'
assert android.product == 'Firefox for Android'
def test_equivalent_release_for_product_none_match(self):
rel = models.get_release('firefox', '45.0esr')
android = rel.equivalent_release_for_product('Firefox for Android')
assert android is None
def test_note_fixed_in_release(self):
rel = models.get_release('firefox', '55.0a1')
note = rel.notes[11]
with self.activate('en-US'):
assert note.fixed_in_release.get_absolute_url() == '/en-US/firefox/55.0a1/releasenotes/'
def test_field_processors(self):
rel = models.get_release('firefox', '57.0a1')
# datetime conversion
assert rel.created.year == 2017
# datetime conversion
assert rel.modified.year == 2017
# date conversion
assert rel.release_date.year == 2017
# markdown
assert rel.system_requirements.startswith('<h2 id="windows">Windows</h2>')
# version
assert rel.version_obj.major == 57
# notes
note = rel.notes[0]
# datetime conversion
assert note.created.year == 2017
# datetime conversion
assert note.modified.year == 2017
# markdown
assert note.note.startswith('<p>Firefox Nightly')
assert note.id == 787203
@override_settings(DEV=False)
def test_is_public_query(self):
"""Should not return the release value when DEV is false.
Should also only include public notes."""
assert models.get_release('firefox for android', '56.0.3') is None
rel = models.get_release('firefox', '57.0a1')
assert len(rel.notes) == 4
@override_settings(DEV=True)
def test_is_public_field_processor_dev_true(self):
"""Should always be true when DEV is true."""
models.get_release('firefox for android', '56.0.3')
rel = models.get_release('firefox', '57.0a1')
assert len(rel.notes) == 6
@patch.object(models.ProductRelease, 'objects')
class TestGetRelease(TestCase):
def setUp(self):
release_cache.clear()
def test_get_release(self, manager_mock):
manager_mock.product().get.return_value = 'dude is released'
assert models.get_release('Firefox', '57.0') == 'dude is released'
manager_mock.product.assert_called_with('Firefox', models.ProductRelease.CHANNELS[0], '57.0', False)
def test_get_release_esr(self, manager_mock):
manager_mock.product().get.return_value = 'dude is released'
assert models.get_release('Firefox Extended Support Release', '51.0') == 'dude is released'
manager_mock.product.assert_called_with('Firefox Extended Support Release', 'esr', '51.0', False)
def test_get_release_none_match(self, manager_mock):
"""Make sure the proper exception is raised if no file matches the query"""
manager_mock.product().get.side_effect = models.ProductRelease.DoesNotExist
assert models.get_release('Firefox', '57.0') is None
expected_calls = chain.from_iterable(
(call('Firefox', ch, '57.0', False), call().get()) for ch in models.ProductRelease.CHANNELS)
manager_mock.product.assert_has_calls(expected_calls)
@override_settings(RELEASE_NOTES_PATH=RELEASES_PATH, DEV=False)
class TestGetLatestRelease(TestCase):
def setUp(self):
models.ProductRelease.objects.refresh()
release_cache.clear()
def test_latest_release(self):
correct_release = models.get_release('firefox for android', '56.0.2')
assert models.get_latest_release('firefox for android', 'release') == correct_release
def test_non_public_release_not_duped(self):
# refresh again
models.ProductRelease.objects.refresh()
release_cache.clear()
# non public release
# should NOT raise multiple objects error
assert models.get_release('firefox for android', '56.0.3', include_drafts=True)
| mpl-2.0 | -4,550,439,496,601,802,000 | 39.670968 | 108 | 0.663071 | false |
adam900710/btrfs-progs | libbtrfsutil/python/setup.py | 1 | 2883 | #!/usr/bin/env python3
# Copyright (C) 2018 Facebook
#
# This file is part of libbtrfsutil.
#
# libbtrfsutil is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libbtrfsutil is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with libbtrfsutil. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import os.path
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import subprocess
def get_version():
f = open('../../VERSION', 'r')
version = f.readline().strip()
f.close()
return ".".join(version[1:].split('.'))
def out_of_date(dependencies, target):
dependency_mtimes = [os.path.getmtime(dependency) for dependency in dependencies]
try:
target_mtime = os.path.getmtime(target)
except OSError:
return True
return any(dependency_mtime >= target_mtime for dependency_mtime in dependency_mtimes)
def gen_constants():
with open('../btrfsutil.h', 'r') as f:
btrfsutil_h = f.read()
constants = re.findall(
r'^\s*(BTRFS_UTIL_ERROR_[a-zA-Z0-9_]+)',
btrfsutil_h, flags=re.MULTILINE)
with open('constants.c', 'w') as f:
f.write("""\
#include <btrfsutil.h>
#include "btrfsutilpy.h"
void add_module_constants(PyObject *m)
{
""")
for constant in constants:
assert constant.startswith('BTRFS_UTIL_')
name = constant[len('BTRFS_UTIL_'):]
f.write('\tPyModule_AddIntConstant(m, "{}", {});\n'.format(name, constant))
f.write("""\
}
""")
class my_build_ext(build_ext):
def run(self):
if out_of_date(['../btrfsutil.h'], 'constants.c'):
try:
gen_constants()
except Exception as e:
try:
os.remove('constants.c')
except OSError:
pass
raise e
super().run()
module = Extension(
name='btrfsutil',
sources=[
'constants.c',
'error.c',
'filesystem.c',
'module.c',
'qgroup.c',
'subvolume.c',
],
include_dirs=['..'],
library_dirs=['../..'],
libraries=['btrfsutil'],
)
setup(
name='btrfsutil',
version=get_version(),
description='Library for managing Btrfs filesystems',
url='https://github.com/kdave/btrfs-progs',
license='LGPLv3',
cmdclass={'build_ext': my_build_ext},
ext_modules=[module],
)
| gpl-2.0 | -551,273,148,070,264,200 | 26.198113 | 90 | 0.6188 | false |
dragondjf/PFramer | objbrowser/app.py | 1 | 7811 | """ Module for IPython event loop integration.
Two things are handled by this module.
1) Creating the QApplication instance (or getting the singleton if it already
exists). This works the same for IPython and regular Python.
2) Starting the event loop.
If IPython is not running, qApp.exec_() is called, which is blocking.
The IPython.lib.guisupport.start_event_loop_qt4() function is used. If no
event loop is yet running, it will start a blocking event loop. If an event
loop is running, start_event_loop_qt4() will do nothing and return. It is
therefore non-blocking. This makes user interaction from the command
line possible.
The user can start an IPython event loop by calling the '%gui qt' magic command,
by starting IPython with the --gui=qt command line option, or by setting
c.TerminalIPythonApp.gui = 'qt' in ~/.ipython/<profile>/ipython_config.py
See also:
http://ipython.readthedocs.org/en/stable/api/generated/IPython.lib.guisupport.html
Known issues:
1) Starting: ipython --gui=qt main.py
Since this will start a non-blocking event loop before calling main, the
application exits as soon as it is created. Use the IPython -i option to
stay in IPython after the script has finished.
So run: ipython --gui=qt -i main.py
2) PyQT4 has two API versions: Python 2 uses API v1 by default, Python 3
uses v2 (PySide only implements the v2 API). The API version must be set
before PyQt4 is imported!
This program is written for v2 so if v1 is already running, an error will
occur. If you use the ipython --gui=qt command line option to start an
event loop (and make interaction from the command line possible), IPython-2
will start API v1 if PyQt is configured. To force IPython-2 to use the
v2 API, the QT_API environment variable must be set to 'pyqt'.
This works; unfortunately IPython 4.0.0 contains a bug and raises the
following ImportError: No module named qt. As a workaround you can:
1: Ignore the ImportError
2: Import PyQt4 (or PySide) manually. In IPython type: import PyQt4.QtCore
3: Start the event loop with: %gui qt
Also IPython 5.0.0 and 5.1.0 contain a bug so it won't work there either.
See https://github.com/ipython/ipython/issues/9974. It is expected to be fixed
in IPython 5.2.0
"""
import sys, logging, traceback
logger = logging.getLogger(__name__)
from objbrowser.qtpy import QtCore, QtWidgets
from objbrowser.version import DEBUGGING, PROGRAM_NAME
def in_ipython():
""" Returns True if IPython is running, False for the regular Python.
"""
try:
from IPython.core.getipython import get_ipython
except ImportError:
return False
else:
return get_ipython() is not None
def qapp_exists():
""" Returns true if a QApplicaiotn is already running
"""
return QtWidgets.QApplication.instance() is not None
def get_qapp(*args, **kwargs):
""" Gets the global Qt application object. Creates one if it doesn't exist.
"""
qApp = QtWidgets.QApplication.instance()
if qApp:
logger.debug("Returning existing QApplication")
return qApp
else:
logger.debug("Creating new QApplication")
return QtWidgets.QApplication(*args, **kwargs)
def get_qsettings():
""" Creates a QSettings object for this application.
We do not set the application and organization in the QApplication object to
prevent side-effects.
"""
return QtCore.QSettings("titusjan.nl", PROGRAM_NAME)
def start_qt_event_loop(qApp):
""" Starts the eventloop if it's not yet running.
If the IPython event loop is active (and set to Qt) this function does nothing. The IPython
event loop will process Qt events as well so the user can continue to use the command
    prompt together with the ObjectBrowser. Unfortunately this behaviour is broken again in
IPython 5, so there we fall back on the non-interactive event loop.
"""
if in_ipython():
from IPython import version_info
logger.debug("IPython detected. Version info: {}".format(version_info))
if version_info[0] < 4:
logger.debug("Event loop integration not supported for IPython < 4")
elif version_info[0] == 5 and version_info[1] <= 1:
# The is_event_loop_running_qt4 function is broken in IPython 5.0 and 5.1.
# https://github.com/ipython/ipython/issues/9974
logger.debug("Event loop integration does not work in IPython 5.0 and 5.1")
else:
try:
from IPython.lib.guisupport import is_event_loop_running_qt4, start_event_loop_qt4
if is_event_loop_running_qt4(qApp):
logger.info("IPython event loop already running. GUI integration possible.")
else:
# No gui integration
logger.info("Starting (non-interactive) IPython event loop")
start_event_loop_qt4(qApp) # exit code always 0
return
except Exception as ex:
logger.warning("Unable to start IPython Qt event loop: {}".format(ex))
logger.warning("Falling back on non-interactive event loop: {}".format(ex))
logger.info("Starting (non-interactive) event loop")
return qApp.exec_()
def handleException(exc_type, exc_value, exc_traceback):
""" Causes the application to quit in case of an unhandled exception (as God intended)
Shows an error dialog before quitting when not in debugging mode.
"""
traceback.format_exception(exc_type, exc_value, exc_traceback)
logger.critical("Bug: uncaught {}".format(exc_type.__name__),
exc_info=(exc_type, exc_value, exc_traceback))
if DEBUGGING:
sys.exit(1)
else:
# Constructing a QApplication in case this hasn't been done yet.
if not QtWidgets.qApp:
            _app = QtWidgets.QApplication([])  # PyQt's QApplication requires an argv list
msgBox = ResizeDetailsMessageBox()
msgBox.setText("Bug: uncaught {}".format(exc_type.__name__))
msgBox.setInformativeText(str(exc_value))
lst = traceback.format_exception(exc_type, exc_value, exc_traceback)
msgBox.setDetailedText("".join(lst))
msgBox.setIcon(QtWidgets.QMessageBox.Warning)
msgBox.exec_()
sys.exit(1)
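# Minimal usage sketch (an assumption, not shown in this module): the handler above
# is intended to be installed as the global exception hook, e.g.
#
#   sys.excepthook = handleException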
class ResizeDetailsMessageBox(QtWidgets.QMessageBox):
""" Message box that enlarges when the 'Show Details' button is clicked.
    Can be used to better view stack traces. I couldn't find how to make a resizable message
    box but this is the next best thing.
Taken from:
http://stackoverflow.com/questions/2655354/how-to-allow-resizing-of-qmessagebox-in-pyqt4
"""
def __init__(self, detailsBoxWidth=700, detailBoxHeight=300, *args, **kwargs):
""" Constructor
            :param detailsBoxWidth: The width of the details text box (default=700)
            :param detailBoxHeight: The height of the details text box (default=300)
"""
super(ResizeDetailsMessageBox, self).__init__(*args, **kwargs)
self.detailsBoxWidth = detailsBoxWidth
self.detailBoxHeight = detailBoxHeight
def resizeEvent(self, event):
""" Resizes the details box if present (i.e. when 'Show Details' button was clicked)
"""
result = super(ResizeDetailsMessageBox, self).resizeEvent(event)
details_box = self.findChild(QtWidgets.QTextEdit)
if details_box is not None:
#details_box.setFixedSize(details_box.sizeHint())
details_box.setFixedSize(QtCore.QSize(self.detailsBoxWidth, self.detailBoxHeight))
return result
| gpl-3.0 | 3,190,367,816,464,771,600 | 40.547872 | 100 | 0.669953 | false |
singularityhub/sregistry | shub/apps/logs/mixins.py | 1 | 4621 | """
Adopted from drf-tracking
https://github.com/aschn/drf-tracking
"""
from django.utils.timezone import now
from shub.apps.logs.models import APIRequestLog
from shub.apps.logs.utils import clean_data
from rest_framework.authtoken.models import Token
import traceback
class BaseLoggingMixin(object):
logging_methods = "__all__"
"""Mixin to log requests"""
def initial(self, request, *args, **kwargs):
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
ipaddr = [x.strip() for x in ipaddr.split(",")][0]
else:
ipaddr = request.META.get("REMOTE_ADDR", "")
# get view
view_name = ""
try:
method = request.method.lower()
attributes = getattr(self, method)
view_name = (
type(attributes.__self__).__module__
+ "."
+ type(attributes.__self__).__name__
)
except:
pass
# get the method of the view
if hasattr(self, "action"):
view_method = self.action if self.action else ""
else:
view_method = method.lower()
try:
params = clean_data(request.query_params.dict())
except:
params = {}
# create log
self.request.log = APIRequestLog(
requested_at=now(),
path=request.path,
view=view_name,
view_method=view_method,
remote_addr=ipaddr,
host=request.get_host(),
method=request.method,
query_params=params,
)
# regular initial, including auth check
super(BaseLoggingMixin, self).initial(request, *args, **kwargs)
# add user to log after auth
user = request.user
if user.is_anonymous:
user = None
# Get a user, if auth token is provided
auth_header = request.META.get("HTTP_AUTHORIZATION")
if auth_header:
try:
token = Token.objects.get(key=auth_header.replace("BEARER", "").strip())
user = token.user
except Token.DoesNotExist:
pass
self.request.log.user = user
# get data dict
try:
# Accessing request.data *for the first time* parses the request body, which may raise
# ParseError and UnsupportedMediaType exceptions. It's important not to swallow these,
# as (depending on implementation details) they may only get raised this once, and
# DRF logic needs them to be raised by the view for error handling to work correctly.
self.request.log.data = clean_data(self.request.data.dict())
except AttributeError: # if already a dict, can't dictify
self.request.log.data = clean_data(self.request.data)
def handle_exception(self, exc):
# basic handling
response = super(BaseLoggingMixin, self).handle_exception(exc)
# log error
if hasattr(self.request, "log"):
self.request.log.errors = traceback.format_exc()
# return
return response
def finalize_response(self, request, response, *args, **kwargs):
# regular finalize response
response = super(BaseLoggingMixin, self).finalize_response(
request, response, *args, **kwargs
)
# check if request is being logged
if not hasattr(self.request, "log"):
return response
# compute response time
response_timedelta = now() - self.request.log.requested_at
response_ms = int(response_timedelta.total_seconds() * 1000)
# save to log
if self._should_log(request, response):
self.request.log.response = response.rendered_content
self.request.log.status_code = response.status_code
self.request.log.response_ms = response_ms
self.request.log.save()
return response
def _should_log(self, request, response):
"""
Method that should return True if this request should be logged.
By default, check if the request method is in logging_methods.
"""
return (
self.logging_methods == "__all__" or request.method in self.logging_methods
)
class LoggingMixin(BaseLoggingMixin):
pass
class LoggingErrorsMixin(BaseLoggingMixin):
"""Log only errors"""
def _should_log(self, request, response):
return response.status_code >= 400
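# Hedged usage sketch (not part of the original file): as with drf-tracking, the
# mixin goes before the DRF view base class so its hooks run first. The view name
# and response below are illustrative only.
#
#   from rest_framework.views import APIView
#   from rest_framework.response import Response
#
#   class ContainerLogView(LoggingMixin, APIView):
#       def get(self, request, *args, **kwargs):
#           return Response({"detail": "this request is logged"})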
| mpl-2.0 | -5,573,458,890,350,275,000 | 30.222973 | 98 | 0.58667 | false |
widowild/messcripts | exercice/python3/solutions_exercices/exercice_6_11.py | 1 | 1686 | #! /usr/bin/env python
# -*- coding:Utf8 -*-
# Triangle calculations
from sys import exit # module containing system functions
print("""
Veuillez entrer les longueurs des 3 côtés
(en séparant ces valeurs à l'aide de virgules) :""")
a, b, c = eval(input())
# A triangle can only be constructed if each side
# is shorter than the sum of the other two:
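# For example, (3, 4, 5) passes this test (3 < 4+5, 4 < 3+5, 5 < 3+4),
# whereas (1, 2, 5) does not, since 5 >= 1+2.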
if a < (b+c) and b < (a+c) and c < (a+b) :
print("Ces trois longueurs déterminent bien un triangle.")
else:
print("Il est impossible de construire un tel triangle !")
    exit() # so we will not go any further.
f = 0
if a == b and b == c :
print("Ce triangle est équilatéral.")
f = 1
elif a == b or b == c or c == a :
print("Ce triangle est isocèle.")
f = 1
if a*a + b*b == c*c or b*b + c*c == a*a or c*c + a*a == b*b :
print("Ce triangle est rectangle.")
f = 1
if f == 0 :
print("Ce triangle est quelconque.")
######### Variant? (proposed by Alex Misbah) ######### :
print("* Variante ? (par Alex Misbah) *")
a=eval(input('entrer une longueur a:'))
b=eval(input('entrer une longueur b:'))
c=eval(input('entrer une longueur c:'))
ab_carre=(a*b)**2
pytha=(b*c)**2+(c*a)**2
if a<(b+c) and b<(a+c) and c <(a+b):
print(" les longueurs définissent un triangle")
if ab_carre == pytha:
print(" c'est un triangle rectangle")
elif a == b == c:
print(" c'est un triangle équilatéral")
elif a == b or b == c or c == a:
print("c'est un triangle isocèle")
else:
print("c'est un triangle quelconque")
else:
print("les longueurs a,b et c ne permettent pas de définir un triangle")
| gpl-3.0 | -136,907,640,076,369,580 | 30.415094 | 76 | 0.607207 | false |
sbillaudelle/labbook | labbook/cli.py | 1 | 2486 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
__all__ = ['Interface', 'main']
import os
import datetime
from .exceptions import *
from .labbook import LabBook
class Interface(object):
def __init__(self):
pass
def create(self, args):
path = os.path.abspath(args.path)
try:
labbook = LabBook.create(path)
except LabBookAlreadyExistsError as exc:
print(exc.message)
else:
print("I created a labbook for you in '{0}'. Get to work!".format(labbook.path))
def run(self, args):
path = os.getcwd()
try:
labbook = LabBook(path)
except LabBookNotFoundError as exc:
print(exc.message)
else:
labbook.run(args.command_line)
def log(self, args):
path = os.getcwd()
try:
labbook = LabBook(path)
except LabBookNotFoundError as exc:
print(exc.message)
else:
for experiment in labbook.log():
print("{date}: {cmd} ({uuid})".format(
date = datetime.datetime.fromtimestamp(float(experiment.date)).strftime('%a %b %d %H:%M:%S %Y'),
cmd = experiment.command_line,
uuid = experiment.uuid
))
if experiment.comment:
print("\n {0}\n".format(experiment.comment))
else:
print("\n (no comment)\n")
def comment(self, args):
path = os.getcwd()
try:
labbook = LabBook(path)
except LabBookNotFoundError as exc:
print(exc.message)
else:
try:
labbook.set_comment(args.uuid, args.comment)
except (UUIDNotFoundError, AmbiguousUUIDError) as exc:
print(exc.message)
def main():
import argparse
parser = argparse.ArgumentParser()
sub = parser.add_subparsers(dest='command')
run_parser = sub.add_parser('run')
run_parser.add_argument('command_line', type=str, nargs='+')
log_parser = sub.add_parser('log')
log_parser = sub.add_parser('create')
log_parser.add_argument('path', type=str)
log_parser = sub.add_parser('comment')
log_parser.add_argument('uuid', type=str, nargs='?')
log_parser.add_argument('comment', type=str)
args = parser.parse_args()
interface = Interface()
getattr(interface, args.command)(args)
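# Hedged usage sketch (not part of the original file); assuming a `labbook`
# console entry point wired to main(), a session might look like:
#
#   labbook create ./my-experiment
#   labbook run python simulate.py
#   labbook log
#   labbook comment <uuid> "baseline run"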
| gpl-2.0 | 271,354,617,792,468,030 | 28.247059 | 120 | 0.54827 | false |
frhumanes/consulting | web/deploy/wtdeploy/wtdeploy/modules/fab_mysql.py | 1 | 2073 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
#
# author: javi santana
from fabric.api import *
def install_mysql():
""" ripped from http://www.muhuk.com/2010/05/how-to-install-mysql-with-fabric/
"""
with settings(hide('warnings', 'stderr'), warn_only=True):
result = sudo('dpkg-query --show mysql-server')
if result.failed is False:
warn('MySQL is already installed')
return
mysql_password = env.database_admin_pass
sudo('echo "mysql-server-5.5 mysql-server/root_password password ' \
'%s" | debconf-set-selections' % mysql_password)
sudo('echo "mysql-server-5.5 mysql-server/root_password_again password ' \
'%s" | debconf-set-selections' % mysql_password)
sudo('apt-get install -y mysql-server')
def install(conf_folder):
install_mysql()
sudo("apt-get -y install mysql-client libmysqlclient-dev") # dev libraries for compile python bindings
def copy_conf_files(conf_folder):
pass
def create_database(name, encoding='utf8'):
with settings(warn_only=True):
run_mysql_sudo('create database %s character set %s' % (name, encoding))
def set_password(user, password):
sudo("mysqladmin -u %s password %s" % (user, password))
def create_user(user, password):
run_mysql_sudo("CREATE USER %s IDENTIFIED BY '%s'" % (user, password))
def drop_user(user):
with settings(warn_only=True):
run_mysql_sudo("DROP USER %s" % (user))
def user_perms(user, database, password):
run_mysql_sudo("GRANT ALL ON %s.* TO %s@'localhost' IDENTIFIED BY '%s'" % (database, user, password))
def run_mysql_sudo(cmd):
run('echo "' + cmd + '" | mysql -u%(database_admin)s -p%(database_admin_pass)s' % env)
def get_dump(name, user, password, where):
    # TODO: make a temporary file
    run("mysqldump -u%s -p%s %s | gzip > /tmp/db_dump.sql.gz" % (user, password, name))
get("/tmp/db_dump.sql.gz", where)
def drop_database():
with settings(warn_only=True):
run_mysql_sudo("DROP DATABASE %s" % env.database_name)
| apache-2.0 | -271,757,417,475,889,120 | 34.741379 | 105 | 0.641582 | false |