repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | class
---|---|---|---|---|---|---|---|---|---|---|---
goniz/plexmyxbmc | plexmyxbmc/__init__.py | 1 | 1359 | #!/usr/bin/python2
__version__ = "1.0.0"
import plexapi
from plexmyxbmc.config import get_config
from plexmyxbmc.log import get_logger
plexapi.X_PLEX_PROVIDES = 'player,controller,sync-target'
plexapi.X_PLEX_PRODUCT = "PlexMyXBMC"
plexapi.X_PLEX_VERSION = __version__
plexapi.X_PLEX_IDENTIFIER = get_config().get('uuid', 'randomuuid')
plexapi.X_PLEX_PLATFORM_VERSION = plexapi.X_PLEX_PLATFORM + plexapi.X_PLEX_PLATFORM_VERSION
plexapi.X_PLEX_PLATFORM = 'Generic'
BASE_HEADERS = {
'X-Plex-Provides': plexapi.X_PLEX_PROVIDES,
'X-Plex-Product': plexapi.X_PLEX_PRODUCT,
'X-Plex-Version': plexapi.X_PLEX_VERSION,
'X-Plex-Client-Identifier': plexapi.X_PLEX_IDENTIFIER,
'X-Plex-Device-Name': get_config().get('name', 'randomname'),
'X-Plex-Platform': plexapi.X_PLEX_PLATFORM,
'X-Plex-Platform-Version': plexapi.X_PLEX_PLATFORM_VERSION,
}
plexapi.BASE_HEADERS.update(BASE_HEADERS)
logger = get_logger('plexapi', _force=True)
plexapi.log = logger
def time_to_millis(time):
return (time['hours']*3600 + time['minutes']*60 + time['seconds'])*1000 + time['milliseconds']
def millis_to_time(t):
millis = int(t)
seconds = millis / 1000
minutes = seconds / 60
hours = minutes / 60
seconds %= 60
minutes %= 60
millis %= 1000
return dict(hours=hours, minutes=minutes, seconds=seconds, milliseconds=millis)
| gpl-2.0 | 2,932,022,274,638,289,400 | 32.975 | 98 | 0.701987 | false |
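A quick round-trip check of the two helper functions above, written as a standalone sketch (not part of the original module). The original relies on Python 2 integer division, so `//` is used here to keep the same behavior under Python 3.

def time_to_millis(time):
    return (time['hours'] * 3600 + time['minutes'] * 60 + time['seconds']) * 1000 + time['milliseconds']

def millis_to_time(t):
    millis = int(t)
    seconds = millis // 1000
    minutes = seconds // 60
    hours = minutes // 60
    return dict(hours=hours, minutes=minutes % 60, seconds=seconds % 60, milliseconds=millis % 1000)

if __name__ == '__main__':
    stamp = dict(hours=1, minutes=23, seconds=45, milliseconds=678)
    ms = time_to_millis(stamp)
    assert millis_to_time(ms) == stamp   # the conversion round-trips
    print(ms)                            # 5025678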
ghorn/debian-casadi | experimental/joris/scp.py | 1 | 2985 | #
# This file is part of CasADi.
#
# CasADi -- A symbolic framework for dynamic optimization.
# Copyright (C) 2010-2014 Joel Andersson, Joris Gillis, Moritz Diehl,
# K.U. Leuven. All rights reserved.
# Copyright (C) 2011-2014 Greg Horn
#
# CasADi is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3 of the License, or (at your option) any later version.
#
# CasADi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with CasADi; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
from numpy import *
from casadi import *
DMatrix.setPrecision(16)
solver = None
def sdqp_sol(h=None,c=None,a=None,uba=None,f=None,g=None):
global solver
if solver is None:
solver = SDPSdqpSolver(sdqpStruct(h=h.sparsity(),f=f.sparsity(),g=g.sparsity(),a=a.sparsity()))
solver.setOption("sdp_solver",DSdpSolver)
solver.setOption("sdp_solver_options",{"_printlevel": 0})
solver.init()
solver.setInput(h,"h")
solver.setInput(f,"f")
solver.setInput(g,"g")
solver.setInput(a,"a")
solver.setInput(c,"c")
solver.setInput(-Inf,"lba")
solver.setInput(uba,"uba")
solver.evaluate()
return solver.output("x"),solver.output("lam_a"), solver.output("dual")
x = ssym("x",2)
f = (1-x[0])**2+100*(x[1]-x[0]**2)**2
nsd = blockcat([[-x[0],2],[2,-x[1]**2]]) # <=0
g = eig_symbolic(nsd)
nlp = SXFunction(nlpIn(x=x),nlpOut(f=f,g=g))
nlp.init()
# Find a reference solution with another solver (Ipopt)
ipopt = IpoptSolver(nlp)
ipopt.init()
ipopt.setInput(-Inf,"lbg")
ipopt.setInput(0,"ubg")
ipopt.solve()
print "reference sol= ", ipopt.output("x")
g = DMatrix(0,1)
lambd = ssym("lambda",g.shape)
Lambd = ssym("lambda",nsd.sparsity())
lag = f+mul(lambd.T,g)+trace(mul(Lambd,nsd))
oracle = SXFunction(customIO(x=x,lambd=lambd,Lambd=Lambd),customIO(f=f,g=g,nsd=nsd,hess=hessian(lag,x), gradF=gradient(f,x), jacG= jacobian(g,x),jac_nsd=jacobian(vec(nsd),x)))
oracle.init()
lambda_k = DMatrix([0])
Lambda_k = DMatrix([0])
x_k = DMatrix([2,3])
for i in range(25):
print i, x_k
oracle.setInput(x_k,"x")
oracle.setInput(lambda_k,"lambd")
oracle.setInput(Lambda_k,"Lambd")
oracle.evaluate()
step, lambda_k, Lambda_k = sdqp_sol(h=oracle.output("hess"),c=oracle.output("gradF"),a=oracle.output("jacG"),uba=-oracle.output("g"),f=vertcat([ oracle.output("jac_nsd")[:,i].reshape(oracle.output("nsd").shape) for i in range(x_k.size())]),g=-oracle.output("nsd"))
x_k+= step
print linalg.eig(oracle.output("nsd"))[0]
| lgpl-3.0 | -7,634,495,534,286,407,000 | 30.421053 | 266 | 0.670352 | false |
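For orientation, a small standalone check (assuming only numpy) of the Rosenbrock objective and the matrix constraint used above, evaluated at the script's starting point x = (2, 3). It is not part of the CasADi/SCP workflow; it only verifies whether the 2x2 matrix is negative semidefinite there.

import numpy as np

def rosenbrock(x):
    return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2

def nsd_feasible(x, tol=1e-9):
    # The constraint above requires [[-x0, 2], [2, -x1**2]] to be negative semidefinite.
    m = np.array([[-x[0], 2.0], [2.0, -x[1] ** 2]])
    return bool(np.all(np.linalg.eigvalsh(m) <= tol))

x0 = np.array([2.0, 3.0])     # same starting point as x_k above
print(rosenbrock(x0))         # objective value at the start (101.0)
print(nsd_feasible(x0))       # True: both eigenvalues are negative here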
qutebrowser/qutebrowser | scripts/dev/ua_fetch.py | 1 | 2287 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
"""Fetch and print the most common user agents.
This script fetches the most common user agents according to
https://github.com/Kikobeats/top-user-agents, and prints the most recent
Chrome user agent for Windows, macOS and Linux.
"""
import math
import sys
import textwrap
import requests
import qutebrowser.config.websettings
def version(ua):
"""Comparable version of a user agent."""
return tuple(int(v) for v in ua.upstream_browser_version.split('.')[:2])
def wrap(ini, sub, string):
return textwrap.wrap(string, width=80, initial_indent=ini, subsequent_indent=sub)
response = requests.get('https://raw.githubusercontent.com/Kikobeats/top-user-agents/master/index.json')
if response.status_code != 200:
print('Unable to fetch the user agent index', file=sys.stderr)
sys.exit(1)
ua_checks = {
'Win10': lambda ua: ua.os_info.startswith('Windows NT'),
'macOS': lambda ua: ua.os_info.startswith('Macintosh'),
'Linux': lambda ua: ua.os_info.startswith('X11'),
}
ua_strings = {}
ua_versions = {}
ua_names = {}
for ua_string in reversed(response.json()):
# reversed to prefer more common versions
# Filter out browsers that are not Chrome-based
parts = ua_string.split()
if not any(part.startswith("Chrome/") for part in parts):
continue
if any(part.startswith("OPR/") or part.startswith("Edg/") for part in parts):
continue
if 'Chrome/99.0.7113.93' in parts:
# Fake or false-positive entry
continue
user_agent = qutebrowser.config.websettings.UserAgent.parse(ua_string)
# check which os_string conditions are met and select the most recent version
for key, check in ua_checks.items():
if check(user_agent):
v = version(user_agent)
if v >= ua_versions.get(key, (-math.inf,)):
ua_versions[key] = v
ua_strings[key] = ua_string
ua_names[key] = f'Chrome {v[0]} {key}'
for key, ua_string in ua_strings.items():
quoted_ua_string = f'"{ua_string}"'
for line in wrap(" - - ", " ", quoted_ua_string):
print(line)
for line in wrap(" - ", " ", ua_names[key]):
print(line)
| gpl-3.0 | 1,706,314,109,449,327,400 | 30.328767 | 104 | 0.647136 | false |
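A stripped-down sketch of the same "keep the newest Chrome user agent per OS" selection without qutebrowser's UserAgent parser; the regex and the two sample strings below are assumptions for illustration only.

import re

UA_STRINGS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
]

def chrome_version(ua):
    m = re.search(r"Chrome/(\d+)\.(\d+)", ua)
    return (int(m.group(1)), int(m.group(2))) if m else None

best = {}
for ua in UA_STRINGS:
    v = chrome_version(ua)
    if v is None:
        continue
    os_key = "Win10" if "Windows NT" in ua else "Linux" if "X11" in ua else "macOS"
    if v >= best.get(os_key, ((-1, -1), ""))[0]:
        best[os_key] = (v, ua)

for os_key, (v, ua) in best.items():
    print(os_key, "Chrome %d" % v[0], ua)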
xpansa/pmis | purchase_request/__openerp__.py | 1 | 1861 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Eficent (<http://www.eficent.com/>)
# <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Purchase Request",
"author": "Eficent, "
"Odoo Community Association (OCA)",
"version": "8.0.1.0.1",
"contributors": [
'Jordi Ballester Alomar',
],
"category": "Purchase Management",
"depends": [
"purchase",
"product"
],
"data": [
"security/purchase_request.xml",
"security/ir.model.access.csv",
"data/purchase_request_sequence.xml",
"data/purchase_request_data.xml",
"views/purchase_request_view.xml",
"reports/report_purchaserequests.xml",
"views/purchase_request_report.xml",
],
'demo': [
"demo/purchase_request_demo.xml",
],
'test': [
"test/purchase_request_users.yml",
"test/purchase_request_data.yml",
"test/purchase_request_status.yml"
],
"license": 'AGPL-3',
"installable": True
}
| agpl-3.0 | -1,313,501,190,851,611,100 | 33.462963 | 78 | 0.569586 | false |
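A hypothetical sketch of the kind of model a manifest like this would ship (Odoo 8 "new API"); the model and field names are illustrative assumptions, not the module's actual schema, and it only runs inside an Odoo server environment.

from openerp import models, fields

class PurchaseRequest(models.Model):
    _name = 'purchase.request'
    _description = 'Purchase Request'

    name = fields.Char(string='Request Reference', required=True)
    requested_by = fields.Many2one('res.users', string='Requested by')
    line_ids = fields.One2many('purchase.request.line', 'request_id',
                               string='Products to Purchase')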
vauxoo-dev/stoqdrivers | stoqdrivers/printers/bematech/MP20.py | 1 | 4982 | # -*- Mode: Python; coding: iso-8859-1 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Stoqdrivers
## Copyright (C) 2009 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
##
## Author(s): Ronaldo Maia <[email protected]>
##
"""
Bematech MP20 driver
The MP20 is compatible with the MP25 command set (actually it's the other way
around ;) until a certain command (85, I think). Commands above that are just
not executed.
There are some differences in the register numbering as well.
Also, some commands have different parameter sizes. These are:
    CMD   MP 20      MP 25
    00    29         28+30+80 (coupon opening)
    14    -          28+30+80 (coupon cancellation)
    32               (starts coupon closing)
    73    Some differences in behavior. See the manual.
"""
from kiwi.log import Logger
from decimal import Decimal
from stoqdrivers.printers.bematech.MP25 import (MP25, MP25Status, CMD_STATUS,
CMD_COUPON_OPEN)
log = Logger('stoqdrivers.bematech.MP20')
CMD_ADD_ITEM_SIMPLE = 9
class MP20Registers(object):
TOTAL = 3
TOTAL_CANCELATIONS = 4
TOTAL_DISCOUNT = 5
COO = 6
GNF = 7
NUMBER_REDUCTIONS_Z = 9
CRO = 10
LAST_ITEM_ID = 12
NUMBER_TILL = 14
FISCAL_FLAGS = 17
EMISSION_DATE = 23
TOTALIZERS = 29
PAYMENT_METHODS = 32
SERIAL = 0
FIRMWARE = 1
# (size, bcd)
formats = {
TOTAL: ('9s', True),
TOTAL_CANCELATIONS: ('7s', True),
TOTAL_DISCOUNT: ('7s', True),
COO: ('3s', True),
GNF: ('3s', True),
NUMBER_REDUCTIONS_Z: ('2s', True),
CRO: ('2s', True),
LAST_ITEM_ID: ('2s', True),
NUMBER_TILL: ('2s', True),
FISCAL_FLAGS: ('1s', False),
EMISSION_DATE: ('6s', False),
TOTALIZERS: ('2s', False),
# 1 + (52 * 16) + (52 * 10) + (52 * 10) + (52 * 1)
# 1 + 832 + 520 + 520 + 52: 1925
PAYMENT_METHODS: ('b832s520s520s52s', False),
SERIAL: ('15s', False),
FIRMWARE: ('3s', True),
}
class MP20Status(MP25Status):
def __init__(self, reply):
self.st1, self.st2 = reply[-2:]
self.st3 = 0
class MP20(MP25):
model_name = "Bematech MP20 TH FI"
CMD_PROTO = 0x1b
registers = MP20Registers
supports_duplicate_receipt = False
reply_format = '<b%sbb'
status_size = 2
#
# MP25 implementation
#
def coupon_open(self):
""" This needs to be called before anything else. """
self._send_command(CMD_COUPON_OPEN,
"%-29s" % (self._customer_document))
def coupon_add_item(self, code, description, price, taxcode,
quantity=Decimal("1.0"), unit=None,
discount=Decimal("0.0"), markup=Decimal("0.0"),
unit_desc=""):
# We are using a simpler command for adding items with the MP20
# because its not working with the MP25 command (ESC 63). This
# simpler command does not support markup and unit
data = (
"%-13s" # code
"%29s" # description
"%02s" # taxcode
"%07d" # quantity
"%08d" # value
"%08d" # discount
) % (code, description, taxcode, quantity * Decimal("1e3"),
price * Decimal("1e2"), discount * Decimal("1e2"))
self._send_command(CMD_ADD_ITEM_SIMPLE, data)
return self._get_last_item_id()
def get_status(self, val=None):
if val is None:
val = self._send_command(CMD_STATUS, raw=True)
return MP20Status(val)
def cancel_last_coupon(self):
"""Cancel the last non fiscal coupon or the last sale."""
#XXX MP20 does not support this
self.coupon_cancel()
def get_ccf(self):
# MP20 does not support this. We should just return the coo
# http://www.forumweb.com.br/foruns/lofiversion/index.php/t64417.html
return self.get_coo()
def status_reply_complete(self, reply):
log.debug('status_reply_complete "%s" (size=%s)' % (reply, len(reply)))
return len(reply) == 18
| lgpl-2.1 | 2,580,845,285,714,813,400 | 30.732484 | 79 | 0.5833 | false |
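A standalone illustration (plain Python, no printer attached) of the fixed-width record that coupon_add_item above builds before sending command 9; the product values are made up, and int() is applied so the %d conversions also behave on Python 3.

from decimal import Decimal

code, description, taxcode = "7891234", "COFFEE 500G", "18"
quantity, price, discount = Decimal("2.0"), Decimal("12.50"), Decimal("0.0")

data = (
    "%-13s"    # product code, left aligned
    "%29s"     # description
    "%02s"     # tax code
    "%07d"     # quantity scaled by 1e3
    "%08d"     # unit value scaled by 1e2
    "%08d"     # discount scaled by 1e2
) % (code, description, taxcode,
     int(quantity * Decimal("1e3")),
     int(price * Decimal("1e2")),
     int(discount * Decimal("1e2")))

print(repr(data))
print(len(data))   # 13 + 29 + 2 + 7 + 8 + 8 = 67 characters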
KDE/tellico | src/fetch/scripts/dark_horse_comics.py | 1 | 14898 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***************************************************************************
# Copyright (C) 2006-2009 Mathias Monnerville <[email protected]>
# ***************************************************************************
#
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or *
# * modify it under the terms of the GNU General Public License as *
# * published by the Free Software Foundation; either version 2 of *
# * the License or (at your option) version 3 or any later version *
# * accepted by the membership of KDE e.V. (or its successor approved *
# * by the membership of KDE e.V.), which shall act as a proxy *
# * defined in Section 14 of version 3 of the license. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see <http://www.gnu.org/licenses/>. *
# * *
# ***************************************************************************
# $Id: comics_darkhorsecomics.py 123 2006-03-24 08:47:48Z mathias $
"""
This script has to be used with tellico (http://periapsis.org/tellico) as an external data source program.
It allows searching through the Dark Horse Comics web database.
Related info and cover are fetched automatically. It takes only one argument (comic title).
Tellico data source setup:
- source name: Dark Horse Comics (US) (or whatever you want :)
- Collection type: comics collection
- Result type: tellico
- Path: /path/to/script/comics_darkhorsecomics.py
- Arguments:
Title (checked) = %1
Update (checked) = %{title}
"""
import sys, os, re, hashlib, random, string
import urllib, time, base64
import xml.dom.minidom
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
XML_HEADER = """<?xml version="1.0" encoding="UTF-8"?>"""
DOCTYPE = """<!DOCTYPE tellico PUBLIC "-//Robby Stephenson/DTD Tellico V9.0//EN" "http://periapsis.org/tellico/dtd/v9/tellico.dtd">"""
NULLSTRING = ''
VERSION = "0.2"
def genMD5():
"""
Generates and returns a random md5 string. Its main purpose is to allow random
image file name generation.
"""
    rand_val = random.random()   # avoid shadowing the built-in float
    return hashlib.md5(str(rand_val)).hexdigest()
class BasicTellicoDOM:
"""
This class manages tellico's XML data model (DOM)
"""
def __init__(self):
self.__doc = xml.dom.minidom.Document()
self.__root = self.__doc.createElement('tellico')
self.__root.setAttribute('xmlns', 'http://periapsis.org/tellico/')
self.__root.setAttribute('syntaxVersion', '9')
self.__collection = self.__doc.createElement('collection')
self.__collection.setAttribute('title', 'My Comics')
self.__collection.setAttribute('type', '6')
self.__fields = self.__doc.createElement('fields')
# Add all default (standard) fields
self.__dfltField = self.__doc.createElement('field')
self.__dfltField.setAttribute('name', '_default')
self.__fields.appendChild(self.__dfltField)
self.__collection.appendChild(self.__fields)
self.__images = self.__doc.createElement('images')
self.__root.appendChild(self.__collection)
self.__doc.appendChild(self.__root)
# Current movie id. See entry's id attribute in self.addEntry()
self.__currentId = 0
def addEntry(self, movieData):
"""
Add a comic entry.
Returns an entry node instance
"""
d = movieData
entryNode = self.__doc.createElement('entry')
entryNode.setAttribute('id', str(self.__currentId))
titleNode = self.__doc.createElement('title')
titleNode.appendChild(self.__doc.createTextNode(unicode(d['title'], 'latin-1').encode('utf-8')))
entryNode.appendChild(titleNode)
yearNode = self.__doc.createElement('pub_year')
yearNode.appendChild(self.__doc.createTextNode(d['pub_year']))
entryNode.appendChild(yearNode)
countryNode = self.__doc.createElement('country')
countryNode.appendChild(self.__doc.createTextNode(d['country']))
entryNode.appendChild(countryNode)
pubNode = self.__doc.createElement('publisher')
pubNode.appendChild(self.__doc.createTextNode(d['publisher']))
entryNode.appendChild(pubNode)
langNode = self.__doc.createElement('language')
langNode.appendChild(self.__doc.createTextNode(d['language']))
entryNode.appendChild(langNode)
writersNode = self.__doc.createElement('writers')
for g in d['writer']:
writerNode = self.__doc.createElement('writer')
writerNode.appendChild(self.__doc.createTextNode(unicode(g, 'latin-1').encode('utf-8')))
writersNode.appendChild(writerNode)
entryNode.appendChild(writersNode)
genresNode = self.__doc.createElement('genres')
for g in d['genre']:
genreNode = self.__doc.createElement('genre')
genreNode.appendChild(self.__doc.createTextNode(unicode(g, 'latin-1').encode('utf-8')))
genresNode.appendChild(genreNode)
entryNode.appendChild(genresNode)
commentsNode = self.__doc.createElement('comments')
#for g in d['comments']:
# commentsNode.appendChild(self.__doc.createTextNode(unicode("%s\n\n" % g, 'latin-1').encode('utf-8')))
commentsData = string.join(d['comments'], '\n\n')
commentsNode.appendChild(self.__doc.createTextNode(unicode(commentsData, 'latin-1').encode('utf-8')))
entryNode.appendChild(commentsNode)
artistsNode = self.__doc.createElement('artists')
for k, v in d['artist'].iteritems():
if v == 'various':
continue
artistNode = self.__doc.createElement('artist')
artistNode.appendChild(self.__doc.createTextNode(unicode(v, 'latin-1').encode('utf-8')))
artistsNode.appendChild(artistNode)
entryNode.appendChild(artistsNode)
if 'pages' in d:
pagesNode = self.__doc.createElement('pages')
pagesNode.appendChild(self.__doc.createTextNode(d['pages']))
entryNode.appendChild(pagesNode)
if 'isbn' in d:
isbnNode = self.__doc.createElement('isbn')
isbnNode.appendChild(self.__doc.createTextNode(d['isbn']))
entryNode.appendChild(isbnNode)
if 'issue' in d:
issueNode = self.__doc.createElement('issue')
issueNode.appendChild(self.__doc.createTextNode(d['issue']))
entryNode.appendChild(issueNode)
if 'image' in d:
imageNode = self.__doc.createElement('image')
imageNode.setAttribute('format', 'JPEG')
imageNode.setAttribute('id', d['image'][0])
imageNode.appendChild(self.__doc.createTextNode(unicode(d['image'][1], 'latin-1').encode('utf-8')))
coverNode = self.__doc.createElement('cover')
coverNode.appendChild(self.__doc.createTextNode(d['image'][0]))
entryNode.appendChild(coverNode)
if 'image' in d:
entryNode.appendChild(coverNode)
self.__images.appendChild(imageNode)
self.__collection.appendChild(entryNode)
self.__currentId += 1
return entryNode
def printEntry(self, nEntry):
"""
Prints entry's XML content to stdout
"""
try:
print(nEntry.toxml())
except:
            sys.stderr.write("Error while outputting XML content from entry to Tellico\n")
def printXMLTree(self):
"""
Outputs XML content to stdout
"""
self.__collection.appendChild(self.__images)
print(XML_HEADER); print(DOCTYPE)
print(self.__root.toxml())
class DarkHorseParser:
def __init__(self):
self.__baseURL = 'https://www.darkhorse.com'
self.__basePath = '/Comics/'
self.__searchURL = '/Search/%s'
self.__coverPath = '/covers/'
self.__movieURL = self.__baseURL + self.__basePath
# Define some regexps
self.__regExps = { 'title' : '<h2 class="title">(?P<title>.*?)</h2>',
'pub_date' : '<dt>Pub.* Date:</dt>.*?<dd>(?P<pub_date>.*?)</dd>',
'isbn' : '<dt>ISBN-10:</dt><dd>(?P<isbn>.*?)</dd>',
'desc' : '<div class="product-description">(?P<desc>.*?)</div>',
'writer' : '<dt>Writer: *</dt> *<dd><a.*?>(?P<writer>.*?)</a> *</dd>',
'cover_artist' : '<dt>Artist: *</dt> *<dd><a.*>(?P<cover_artist>.*?)</a> *</dd>',
'penciller' : '<dt>Penciller: *</dt> *<dd><a.*>(?P<penciller>.*?)</a> *</dd>',
'inker' : '<dt>Inker: *</dt> *<dd><a.*>(?P<inker>.*?)</a> *</dd>',
'letterer' : '<dt>Letterer: *</dt> *<dd><a.*>(?P<letterer>.*?)</a> *</dd>',
'colorist' : '<dt>Colorist: *</dt> *<dd><a.*>(?P<colorist>.*?)</a> *</dd>',
'genre' : '<strong>Genre: *</strong> *<a.*?>(?P<genre>.*?)</a> *</div>',
'format' : '<dt>Format: *</dt> *(?P<format>.*?)<dt>',
}
# Compile patterns objects
self.__regExpsPO = {}
for k, pattern in self.__regExps.iteritems():
self.__regExpsPO[k] = re.compile(pattern, re.DOTALL)
self.__domTree = BasicTellicoDOM()
def run(self, title):
"""
Runs the parser: fetch movie related links, then fills and prints the DOM tree
to stdout (in tellico format) so that tellico can use it.
"""
self.__getMovie(title)
# Print results to stdout
self.__domTree.printXMLTree()
def __getHTMLContent(self, url):
"""
Fetch HTML data from url
"""
u = urlopen(url)
self.__data = u.read()
u.close()
def __fetchMovieLinks(self):
"""
Retrieve all links related to the search. self.__data contains HTML content fetched by self.__getHTMLContent()
that need to be parsed.
"""
matchList = re.findall("""<a *href="%s(?P<page>.*?)" class="product_link">.*?</a>""" % self.__basePath.replace('?', '\?'), self.__data)
if not matchList: return None
return list(set(matchList))
def __fetchCover(self, path, delete = True):
"""
Fetch cover to /tmp. Returns base64 encoding of data.
The image is deleted if delete is True
"""
md5 = genMD5()
imObj = urlopen(path.strip())
img = imObj.read()
imObj.close()
imgPath = "/tmp/%s.jpeg" % md5
try:
f = open(imgPath, 'w')
f.write(img)
f.close()
except:
            sys.stderr.write("Error: could not write image into /tmp\n")
b64data = (md5 + '.jpeg', base64.encodestring(img))
# Delete temporary image
if delete:
try:
os.remove(imgPath)
except:
                sys.stderr.write("Error: could not delete temporary image /tmp/%s.jpeg\n" % md5)
return b64data
def __fetchMovieInfo(self, url):
"""
Looks for movie information
"""
self.__getHTMLContent(url)
# First grab picture data
imgMatch = re.search("""<img src="(?P<imgpath>.*%s.*?)".*>""" % self.__coverPath, self.__data)
if imgMatch:
imgPath = "http:" + imgMatch.group('imgpath')
# Fetch cover and gets its base64 encoded data
b64img = self.__fetchCover(imgPath)
else:
b64img = None
# Now isolate data between <div class="bodytext">...</div> elements
# re.DOTALL makes the "." special character match any character at all, including a newline
m = re.search("""<div id="inner_content">(?P<part>.*)<div id="right_bar">""", self.__data, re.DOTALL)
try:
self.__data = m.group('part')
except AttributeError:
self.__data = ""
matches = {}
data = {}
data['comments'] = []
data['artist'] = {}
# Default values
data['publisher'] = 'Dark Horse Comics'
data['language'] = 'English'
data['country'] = 'USA'
if b64img is not None:
data['image'] = b64img
data['pub_year'] = NULLSTRING
for name, po in self.__regExpsPO.iteritems():
data[name] = NULLSTRING
if name == 'desc':
matches[name] = re.findall(self.__regExps[name], self.__data, re.S | re.I)
else:
matches[name] = po.search(self.__data)
if matches[name]:
if name == 'title':
title = matches[name].group('title').strip()
data[name] = title
# Look for issue information
m = re.search("#(?P<issue>[0-9]+)", title)
if m:
data['issue'] = m.group('issue')
else:
data['issue'] = ''
elif name == 'pub_date':
pub_date = matches[name].group('pub_date').strip()
data['pub_year'] = pub_date[-4:]
# Add this to comments field
data['comments'].insert(0, "Pub. Date: %s" % pub_date)
elif name == 'isbn':
isbn = matches[name].group('isbn').strip()
data[name] = isbn
elif name == 'desc':
# Find biggest size
max = 0
for i in range(len(matches[name])):
if len(matches[name][i]) > len(matches[name][max]):
max = i
data['comments'].append(matches[name][max].strip())
elif name == 'writer':
# We may find several writers
data[name] = []
writersList = re.sub('</?a.*?>', '', matches[name].group('writer')).split(',')
for d in writersList:
data[name].append(d.strip())
elif name == 'cover_artist':
data['artist']['Cover Artist'] = matches[name].group('cover_artist').strip()
elif name == 'penciller':
data['artist']['Penciller'] = matches[name].group('penciller').strip()
elif name == 'inker':
data['artist']['Inker'] = matches[name].group('inker').strip()
elif name == 'colorist':
data['artist']['Colorist'] = matches[name].group('colorist').strip()
elif name == 'letterer':
data['artist']['Letterer'] = matches[name].group('letterer').strip()
elif name == 'genre':
# We may find several genres
data[name] = []
genresList = re.sub('</?a.*?>', '', matches[name].group('genre')).split(',')
for d in genresList:
data[name].append(d.strip())
elif name == 'format':
format = matches[name].group('format').strip()
data['comments'].insert(1, format)
m = re.search("(?P<pages>[0-9]+)", format)
if m:
data['pages'] = m.group('pages')
else:
data['pages'] = ''
return data
def __getMovie(self, title):
if not len(title): return
self.__title = title
self.__getHTMLContent("%s%s" % (self.__baseURL, self.__searchURL % urllib.quote(self.__title)))
# Get all links
links = self.__fetchMovieLinks()
# Now retrieve info
if links:
for entry in links:
data = self.__fetchMovieInfo( url = self.__movieURL + entry )
# Add DC link (custom field)
data['darkhorse'] = "%s%s" % (self.__movieURL, entry)
node = self.__domTree.addEntry(data)
# Print entries on-the-fly
#self.__domTree.printEntry(node)
else:
return None
def halt():
print("HALT.")
sys.exit(0)
def showUsage():
print("Usage: %s comic" % sys.argv[0])
sys.exit(1)
def main():
if len(sys.argv) < 2:
showUsage()
parser = DarkHorseParser()
parser.run(sys.argv[1])
if __name__ == '__main__':
main()
| gpl-2.0 | -8,448,121,832,135,674,000 | 32.254464 | 137 | 0.6127 | false |
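A self-contained sketch of the Tellico XML skeleton that BasicTellicoDOM above produces, using only xml.dom.minidom; the entry values are dummies.

import xml.dom.minidom

doc = xml.dom.minidom.Document()
root = doc.createElement('tellico')
root.setAttribute('xmlns', 'http://periapsis.org/tellico/')
root.setAttribute('syntaxVersion', '9')
collection = doc.createElement('collection')
collection.setAttribute('title', 'My Comics')
collection.setAttribute('type', '6')
entry = doc.createElement('entry')
entry.setAttribute('id', '0')
title = doc.createElement('title')
title.appendChild(doc.createTextNode('Hellboy #1'))
entry.appendChild(title)
collection.appendChild(entry)
root.appendChild(collection)
doc.appendChild(root)
print(doc.toprettyxml(indent='  '))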
jonaprieto/agda-pkg | apkg/commands/list.py | 1 | 3039 | '''
apkg
~~~~
A package manager for Agda.
'''
# ----------------------------------------------------------------------------
import click
import logging
import click_log as clog
from operator import attrgetter, itemgetter
from pony.orm import db_session, select
from natsort import natsorted
from ..service.database import db
from ..service.database import ( Library , LibraryVersion )
from ..service.logging import logger, clog
# ----------------------------------------------------------------------------
# -- Command def.
@click.group()
def list(): pass
listFields = ["name", "version", "url"]
@list.command()
@clog.simple_verbosity_option(logger)
@click.option('--full'
, type=bool
, is_flag=True
, help='Show name, version and description per package.'
)
@click.option('--field'
, type=str
, default=""
, help='Show a specific field e.g.: name, version, url')
@db_session
def list(full, field):
"""List all installed packages."""
short = not full
libraries = select(l for l in Library if l)[:]
libraries = natsorted(libraries, key=lambda x : attrgetter('name')(x).lower())
if len(libraries) == 0:
logger.info("[!] No libraries available to list.")
logger.info(" Consider run the following command:")
        logger.info("    Consider running the following command:")
return
orderFields = [
#, "library"
#, "sha"
"description"
# , "license"
# , "include"
# , "depend"
# , "testedWith"
, "keywords"
# , "installed"
# , "cached"
# , "fromIndex"
# , "fromUrl"
# , "fromGit"
, "origin"
# , "default"
]
i = 0
if short and field == "":
logger.info("{:<20.20} {:<15.20} {:.72}"
.format("Library name", "Latest version", "URL"))
logger.info("-"*105)
for library in libraries:
v = library.getLatestVersion()
if v is not None:
if not short:
logger.info(v.library.name)
logger.info("="*len(v.library.name))
info = v.info
for k in orderFields:
val = info.get(k, None)
                    if val is not None and val != "" and len(val) > 0:
click.echo("{0}: {1}".format(k,val))
vs = ','.join(str(ver) for ver in v.library.versions)
if len(vs) > 0:
print("Versions:", vs)
else:
if field in listFields:
if field == "name":
print(v.library.name)
elif field == "version":
print(v.name)
else:
print(v.library.url)
else:
print("{:<20.20} {:<15.20} {:.72}"
.format(v.library.name,v.name,v.library.url))
i += 1
if not short and i < len(libraries):
logger.info("") | mit | -6,497,745,852,845,697,000 | 24.546218 | 80 | 0.471866 | false |
raccoongang/xblock-video | video_xblock/backends/wistia.py | 1 | 11501 | # -*- coding: utf-8 -*-
"""
Wistia Video player plugin.
"""
import HTMLParser
import json
import httplib
import logging
import re
import requests
import babelfish
from video_xblock import BaseVideoPlayer
from video_xblock.constants import TranscriptSource
from video_xblock.utils import ugettext as _
log = logging.getLogger(__name__)
class WistiaPlayer(BaseVideoPlayer):
"""
WistiaPlayer is used for videos hosted on the Wistia Video Cloud.
"""
# From official Wistia documentation. May change in the future
# https://wistia.com/doc/construct-an-embed-code#the_regex
url_re = re.compile(
r'https?:\/\/(.+)?(wistia.com|wi.st)\/(medias|embed)\/(?P<media_id>.*)'
)
# Token field is stored in metadata only if authentication was successful
metadata_fields = ['token', ]
# Current api (v1) for requesting transcripts.
# For example: https://api.wistia.com/v1/medias/jzmku8z83i/captions.json
# Docs on captions: https://wistia.com/doc/data-api#captions
# Docs on auth: https://wistia.com/doc/data-api#authentication, https://wistia.com/doc/oauth2
captions_api = {
# To check on authentication status; reference: https://wistia.com/doc/data-api#authentication
'auth_sample_url': 'api.wistia.com/v1/medias.json?api_password={token}',
# To fetch a specific transcript; reference: https://wistia.com/doc/data-api#captions_show
'download_url': 'http://api.wistia.com/v1/medias/{media_id}/captions/'
'{lang_code}.json?api_password={token}',
# To get list of captions; reference: https://wistia.com/doc/data-api#captions_index
'url': 'api.wistia.com/v1/medias/{media_id}/captions.json?api_password={token}',
'response': {
'language_code': 'language',
'language_label': 'english_name',
'subs': 'text'
}
}
# Stores default transcripts fetched from the captions API
default_transcripts = []
fields_help = {
'token': 'You can get a master token following the guide of '
'<a href="https://wistia.com/doc/data-api" target="_blank">Wistia</a>. '
'Please ensure appropriate operations scope has been set on the video platform.'
}
@property
def advanced_fields(self):
"""
Tuple of VideoXBlock fields to display in Basic tab of edit modal window.
        Wistia requires no additional advanced fields beyond the base player's.
"""
return super(WistiaPlayer, self).advanced_fields
@property
def trans_fields(self):
"""
List of VideoXBlock fields to display on `Manual & default transcripts` panel.
"""
fields_list = super(WistiaPlayer, self).trans_fields
# Add `token` after `default_transcripts`
fields_list.append('token')
return fields_list
def media_id(self, href):
"""
Extract Platform's media id from the video url.
E.g. https://example.wistia.com/medias/12345abcde -> 12345abcde
"""
return self.url_re.search(href).group('media_id')
def get_frag(self, **context):
"""
Compose an XBlock fragment with video player to be rendered in student view.
Extend general player fragment with Wistia specific context and JavaScript.
"""
context['data_setup'] = json.dumps(WistiaPlayer.player_data_setup(context))
frag = super(WistiaPlayer, self).get_frag(**context)
frag.add_content(
self.render_resource('static/html/wistiavideo.html', **context)
)
js_files = [
'static/vendor/js/vjs.wistia.js',
'static/vendor/js/videojs-offset.min.js',
'static/js/videojs/player-context-menu.js'
]
for js_file in js_files:
frag.add_javascript(self.resource_string(js_file))
return frag
@staticmethod
def player_data_setup(context):
"""
Wistia Player data setup.
"""
result = BaseVideoPlayer.player_data_setup(context)
result.update({
"techOrder": ["wistia"],
"sources": [{
"type": "video/wistia",
"src": context['url'] + "?controlsVisibleOnLoad=false"
}],
})
return result
def authenticate_api(self, **kwargs):
"""
Call a sample Wistia API url to check on authentication success.
Reference:
https://wistia.com/doc/data-api#authentication
Arguments:
kwargs (dict): Wistia master token key-value pair.
Returns:
auth_data (dict): Master token, provided by a user, which is to be stored in Wistia's player metadata.
error_status_message (str): Message with authentication outcomes for the sake of verbosity.
"""
token, media_id = kwargs.get('token'), kwargs.get('video_id') # pylint: disable=unused-variable
auth_data, error_message = {}, ''
auth_data['token'] = token
url = self.captions_api.get('auth_sample_url').format(token=str(token))
response = requests.get('https://' + url)
if response.status_code == httplib.UNAUTHORIZED:
error_message = "Authentication failed. " \
"Please ensure you have provided a valid master token, using Video API Token field."
return auth_data, error_message
def get_default_transcripts(self, **kwargs):
"""
Fetch transcripts list from Wistia API.
Urls of transcripts are to be fetched later on with separate API calls.
References:
https://wistia.com/doc/data-api#captions_index
https://wistia.com/doc/data-api#captions_show
Arguments:
kwargs (dict): Key-value pairs with video_id, fetched from video xblock, and token, fetched from Wistia API.
Returns:
list: List of dicts of transcripts. Example:
[
{
'lang': 'en',
'label': 'English',
'url': 'default_url_to_be_replaced',
'source': 'default'
},
# ...
]
"""
video_id = kwargs.get('video_id')
token = kwargs.get('token')
url = self.captions_api['url'].format(token=token, media_id=video_id)
message = _('Success.')
self.default_transcripts = []
# Fetch available transcripts' languages (codes and English labels), and assign its' urls.
try:
# get all languages caps data:
response = requests.get('https://{}'.format(url))
except IOError as exc:
# Probably, current API has changed
message = _('No timed transcript may be fetched from a video platform.\nError details: {}').format(
exc.message
)
log.exception("Transcripts INDEX request failure.")
return self.default_transcripts, message
# If a video does not exist, the response will be an empty HTTP 404 Not Found.
# Reference: https://wistia.com/doc/data-api#captions_index
if response.status_code == httplib.NOT_FOUND:
message = _("Wistia video {} doesn't exist.").format(video_id)
return self.default_transcripts, message
# Fetch other failure cases:
if not response.ok:
message = _("Invalid request.")
return self.default_transcripts, message
try:
wistia_data = response.json()
except ValueError:
wistia_data = ''
# No transcripts case, see: wistia.com/doc/data-api#captions_index
if not wistia_data:
message = _("For now, video platform doesn't have any timed transcript for this video.")
return self.default_transcripts, message
transcripts_data = [
[el.get('language'), el.get('english_name')]
for el in wistia_data
]
# Populate default_transcripts
for lang_code, lang_label in transcripts_data:
download_url = self.captions_api['download_url'].format(
media_id=video_id,
lang_code=lang_code,
token=token
)
# Wistia's API uses ISO-639-2, so "lang_code" is a 3-character code, e.g. "eng".
# Reference: https://wistia.com/doc/data-api#captions_show
# Convert from ISO-639-2 to ISO-639-1; reference: https://pythonhosted.org/babelfish/
try:
lang_code = babelfish.Language(lang_code).alpha2
except ValueError:
# In case of B or T codes, e.g. 'fre'.
# Reference: https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
lang_code = babelfish.Language.fromalpha3b(lang_code).alpha2 # pylint: disable=no-member
lang_label = self.get_transcript_language_parameters(lang_code)[1]
self.default_transcripts.append({
'lang': lang_code,
'label': lang_label,
'url': download_url,
'source': TranscriptSource.DEFAULT,
})
return self.default_transcripts, message
@staticmethod
def format_transcript_text_line(line):
"""
Replace comma with dot in timings, e.g. 00:00:10,500 should be 00:00:10.500.
"""
new_line = u""
for token in line.split():
decoded_token = token.encode('utf8', 'ignore')
formatted_token = re.sub(r'(\d{2}:\d{2}:\d{2}),(\d{3})', r'\1.\2', decoded_token)
new_line += unicode(formatted_token.decode('utf8')) + u" "
return new_line
def format_transcript_text(self, text):
"""
Prepare unicode transcripts to be converted to WebVTT format.
"""
new_text = [
self.format_transcript_text_line(line)
for line in text[0].splitlines()
]
new_text = '\n'.join(new_text)
html_parser = HTMLParser.HTMLParser()
unescaped_text = html_parser.unescape(new_text)
if u"WEBVTT" not in text:
text = u"WEBVTT\n\n" + unicode(unescaped_text)
else:
text = unicode(unescaped_text)
return text
def download_default_transcript(self, url, language_code):
"""
Get default transcript fetched from a video platform API and format it to WebVTT-like unicode.
References:
https://wistia.com/doc/data-api#captions_index
https://wistia.com/doc/data-api#captions_show
Arguments:
url (str): API url to fetch a default transcript from.
language_code (str): Language ISO-639-2 code of a default transcript to be downloaded.
Returns:
text (unicode): Text of transcripts.
"""
try:
response = requests.get(url)
json_data = response.json()
return json_data[u'text']
except IOError:
log.exception("Transcript fetching failure: language [{}]".format(language_code))
return u''
except (ValueError, KeyError, TypeError, AttributeError):
log.exception("Can't parse fetched transcript: language [{}]".format(language_code))
return u''
def dispatch(self, request, suffix):
"""
Wistia dispatch method.
"""
pass
| gpl-3.0 | -7,618,465,501,326,326,000 | 36.584967 | 120 | 0.589166 | false |
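A minimal standalone sketch of the captions lookup this backend performs, reusing the 'url' template from captions_api above; WISTIA_TOKEN and MEDIA_ID are placeholders to substitute with real credentials.

import requests

WISTIA_TOKEN = "<master-token>"   # placeholder: a valid Data API password
MEDIA_ID = "<media-id>"           # placeholder: an existing Wistia media id

url = 'https://api.wistia.com/v1/medias/{media_id}/captions.json?api_password={token}'.format(
    media_id=MEDIA_ID, token=WISTIA_TOKEN)

response = requests.get(url)
if response.ok:
    for caption in response.json():
        print(caption.get('language'), caption.get('english_name'))
else:
    print("Captions request failed with HTTP", response.status_code)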
tbttfox/TwistyTools | ttLib/ttRegion.py | 1 | 6597 | #!/usr/bin/python
from __future__ import division
from ttPoint import TINYNUM,Point,cross,matMul,angle,distance2,cart2uv
from ttPlane import pointDistPlane
from ttTriangle import Triangle
from ttSphere import Sphere
from ttBase import TTBase
from Draw.DrawRegion import DrawRegion
class Region(TTBase):
def __init__(self,arcList,pnList):
"""
Regions are made in the Graph class
>> Region(arcList,pnList,graph)
Where:
arcList is an ordered list of arcs
pnList is an ordered list of arc "directions" (0 means the arc
is convex, 1 means the arc is concave)
baseSide is the list of circles that overlap the current region
"""
super(Region,self).__init__()
self.enum = TTBase.enumType.Region
#these will be reordered
self.arcs = None
self.posNeg = None
self.corners = None
self._center = None
self.allNeg = False
if sum(pnList) == len(pnList):
self.allNeg = True
#here we order the arcs/posNeg/corners
#if there's only two arcs, everything is already in order
if len(arcList) > 2:
arcs = list(arcList[:])
pn = list(pnList[:])
if self.allNeg:
idx = 0
corn = [arcs[idx].end, arcs[idx].start]
else: #we have at least one positive arc
idx = pn.index(0) #grab the first positive arc
corn = [arcs[idx].start, arcs[idx].end]
pnOrd = [pn.pop(idx)] #PosNeg Ordered
arcsOrd = [arcs.pop(idx)] #Arcs Ordered
#print "arcStart ",arcsOrd[0].start
#print "arcEnd ",arcsOrd[0].end
#loop through the list to find if anything begins/ends with the last item on the list
#while corn[0] != corn[-1]: #go 'till last arc connects to the first
for _ in range(len(pn)): # looping the variable "_" just means don't use
found = 0
for i in range(len(arcs)):
if arcs[i].start == corn[-1]:
corn.append(arcs[i].end)
arcsOrd.append(arcs.pop(i))
pnOrd.append(pn.pop(i))
found = 1
break
elif arcs[i].end == corn[-1]:
corn.append(arcs[i].start)
arcsOrd.append(arcs.pop(i))
pnOrd.append(pn.pop(i))
found = 1
break
if found == 1:
continue
else:
                    print "problem finding a cycle in Region.__init__"
self.posNeg = pnOrd
self.corners = corn[:-1]
self.arcs = arcsOrd
self.parents = []
self.setParents()
@property
def center(self):
if self._center == None:
if len(self.corners) > 0:
vecChain = Point(0,0,0)
for p in self.corners:
vecChain = vecChain + p
#everything is done on the unit sphere
self._center = vecChain.n
return self._center
def __eq__(self,other):
if not isinstance(other,Region):
return False
if len(self.arcs) != len(other.arcs):
return False
for i in range(len(self.arcs)):
if self.arcs == other.arcs[i:] + other.arcs[:i]:
return True
return False
def fanTriangle(self,offset=0):
corn = self.corners #already sorted
if len(corn) < 3:
print "Trying to make a triangle out of < 3 corners"
return None
corn = corn[offset:] + corn[:offset]
tris = []
for i in range(2,len(corn)):
tris.append(Triangle(corn[0], corn[i-1], corn[i]))
return tris
def similar(self,other):
if not isinstance(other,Region):
return False
if len(self.arcs) != len(other.arcs):
return False
if len(self.arcs) == 2:
myd = distance2(self.arcs[0].start, self.arcs[0].end)
yourd = distance2(other.arcs[0].start, other.arcs[0].end)
if myd != yourd:
return False
myx = distance2(self.arcs[1].c, self.arcs[0].c)
yourx = distance2(other.arcs[1].c, other.arcs[0].c)
if myx != yourx:
return False
myrads = sorted([self.arcs[0].rad, self.arcs[1].rad])
yourrads = sorted([other.arcs[0].rad, other.arcs[1].rad])
if -TINYNUM < myrads[0] - yourrads[0] < TINYNUM:
if -TINYNUM < myrads[1] - yourrads[1] < TINYNUM:
return True
return False
myTris = self.fanTriangle()
for i in range(len(self.arcs)):
yourTris = other.fanTriangle(i)
if myTris == yourTris:
return True
return False
def contains(self,pt):
for i,a in enumerate(self.arcs):
d = pointDistPlane(pt,a.circle.toPlane())
if self.posNeg[i] == 0 and d < -TINYNUM:
return False
elif self.posNeg[i] == 1 and d > TINYNUM:
return False
return True
@property
def drawObject(self):
if self._drawObject == None:
self._drawObject = DrawRegion(self)
return self._drawObject
def regionCoreRegion(A,B):
##TODO##
# This function relies on a graph object
# pull this functionality out of the new combined shell object
# so we can use it here without duplicating code
##TODO##
allCircles = [x.circle for x in A.arcs] + [x.circle for x in B.arcs]
allPN = A.posNeg + B.posNeg
tempGraph = Graph(allCircles)
keep = []
ccls = []
mypn = []
#for all the arcs in the overlapping regions
for arc in tempGraph.arcs:
mp = arc.midpoint
if A.contains(mp) and B.contains(mp):
try:
idx = allCircles.index(arc.circle)
except AttributeError:
continue
except:
raise
keep.append(arc)
#if the circle is positive in the region
#it'll be positive in the core
            if allPN[idx] == 0:
mypn.append(0)
ccls.append(arc.circle)
else:
mypn.append(1)
return Region(keep,mypn,tempGraph)
| gpl-3.0 | -1,122,696,833,930,972,200 | 32.658163 | 97 | 0.51766 | false |
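A standalone sketch of the endpoint-chaining idea used in Region.__init__ above: given unordered edges, walk them into a single closed cycle by matching endpoints. Plain tuples stand in for the Arc objects.

def order_into_cycle(edges):
    edges = list(edges)
    start, end = edges.pop(0)
    corners = [start, end]
    ordered = [(start, end)]
    while edges:
        for i, (a, b) in enumerate(edges):
            if a == corners[-1]:
                corners.append(b)
                ordered.append(edges.pop(i))
                break
            if b == corners[-1]:
                corners.append(a)
                ordered.append(edges.pop(i))
                break
        else:
            raise ValueError("edges do not form a single closed cycle")
    return ordered, corners[:-1]

print(order_into_cycle([("A", "B"), ("C", "A"), ("B", "C")]))
# ([('A', 'B'), ('B', 'C'), ('C', 'A')], ['A', 'B', 'C'])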
Fat-Zer/FreeCAD_sf_master | src/Mod/Fem/femexamples/thermomech_spine.py | 9 | 6411 | # ***************************************************************************
# * Copyright (c) 2019 Bernd Hahnebach <[email protected]> *
# * Copyright (c) 2020 Sudhanshu Dubey <[email protected] *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
# to run the example use:
"""
from femexamples.thermomech_spine import setup
setup()
"""
import FreeCAD
import Fem
import ObjectsFem
mesh_name = "Mesh" # needs to be Mesh to work with unit tests
def init_doc(doc=None):
if doc is None:
doc = FreeCAD.newDocument()
return doc
def get_information():
info = {"name": "Thermomech Spine",
"meshtype": "solid",
"meshelement": "Tet10",
"constraints": ["fixed", "initial temperature", "temperature", "heatflux"],
"solvers": ["calculix"],
"material": "solid",
"equation": "thermomechanical"
}
return info
def setup(doc=None, solvertype="ccxtools"):
# setup model
if doc is None:
doc = init_doc()
# geometry object
geom_obj = doc.addObject("Part::Box", "Box")
geom_obj.Height = 25.4
geom_obj.Width = 25.4
geom_obj.Length = 203.2
doc.recompute()
if FreeCAD.GuiUp:
geom_obj.ViewObject.Document.activeView().viewAxonometric()
geom_obj.ViewObject.Document.activeView().fitAll()
# analysis
analysis = ObjectsFem.makeAnalysis(doc, "Analysis")
# solver
if solvertype == "calculix":
solver_object = analysis.addObject(
ObjectsFem.makeSolverCalculix(doc, "SolverCalculiX")
)[0]
elif solvertype == "ccxtools":
solver_object = analysis.addObject(
ObjectsFem.makeSolverCalculixCcxTools(doc, "CalculiXccxTools")
)[0]
solver_object.WorkingDir = u""
# should be possible with elmer too
# elif solvertype == "elmer":
# analysis.addObject(ObjectsFem.makeSolverElmer(doc, "SolverElmer"))
else:
FreeCAD.Console.PrintWarning(
"Not known or not supported solver type: {}. "
"No solver object was created.\n".format(solvertype)
)
if solvertype == "calculix" or solvertype == "ccxtools":
solver_object.SplitInputWriter = False
solver_object.AnalysisType = "thermomech"
solver_object.GeometricalNonlinearity = "linear"
solver_object.ThermoMechSteadyState = True
solver_object.MatrixSolverType = "default"
solver_object.IterationsThermoMechMaximum = 2000
solver_object.IterationsControlParameterTimeUse = True
# material
material_object = analysis.addObject(
ObjectsFem.makeMaterialSolid(doc, "MechanicalMaterial")
)[0]
mat = material_object.Material
mat["Name"] = "Steel-Generic"
mat["YoungsModulus"] = "200000 MPa"
mat["PoissonRatio"] = "0.30"
mat["Density"] = "7900 kg/m^3"
mat["ThermalConductivity"] = "43.27 W/m/K" # SvdW: Change to Ansys model values
mat["ThermalExpansionCoefficient"] = "12 um/m/K"
mat["SpecificHeat"] = "500 J/kg/K" # SvdW: Change to Ansys model values
material_object.Material = mat
# fixed_constraint
fixed_constraint = analysis.addObject(
ObjectsFem.makeConstraintFixed(doc, "FemConstraintFixed")
)[0]
fixed_constraint.References = [(geom_obj, "Face1")]
# initialtemperature_constraint
initialtemperature_constraint = analysis.addObject(
ObjectsFem.makeConstraintInitialTemperature(doc, "FemConstraintInitialTemperature")
)[0]
initialtemperature_constraint.initialTemperature = 300.0
# temperature_constraint
temperature_constraint = analysis.addObject(
ObjectsFem.makeConstraintTemperature(doc, "FemConstraintTemperature")
)[0]
temperature_constraint.References = [(geom_obj, "Face1")]
temperature_constraint.Temperature = 310.93
# heatflux_constraint
heatflux_constraint = analysis.addObject(
ObjectsFem.makeConstraintHeatflux(doc, "FemConstraintHeatflux")
)[0]
heatflux_constraint.References = [
(geom_obj, "Face3"),
(geom_obj, "Face4"),
(geom_obj, "Face5"),
(geom_obj, "Face6")
]
heatflux_constraint.AmbientTemp = 255.3722
heatflux_constraint.FilmCoef = 5.678
# mesh
from .meshes.mesh_thermomech_spine_tetra10 import create_nodes, create_elements
fem_mesh = Fem.FemMesh()
control = create_nodes(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating nodes.\n")
control = create_elements(fem_mesh)
if not control:
FreeCAD.Console.PrintError("Error on creating elements.\n")
femmesh_obj = analysis.addObject(
ObjectsFem.makeMeshGmsh(doc, mesh_name)
)[0]
femmesh_obj.FemMesh = fem_mesh
femmesh_obj.Part = geom_obj
femmesh_obj.SecondOrderLinear = False
doc.recompute()
return doc
| lgpl-2.1 | 5,691,031,743,129,874,000 | 37.160714 | 91 | 0.58696 | false |
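One way to exercise this example headless, assuming FreeCAD's bundled Python (e.g. freecadcmd) with the Fem workbench importable; the output path is an arbitrary choice.

from femexamples.thermomech_spine import setup

doc = setup(solvertype="ccxtools")           # builds geometry, analysis, material, constraints and mesh
doc.saveAs(u"/tmp/thermomech_spine.FCStd")   # persist the generated document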
roscopecoltran/scraper | .staging/meta-engines/xlinkBook/update/update_mergers_and_acquisitions.py | 1 | 10303 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from spider import *
sys.path.append("..")
from utils import Utils
class MergersAndAcquisitionsSpider(Spider):
def __init__(self):
Spider.__init__(self)
self.school = 'wiki'
def processMergers(self):
utils = Utils()
wiki_dict = {'google' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Google',\
'facebook' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Facebook',\
'microsoft' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Microsoft',\
'apple' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Apple',\
'ibm' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_IBM',\
'yahoo' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Yahoo!',\
'twitter' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Twitter'}
wiki_dict = {'google' : 'http://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Google'}
for key, url in wiki_dict.items():
r = requests.get(url)
soup = BeautifulSoup(r.text)
table = soup.find('table', class_='wikitable')
#print table
soup = BeautifulSoup(table.prettify())
count = 0
title = ''
desc = 'description:'
file_name = self.get_file_name('economics/mergers-and-acquisitions/' + key, self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
item_id = key + '-merger-'
rows = soup.find_all('tr')
print len(rows)
for td in soup.find_all('td'):
count += 1
if key == 'google':
#if count > 8 or (count == 8 and self.count == len(rows) - 2):
if count == 7:
print title
count = 0
self.count += 1
self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc))
title = ''
desc = ''
print '----------------------------------'
if count != 7:
#if count == 3:
# desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
if count == 1:
desc += 'date:' + td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
elif count == 2:
title = utils.removeDoubleSpace(td.text.strip())
elif count == 3:
desc += 'business:' + td.text.strip().replace(' and ', ', ') + ' '
elif count == 4:
desc += 'country:' + td.text.strip() + ' '
elif count == 5:
desc += 'price:$' + td.text.strip()[td.text.strip().find('♠') + 1 :].strip() + ' '
elif count == 6:
desc += 'description:' + td.text.strip() + ' '
if key == 'facebook':
if count > 10 or (count == 10 and self.count == len(rows) - 2):
count = 1
print title
self.count += 1
self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc))
title = ''
desc = 'description:'
print '----------------------------------'
if count != 1 and count != 9 and count != 10:
if count == 2:
desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
elif count == 3:
title = utils.removeDoubleSpace(td.text.strip())
elif count == 5 and td.a != None:
desc += td.a.text.strip() + ' '
else:
desc += td.text.strip() + ' '
if key == 'microsoft':
if count > 7 or (count == 7 and self.count == len(rows) - 2):
count = 1
print title
self.count += 1
self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc))
title = ''
desc = 'description:'
print '----------------------------------'
if count != 1 and count != 7:
if count == 2:
desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
elif count == 3:
title = utils.removeDoubleSpace(td.text.strip())
else:
desc += td.text.strip() + ' '
if key == 'apple':
if count > 8 or (count == 8 and self.count == len(rows) - 2):
print title
count = 1
self.count += 1
self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc))
title = ''
desc = 'description:'
print '----------------------------------'
if count != 1 and count != 7 and count != 8:
if count == 2:
desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
elif count == 3:
title = utils.removeDoubleSpace(td.text.strip())
else:
desc += td.text.strip() + ' '
if key == 'ibm':
if count > 6 or (count == 6 and self.count == len(rows) - 2):
print title
count = 1
self.count += 1
self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc))
title = ''
desc = 'description:'
print '----------------------------------'
if count != 6:
if count == 1:
desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
elif count == 2:
title = utils.removeDoubleSpace(td.text.strip())
else:
desc += td.text.strip().replace('\n','') + ' '
if key == 'yahoo':
if count > 8 or (count == 8 and self.count == len(rows) - 2):
count = 1
print title
self.count += 1
self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc))
title = ''
desc = 'description:'
print '----------------------------------'
if count != 1 and count != 8:
if count == 2:
desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
elif count == 3:
title = utils.removeDoubleSpace(td.text.strip())
else:
desc += td.text.strip() + ' '
if key == 'twitter':
if count > 8 or (count == 8 and self.count == len(rows) - 2):
count = 1
print title
self.count += 1
self.write_db(f, item_id + str(self.count), title, '', utils.removeDoubleSpace(desc))
title = ''
desc = 'description:'
print '----------------------------------'
if count != 1 and count != 8:
if count == 2:
desc += td.text.strip()[td.text.strip().find(' ') :].strip() + ' '
elif count == 3:
title = utils.removeDoubleSpace(td.text.strip())
else:
desc += td.text.strip() + ' '
self.close_db(f)
#if file_lines != self.count and self.count > 0:
if True:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def processAll(self):
file_name = self.get_file_name('economics/mergers-and-acquisitions/all', self.school)
file_lines = self.countFileLineNum(file_name)
f = self.open_db(file_name + ".tmp")
self.count = 0
r = requests.get('https://en.wikipedia.org/wiki/Category:Lists_of_corporate_acquisitions')
soup = BeautifulSoup(r.text)
div = soup.find('div', class_='mw-category')
soup = BeautifulSoup(div.prettify())
for a in soup.find_all('a'):
print a.text.strip()
self.count += 1
self.write_db(f, 'all-mergers-' + str(self.count), a.text.strip(), 'https://en.wikipedia.org' + a['href'])
self.close_db(f)
if file_lines != self.count and self.count > 0:
self.do_upgrade_db(file_name)
print "before lines: " + str(file_lines) + " after update: " + str(self.count) + " \n\n"
else:
self.cancel_upgrade(file_name)
print "no need upgrade\n"
def doWork(self):
self.processAll()
self.processMergers()
start = MergersAndAcquisitionsSpider()
start.doWork()
| mit | 4,579,076,035,847,220,000 | 49.743842 | 118 | 0.404038 | false |
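A generic sketch of the wikitable scraping pattern the spider above relies on, kept separate from the Spider base class and assuming the requests and bs4 packages; only a few cells are printed.

import requests
from bs4 import BeautifulSoup

url = 'https://en.wikipedia.org/wiki/List_of_mergers_and_acquisitions_by_Google'
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
table = soup.find('table', class_='wikitable')

if table is not None:
    for row in table.find_all('tr')[1:6]:      # a few data rows
        cells = [td.get_text(strip=True) for td in row.find_all('td')]
        if cells:
            print(' | '.join(cells[:4]))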
RobinQuetin/CAIRIS-web | cairis/cairis/AssetsDialog.py | 1 | 2963 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
import armid
import Asset
from AssetDialog import AssetDialog
from DialogClassParameters import DialogClassParameters
import ARM
import os
import xml.sax
from DimensionBaseDialog import DimensionBaseDialog
class AssetsDialog(DimensionBaseDialog):
def __init__(self,parent):
DimensionBaseDialog.__init__(self,parent,armid.ASSETS_ID,'Assets',(930,300),'asset.png')
self.rmFrame = parent
idList = [armid.ASSETS_ASSETLIST_ID,armid.ASSETS_BUTTONADD_ID,armid.ASSETS_BUTTONDELETE_ID]
columnList = ['Name','Type']
self.buildControls(idList,columnList,self.dbProxy.getAssets,'asset')
listCtrl = self.FindWindowById(armid.ASSETS_ASSETLIST_ID)
listCtrl.SetColumnWidth(0,200)
listCtrl.SetColumnWidth(1,200)
def addObjectRow(self,assetListCtrl,listRow,asset):
assetListCtrl.InsertStringItem(listRow,asset.name())
assetListCtrl.SetStringItem(listRow,1,asset.type())
def onAdd(self,evt):
try:
addParameters = DialogClassParameters(armid.ASSET_ID,'Add asset',AssetDialog,armid.ASSET_BUTTONCOMMIT_ID,self.dbProxy.addAsset,True)
self.addObject(addParameters)
self.rmFrame.updateObjectSelection(self.selectedLabel)
except ARM.ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Add asset',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
def onUpdate(self,evt):
selectedObjt = self.objts[self.selectedLabel]
assetId = selectedObjt.id()
try:
updateParameters = DialogClassParameters(armid.ASSET_ID,'Edit asset',AssetDialog,armid.ASSET_BUTTONCOMMIT_ID,self.dbProxy.updateAsset,False)
self.updateObject(selectedObjt,updateParameters)
except ARM.ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Edit asset',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
      dlg.Destroy()
def onDelete(self,evt):
try:
self.deleteObject('No asset','Delete asset',self.dbProxy.deleteAsset)
self.rmFrame.updateObjectSelection()
except ARM.ARMException,errorText:
dlg = wx.MessageDialog(self,str(errorText),'Delete asset',wx.OK | wx.ICON_ERROR)
dlg.ShowModal()
      dlg.Destroy()
| apache-2.0 | 2,933,360,615,195,132,400 | 39.589041 | 146 | 0.746541 | false |
lhirschfeld/JargonBot | custombot.py | 1 | 3061 | import pickle
import praw
import random
from textblob import TextBlob
from datetime import datetime
from sklearn import linear_model
class RedditBot:
"""A class that performs basic operations, working with Reddit's
PRAW API."""
def __init__(self, botName):
# Setup the bot and primary variables.
self.r = praw.Reddit(botName)
self.responses = []
with open('ids.pickle', 'rb') as handle:
try:
self.ids = pickle.load(handle)
except EOFError:
self.ids = []
with open('models.pickle', 'rb') as handle:
try:
self.models = pickle.load(handle)
except EOFError:
self.models = {}
def updateIds(self):
# Save the new ids of comments that have been responded to.
with open('ids.pickle', 'wb') as handle:
pickle.dump(self.ids, handle, protocol=pickle.HIGHEST_PROTOCOL)
def createModel(self, sub, init_fit):
new_model = linear_model.LinearRegression()
new_model.fit(init_fit[0], init_fit[1])
# TODO: Create sub class that stores this data.
self.models[sub] = (new_model, 1, init_fit[0], init_fit[1])
with open('models.pickle', 'wb') as handle:
pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
def updateModels(self, modelParams):
# Model params is a list of strings which contains the keys in
# each result which should be used to update the model.
        # Models is a dictionary with a tuple at each key containing:
# (linear regression, randomness rate, x fits, y fits)
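        # For illustration (hypothetical keys): modelParams = ["hour", "word_count"]
        # means each recorded response dict r must carry r["hour"] and
        # r["word_count"]; those values become the regression features x below.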
currentTime = datetime.now()
        oldResponses = [r for r in self.responses
                        if (currentTime - r["time"]).total_seconds() > 3600]
        self.responses = [r for r in self.responses
                          if (currentTime - r["time"]).total_seconds() < 3600]
for r in oldResponses:
result = 0
url = "https://reddit.com/" + r["sID"] + "?comment=" + r["cID"]
submission = self.r.get_submission(url=url)
comment_queue = submission.comments[:]
if comment_queue:
com = comment_queue.pop(0)
result += com.score
comment_queue.extend(com.replies)
while comment_queue:
com = comment_queue.pop(0)
                text = TextBlob(com.body)  # praw comments expose their text via .body
result += text.sentiment.polarity * com.score
x = []
for key in modelParams:
x.append(r[key])
            # Get the old fits and add the new observation; the stored value
            # is a tuple, so rebuild it to decay the randomness rate
            model, rand_rate, x_fits, y_fits = self.models[r["sub"]]
            x_fits.append(x)
            y_fits.append(result)
            model.fit(x_fits, y_fits)
            # Update odds of random choice
            self.models[r["sub"]] = (model, rand_rate * 0.96, x_fits, y_fits)
with open('models.pickle', 'wb') as handle:
pickle.dump(self.models, handle, protocol=pickle.HIGHEST_PROTOCOL)
| mit | 3,835,738,549,379,795,000 | 35.440476 | 78 | 0.560928 | false |
toomore/grs | tools/make_twse_list.py | 1 | 3616 | # -*- coding: utf-8 -*-
import csv
import re
import urllib2
from datetime import datetime
NOW = datetime(2013, 12, 17)
SAVEPATH = '../grs/twse_list.csv'
INDUSTRYCODE = '../grs/industry_code.csv'
TWSEURL = 'http://www.twse.com.tw/ch/trading/exchange/MI_INDEX/MI_INDEX2_print.php?genpage=genpage/Report%(year)s%(mon)02d/A112%(year)s%(mon)02d%(day)02d%%s.php&type=csv' % {'year': NOW.year, 'mon': NOW.month, 'day': NOW.day}
TWSECLS = {'0049': u'封閉式基金',
'0099P': u'ETF',
'019919T': u'受益證券',
           '0999': u'認購權證', # (excluding bull certificates)
           '0999P': u'認售權證', # (excluding bear certificates)
'0999C': u'牛證',
'0999B': u'熊證',
'0999GA': u'附認股權特別股',
'0999GD': u'附認股權公司債',
'0999G9': u'認股權憑證',
'01': u'水泥工業',
'02': u'食品工業',
'03': u'塑膠工業',
'04': u'紡織纖維',
'05': u'電機機械',
'06': u'電器電纜',
'07': u'化學生技醫療',
'21': u'化學工業',
'22': u'生技醫療業',
'08': u'玻璃陶瓷',
'09': u'造紙工業',
'10': u'鋼鐵工業',
'11': u'橡膠工業',
'12': u'汽車工業',
'13': u'電子工業',
'24': u'半導體業',
'25': u'電腦及週邊設備業',
'26': u'光電業',
'27': u'通信網路業',
'28': u'電子零組件業',
'29': u'電子通路業',
'30': u'資訊服務業',
'31': u'其他電子業',
'14': u'建材營造',
'15': u'航運業',
'16': u'觀光事業',
'17': u'金融保險',
'18': u'貿易百貨',
'9299': u'存託憑證',
'23': u'油電燃氣業',
'19': u'綜合',
'20': u'其他',
'CB': u'可轉換公司債',}
#'ALL_1': u'全部'}
def fetch_twse_list():
with open(SAVEPATH, 'w') as files:
csv_file = csv.writer(files)
re_pattern = re.compile(r'(=")?[\d\w]{4,6}(=)?')
re_sub = re.compile(r'[^\w\d]')
for no in TWSECLS:
for i in csv.reader(urllib2.urlopen(TWSEURL % no).readlines()):
if len(i) >= 3 and re_pattern.match(i[0]):
pass
else:
i.pop(0)
if len(i) >= 2 and re_pattern.match(i[0]):
csv_file.writerow([re_sub.sub('', i[0]),
i[1].decode('cp950').encode('utf-8'),
no, TWSECLS[no].encode('utf-8')])
with open(SAVEPATH, 'r') as files:
csv_file = csv.reader(files)
all_items = {}
for i in csv_file:
all_items.update({i[0]: i})
with open(SAVEPATH, 'w') as files:
csv_file = csv.writer(files)
#csv_file.writerow(['文件更新', datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'x', 'x'])
csv_file.writerow(['UPDATE', datetime.now().strftime('%Y/%m/%d'), 'x', 'x'])
csv_file.writerow(['證期會代碼', '公司簡稱', '分類代碼', '分類名稱'])
for i in sorted(all_items):
csv_file.writerow(all_items[i])
def output_industry_code():
with open(INDUSTRYCODE, 'w') as files:
csv_file = csv.writer(files)
for i in sorted(TWSECLS):
csv_file.writerow([i, TWSECLS[i].encode('utf-8')])
if __name__ == '__main__':
fetch_twse_list()
output_industry_code()
| mit | -7,749,647,010,060,471,000 | 31.55102 | 225 | 0.445141 | false |
snakeztc/NeuralDialog-CVAE | config_utils.py | 1 | 1880 | # Copyright (C) 2017 Tiancheng Zhao, Carnegie Mellon University
class KgCVAEConfig(object):
description= None
use_hcf = True # use dialog act in training (if turn off kgCVAE -> CVAE)
update_limit = 3000 # the number of mini-batch before evaluating the model
# how to encode utterance.
# bow: add word embedding together
# rnn: RNN utterance encoder
# bi_rnn: bi_directional RNN utterance encoder
sent_type = "bi_rnn"
# latent variable (gaussian variable)
latent_size = 200 # the dimension of latent variable
full_kl_step = 10000 # how many batch before KL cost weight reaches 1.0
dec_keep_prob = 1.0 # do we use word drop decoder [Bowman el al 2015]
# Network general
cell_type = "gru" # gru or lstm
embed_size = 200 # word embedding size
topic_embed_size = 30 # topic embedding size
da_embed_size = 30 # dialog act embedding size
cxt_cell_size = 600 # context encoder hidden size
sent_cell_size = 300 # utterance encoder hidden size
dec_cell_size = 400 # response decoder hidden size
backward_size = 10 # how many utterance kept in the context window
step_size = 1 # internal usage
max_utt_len = 40 # max number of words in an utterance
num_layer = 1 # number of context RNN layers
# Optimization parameters
op = "adam"
grad_clip = 5.0 # gradient abs max cut
init_w = 0.08 # uniform random from [-init_w, init_w]
batch_size = 30 # mini-batch size
init_lr = 0.001 # initial learning rate
lr_hold = 1 # only used by SGD
lr_decay = 0.6 # only used by SGD
keep_prob = 1.0 # drop out rate
improve_threshold = 0.996 # for early stopping
patient_increase = 2.0 # for early stopping
early_stop = True
max_epoch = 60 # max number of epoch of training
grad_noise = 0.0 # inject gradient noise?
| apache-2.0 | 5,411,400,849,462,010,000 | 34.471698 | 79 | 0.66117 | false |
UMD-DRASTIC/drastic | tests/test_user.py | 1 | 1769 | import unittest
from drastic.models.user import User
from drastic.models.group import Group
from drastic.models.errors import UserConflictError
from nose.tools import raises
class UserTest(unittest.TestCase):
_multiprocess_can_split_ = True
def test_create(self):
user = User.create(username="test", password="password", email="[email protected]", quick=True)
assert user.name == "test"
assert user.email == '[email protected]'
assert user.administrator == False
assert user.active == True
@raises(UserConflictError)
def test_create_fail(self):
User.create(username="test", password="password", email="[email protected]", quick=True)
User.create(username="test", password="password", email="[email protected]", quick=True)
def test_authenticate(self):
user = User.create(username="test_auth", password="password", email="[email protected]", quick=True)
assert user.authenticate("password")
def test_authenticate_fail(self):
user = User.create(username="test_auth_fail", password="password", email="[email protected]", quick=True)
assert not user.authenticate("not the password")
def test_group_membership(self):
user = User.create(username="test_group", password="password", email="[email protected]", groups=[], quick=True)
assert user
group = Group.create(name="test_group_1")
user.update(groups=[group.id])
# Refetch the user
user = User.find("test_group")
assert group.id in user.groups
groups = Group.find_by_ids(user.groups)
assert [g.id for g in groups] == user.groups
users = group.get_users()
assert users[0].id == user.id
| agpl-3.0 | 3,964,483,294,707,895,300 | 36.638298 | 123 | 0.671566 | false |
LionelDupuy/ARCHI_PHEN | ImageJ/DatabaseInput_deprecated.py | 1 | 5558 | import time
from datetime import date
import numpy
from PIL import Image
import zbar
import os,sys
import wx # GUI
# Handle time lapse!
scanner = zbar.ImageScanner()
# configure the reader
scanner.parse_config('enable')
#scanner.set_config(0, zbar.Config.ENABLE, 0)
#scanner.set_config(zbar.Symbol.QRCODE, zbar.Config.ENABLE, 1)
label = ""
# TODO
# Read label better (crop enhance contrast etc...)
# copy files
# record previous file
def readQRCODE(ImageFile):
label = ""
pilImage = Image.open(ImageFile)
width, height = pilImage.size
pilImage = pilImage.crop((int(0.18*width), int(0.2*height),int(0.97*width), int(0.95*height)))
pilImage = pilImage.convert('L')
width, height = pilImage.size
raw = pilImage.tostring()
# wrap image data
image = zbar.Image(width, height, 'Y800', raw)
# scan the image for barcodes
scanner.scan(image)
# extract results
for symbol in image:
label = symbol.data
# clean up
del(image)
return label
class MainWindow(wx.Frame):
def __init__(self, parent, title):
wx.Frame.__init__(self, parent, title=title, size=(400,300))
self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE | wx.TE_READONLY)
self.CreateStatusBar() # A Statusbar in the bottom of the window
# FOlders
self.dest_folder = os.path.dirname(sys.argv[0])
self.root_folder = os.path.dirname(sys.argv[0])
# Setting up the menu.
filemenu= wx.Menu()
# wx.ID_ABOUT and wx.ID_EXIT are standard IDs provided by wxWidgets.
filemenu.Append(1, "&Base Folders"," Set folders")
filemenu.Append(2, "&Run"," scan for files")
filemenu.Append(wx.ID_ABOUT, "&About"," Information about this program")
filemenu.AppendSeparator()
filemenu.Append(wx.ID_EXIT,"E&xit"," Terminate the program")
# xxxxxxx
self.Bind(wx.EVT_MENU, self.get_folder, id=1)
self.Bind(wx.EVT_MENU, self.scan_data, id=2)
# Creating the menubar.
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&File") # Adding the "filemenu" to the MenuBar
self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
self.Show(True)
def get_folder(self, id):
dlg = wx.DirDialog(self, "Choose Root Folder:")
if dlg.ShowModal() == wx.ID_OK:
self.root_folder = dlg.GetPath()
dlg.Destroy()
def scan_data(self, id):
#################################################################
# Find all suitable files in the current folder
#################################################################
dir = self.root_folder#os.path.dirname(sys.argv[0])
sys.path.append(dir)
for f in os.listdir(dir):
file, ext = os.path.splitext(f) # Handles no-extension files, etc.
if ext == '.JPG':
base_row = file.split("-")
base = base_row[0]
if len(base_row) == 1:
if os.path.isfile(dir+ "\\"+ file + "-QR-.jpg"):
genotype = readQRCODE(dir+ "\\"+ file + "-QR-.jpg")
# image properties
file_tmp1 = file.split('_')
file_id = file_tmp1[1]
#os.path.getmtime(dir+ "\\"+ file +ext)
# Image identifiers
identifyer = [None,None,None]
if len(genotype) > 5:
text = "Root directory: " + dir + "\n"
text += "File: " + file + "\n"
text += "Genotype: " + genotype
self.control.SetValue(text)
wx.Yield()
identifyer = genotype.split('_')
else:
pilImage = Image.open(dir+ "\\"+ file + "-QR-.jpg")
width, height = pilImage.size
pilImage = pilImage.crop((int(0.18*width), int(0.3*height),int(0.97*width), int(0.92*height)))
width, height = pilImage.size
sc = 0.6
pilImage = pilImage.resize((int(width*sc),int(height*sc)), Image.ANTIALIAS)
img = wx.EmptyImage( *pilImage.size )
pilImageCopy = pilImage.copy()
pilImageCopyRGB = pilImageCopy.convert( 'RGB' ) # Discard any alpha from the PIL image.
pilImageRgbData =pilImageCopyRGB.tostring()
img.SetData( pilImageRgbData )
identifyer_length = 0
while identifyer_length>-1:# !=3:
dlg = wx.TextEntryDialog(self, 'Type "Species Population Id" with space as separation', 'Could not read bar code', '')
dlg.SetValue("")
self.pnl = MyFrame(dlg, -1, "Label not read", size=(int(width*sc),int(height*sc)), pos = (800,100), style = wx.DEFAULT_FRAME_STYLE, pic = img)
self.pnl.Show(True)
if dlg.ShowModal() == wx.ID_OK:
txtvalue = dlg.GetValue() #genotype.split('_')
identifyer = txtvalue.split(' ')
identifyer_length = len(identifyer)
dlg.Destroy()
else:
text = "!!! Could not recover barcode for !!! :\n\n"
text += "Root directory: " + dir + "\n"
text += "File: " + file + "\n"
self.control.SetValue(text)
wx.Yield()
class MyFrame(wx.Frame):
def __init__(
self, parent, ID, title, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE, pic = None
):
wx.Frame.__init__(self, parent, ID, title, pos, size, style)
panel = wx.Panel(self, -1)
wx.StaticBitmap(panel, -1, pic.ConvertToBitmap(), (0, 0))
def OnCloseMe(self, event):
self.Close(True)
def OnCloseWindow(self, event):
self.Destroy()
app = wx.App(False)
frame = MainWindow(None, "Sample editor")
app.MainLoop()
| mit | 4,614,048,488,324,428,000 | 29.412429 | 150 | 0.585103 | false |
tato69/myfirstrepo | day3/makeasentece.py | 1 | 1633 | import random
s_nouns = ["A dude", "My mom", "The king", "Some guy", "A cat with rabies", "A sloth", "Your homie", "This cool guy my gardener met yesterday", "Superman"]
p_nouns = ["These dudes", "Both of my moms", "All the kings of the world", "Some guys", "All of a cattery's cats", "The multitude of sloths living under your bed", "Your homies", "Like, these, like, all these people", "Supermen"]
s_verbs = ["eats", "kicks", "gives", "treats", "meets with", "creates", "hacks", "configures", "spies on", "retards", "meows on", "flees from", "tries to automate", "explodes"]
p_verbs = ["eat", "kick", "give", "treat", "meet with", "create", "hack", "configure", "spy on", "retard", "meow on", "flee from", "try to automate", "explode"]
infinitives = ["to make a pie.", "for no apparent reason.", "because the sky is green.", "for a disease.", "to be able to make toast explode.", "to know more about archeology."]
def sing_sen_maker():
    '''Makes a random sentence from the different parts of speech. Uses a SINGULAR subject'''
a = raw_input("Would you like to add a new word?\n")
if a.lower() == "yes":
new_word = raw_input("Please enter a singular noun.\n")
# s_nouns.append(new_word)
while new_word == '':
new_word = raw_input("The string cannot be empty! Please enter a singular noun.\n")
print new_word, random.choice(s_verbs), random.choice(s_nouns).lower() , random.choice(infinitives)
elif a.lower() != "no":
print "only asnwer accepted is 'yes' or 'no'"
sing_sen_maker()
else:
print random.choice(s_nouns), random.choice(s_verbs), random.choice(s_nouns).lower() , random.choice(infinitives)
sing_sen_maker()
| apache-2.0 | -213,020,807,480,288,740 | 67.041667 | 229 | 0.670545 | false |
jut-io/jut-python-tools | jut/cli.py | 1 | 17203 | """
main entry point for jut tools
"""
import argparse
import sys
import traceback
from jut import defaults, config
from jut.commands import configs, jobs, programs, run, upload
from jut.common import error, is_debug_enabled
from jut.exceptions import JutException
def parse_key_value(string):
"""
internally used method to parse 'x=y' strings into a tuple (x,y)
"""
return tuple(string.split('='))
def main():
class JutArgParser(argparse.ArgumentParser):
"""
custom argument parser so we show the full comand line help menu
"""
def error(self, message):
error(message)
self.print_help()
sys.exit(2)
parser = JutArgParser(description='jut - jut command line tools')
commands = parser.add_subparsers(dest='subcommand')
# config commands
config_parser = commands.add_parser('config',
help='configuration management')
config_commands = config_parser.add_subparsers(dest='config_subcommand')
_ = config_commands.add_parser('list',
help='list configurations')
defaults_config = config_commands.add_parser('defaults',
help='change the configuration defaults')
defaults_config.add_argument('-u', '--username',
help='username to use')
defaults_config.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io '
'INTERNAL USE)')
add_config = config_commands.add_parser('add',
help='add another configuration '
'(default when no sub command '
'is provided)')
add_config.add_argument('-u', '--username',
help='username to use')
add_config.add_argument('-p', '--password',
help='password to use')
add_config.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
add_config.add_argument('-d', '--default',
action='store_true',
help='sets this configuration to the default')
add_config.add_argument('-s', '--show-password',
action='store_true',
default=False,
help='shows password as you type it interactively')
rm_config = config_commands.add_parser('rm',
help='remove a configuration')
rm_config.add_argument('-u', '--username',
help='username to use')
rm_config.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
# jobs commands
jobs_parser = commands.add_parser('jobs',
help='jobs management')
jobs_commands = jobs_parser.add_subparsers(dest='jobs_subcommand')
list_jobs = jobs_commands.add_parser('list',
help='list running jobs')
list_jobs.add_argument('-d', '--deployment',
default=None,
help='specify the deployment name')
list_jobs.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
list_jobs.add_argument('-f', '--format',
default='table',
help='available formats are text, table with '
'default: table')
kill_job = jobs_commands.add_parser('kill',
help='kill running job')
kill_job.add_argument('job_id',
help='specify the job_id to kill')
kill_job.add_argument('-d', '--deployment',
default=None,
help='specify the deployment name')
kill_job.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
kill_job.add_argument('-y', '--yes',
action='store_true',
default=False,
help='kill without prompting for confirmation')
connect_job = jobs_commands.add_parser('connect',
help='connect to a persistent job')
connect_job.add_argument('job_id',
help='specify the job_id to connect to')
connect_job.add_argument('-d', '--deployment',
default=None,
help='specify the deployment name')
connect_job.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
connect_job.add_argument('-s', '--show-progress',
action='store_true',
default=False,
help='writes the progress out to stderr on how '
'many points were streamed thus far')
connect_job.add_argument('--retry',
type=int,
default=0,
help='retry running the program N times,'
'default 0. Use -1 to retry forever.')
connect_job.add_argument('--retry-delay',
type=int,
default=10,
help='number of seconds to wait between retries.')
connect_job.add_argument('-f', '--format',
default='json',
help='available formats are json, text, csv with '
'default: json')
# programs commands
programs_parser = commands.add_parser('programs',
help='programs management')
programs_commands = programs_parser.add_subparsers(dest='programs_subcommand')
list_programs = programs_commands.add_parser('list',
help='list programs')
list_programs.add_argument('-d', '--deployment',
default=None,
help='specify the deployment name')
list_programs.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
list_programs.add_argument('-f', '--format',
default='table',
help='available formats are text, table with '
'default: table')
list_programs.add_argument('--all',
default=False,
help='list all programs, default is to list your'
' own programs')
run_programs = programs_commands.add_parser('run',
help='run a program in your local browser')
run_programs.add_argument('program_name',
help='specify the program name you wish to kick off')
pull_programs = programs_commands.add_parser('pull',
help='pull programs')
pull_programs.add_argument('directory',
help='directory to pull remote programs into')
pull_programs.add_argument('-d', '--deployment',
default=None,
help='specify the deployment name')
pull_programs.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
pull_programs.add_argument('-p', '--per-user-directory',
action='store_true',
default=False,
help='save the programs per user into a '
'separate directory')
pull_programs.add_argument('--all',
action='store_true',
default=False,
help='pull all programs, default is to list your'
' own programs')
push_programs = programs_commands.add_parser('push',
help='push programs')
push_programs.add_argument('directory',
help='directory to pick up programs to push to '
'the running Jut instance')
push_programs.add_argument('-d', '--deployment',
default=None,
help='specify the deployment name')
push_programs.add_argument('-a', '--app-url',
default=defaults.APP_URL,
help='app url (default: https://app.jut.io INTERNAL USE)')
push_programs.add_argument('--all',
default=False,
help='pull all programs, default is to list your'
' own programs')
# upload commands
upload_parser = commands.add_parser('upload',
help='upload local JSON file(s) to Jut')
if sys.stdin.isatty():
upload_parser.add_argument('source',
help='The name of a JSON file or directory '
'containing JSON files to process')
upload_parser.add_argument('-u', '--url',
help='The URL to POST data points to, if none is '
'specified we will push to the webhook for '
'the default configuration')
upload_parser.add_argument('-d', '--deployment',
dest='deployment',
default=None,
help='specify the deployment name')
upload_parser.add_argument('-s', '--space',
dest='space',
default='default',
help='specify the destination space')
upload_parser.add_argument('--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Just log the data that would have been '
'POSTed to the specified URL.')
upload_parser.add_argument('--batch-size',
dest='batch_size',
default=100,
type=int,
help='Maximum set of data points to send in each '
'POST, default: 100.')
upload_parser.add_argument('--anonymize-fields',
metavar='field_name',
dest='anonymize_fields',
nargs='+',
default=[],
help='space separated field names to anonymize '
'in the data before uploading. Currently '
'we anonymize hashing the field value with '
'md5 hash')
upload_parser.add_argument('--remove-fields',
metavar='field_name',
dest='remove_fields',
nargs='+',
default=[],
help='space separated field names to remove '
'from the data before uploading')
upload_parser.add_argument('--rename-fields',
metavar='field_name=new_field_name',
dest='rename_fields',
type=parse_key_value,
nargs='+',
default=[],
help='space separated field names to rename '
'from the data before uploading.')
# run parser
run_parser = commands.add_parser('run',
help='run juttle program from the import '
'command line')
run_parser.add_argument('juttle',
help='juttle program to execute or the filename '
'of a juttle program.')
run_parser.add_argument('-d', '--deployment',
dest='deployment',
default=None,
help='specify the deployment name')
run_parser.add_argument('-f', '--format',
default='json',
help='available formats are json, text, csv with '
'default: json')
run_parser.add_argument('-n', '--name',
help='give your program a name to appear in the '
'Jobs application')
run_parser.add_argument('-p', '--persist',
action='store_true',
default=False,
help='allow the program containing background '
'outputs to become a persistent job by '
'disconnecting form the running job (ie '
'essentially backgrounding your program)')
run_parser.add_argument('-s', '--show-progress',
action='store_true',
default=False,
help='writes the progress out to stderr on how '
'many points were streamed thus far')
run_parser.add_argument('--retry',
type=int,
default=0,
help='retry running the program N times,'
'default 0. Use -1 to retry forever.')
run_parser.add_argument('--retry-delay',
type=int,
default=10,
help='number of seconds to wait between retries.')
options = parser.parse_args()
try:
if options.subcommand == 'config':
if options.config_subcommand == 'list':
config.show()
elif options.config_subcommand == 'add':
configs.add_configuration(options)
elif options.config_subcommand == 'rm':
configs.rm_configuration(options)
elif options.config_subcommand == 'defaults':
configs.change_defaults(options)
else:
raise Exception('Unexpected config subcommand "%s"' % options.command)
elif options.subcommand == 'jobs':
if options.jobs_subcommand == 'list':
jobs.list(options)
elif options.jobs_subcommand == 'kill':
jobs.kill(options)
elif options.jobs_subcommand == 'connect':
jobs.connect(options)
else:
raise Exception('Unexpected jobs subcommand "%s"' % options.command)
elif options.subcommand == 'programs':
if options.programs_subcommand == 'list':
programs.list(options)
elif options.programs_subcommand == 'pull':
programs.pull(options)
elif options.programs_subcommand == 'push':
programs.push(options)
elif options.programs_subcommand == 'run':
programs.run(options)
else:
raise Exception('Unexpected programs subcommand "%s"' % options.command)
elif options.subcommand == 'upload':
upload.upload_file(options)
elif options.subcommand == 'run':
run.run_juttle(options)
else:
raise Exception('Unexpected jut command "%s"' % options.command)
except JutException as exception:
if is_debug_enabled():
traceback.print_exc()
error(str(exception))
sys.exit(255)
if __name__ == '__main__':
main()
| mit | -4,553,299,491,324,600,000 | 38.821759 | 91 | 0.456374 | false |
catapult-project/catapult | common/py_utils/py_utils/refactor/annotated_symbol/class_definition.py | 3 | 1291 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import symbol
from py_utils.refactor.annotated_symbol import base_symbol
__all__ = [
'Class',
]
class Class(base_symbol.AnnotatedSymbol):
@classmethod
def Annotate(cls, symbol_type, children):
if symbol_type != symbol.stmt:
return None
compound_statement = children[0]
if compound_statement.type != symbol.compound_stmt:
return None
statement = compound_statement.children[0]
if statement.type == symbol.classdef:
return cls(statement.type, statement.children)
elif (statement.type == symbol.decorated and
statement.children[-1].type == symbol.classdef):
return cls(statement.type, statement.children)
else:
return None
@property
def suite(self):
# TODO: Complete.
raise NotImplementedError()
def FindChild(self, snippet_type, **kwargs):
return self.suite.FindChild(snippet_type, **kwargs)
def FindChildren(self, snippet_type):
return self.suite.FindChildren(snippet_type)
def Cut(self, child):
self.suite.Cut(child)
def Paste(self, child):
self.suite.Paste(child)
| bsd-3-clause | -7,862,515,505,693,531,000 | 24.82 | 72 | 0.701007 | false |
mozilla/kuma | kuma/attachments/tests/test_templates.py | 1 | 1910 | import pytest
from pyquery import PyQuery as pq
from kuma.core.urlresolvers import reverse
from kuma.core.utils import to_html
from kuma.wiki.models import Revision
from . import make_test_file
from ..models import Attachment
@pytest.mark.security
def test_xss_file_attachment_title(admin_client, constance_config, root_doc,
wiki_user, editor_client, settings):
constance_config.WIKI_ATTACHMENT_ALLOWED_TYPES = 'text/plain'
# use view to create new attachment
file_for_upload = make_test_file()
files_url = reverse('attachments.edit_attachment',
kwargs={'document_path': root_doc.slug})
title = '"><img src=x onerror=prompt(navigator.userAgent);>'
post_data = {
'title': title,
'description': 'xss',
'comment': 'xss',
'file': file_for_upload,
}
response = admin_client.post(files_url, data=post_data,
HTTP_HOST=settings.WIKI_HOST)
assert response.status_code == 302
# now stick it in/on a document
attachment = Attachment.objects.get(title=title)
content = '<img src="%s" />' % attachment.get_file_url()
root_doc.current_revision = Revision.objects.create(
document=root_doc, creator=wiki_user, content=content)
# view it and verify markup is escaped
response = editor_client.get(root_doc.get_edit_url(),
HTTP_HOST=settings.WIKI_HOST)
assert response.status_code == 200
doc = pq(response.content)
text = doc('.page-attachments-table .attachment-name-cell').text()
assert text == ('%s\nxss' % title)
html = to_html(doc('.page-attachments-table .attachment-name-cell'))
assert '><img src=x onerror=prompt(navigator.userAgent);>' in html
# security bug 1272791
for script in doc('script'):
assert title not in script.text_content()
| mpl-2.0 | 6,381,598,535,668,310,000 | 37.979592 | 79 | 0.646073 | false |
aomelchenko/python_koans | python2/koans/about_classes.py | 1 | 4830 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutClasses(Koan):
class Dog(object):
"Dogs need regular walkies. Never, ever let them drive."
def test_instances_of_classes_can_be_created_adding_parentheses(self):
fido = self.Dog()
self.assertEqual('Dog', type(fido).__name__)
def test_classes_have_docstrings(self):
self.assertMatch("Dogs need regular walkies. Never, ever let them drive.", self.Dog.__doc__)
# ------------------------------------------------------------------
class Dog2(object):
def __init__(self):
self._name = 'Paul'
def set_name(self, a_name):
self._name = a_name
def test_init_method_is_the_constructor(self):
dog = self.Dog2()
self.assertEqual('Paul', dog._name)
def test_private_attributes_are_not_really_private(self):
dog = self.Dog2()
dog.set_name("Fido")
self.assertEqual("Fido", dog._name)
# The _ prefix in _name implies private ownership, but nothing is truly
# private in Python.
def test_you_can_also_access_the_value_out_using_getattr_and_dict(self):
fido = self.Dog2()
fido.set_name("Fido")
self.assertEqual("Fido", getattr(fido, "_name"))
# getattr(), setattr() and delattr() are a way of accessing attributes
# by method rather than through assignment operators
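        # For example, setattr(fido, "_name", "Rex") has the same effect as
        # fido._name = "Rex", and delattr(fido, "_name") removes the attribute.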
self.assertEqual("Fido", fido.__dict__["_name"])
# Yes, this works here, but don't rely on the __dict__ object! Some
# class implementations use optimization which result in __dict__ not
# showing everything.
# ------------------------------------------------------------------
class Dog3(object):
def __init__(self):
self._name = None
def set_name(self, a_name):
self._name = a_name
def get_name(self):
return self._name
name = property(get_name, set_name)
def test_that_name_can_be_read_as_a_property(self):
fido = self.Dog3()
fido.set_name("Fido")
self.assertEqual("Fido", fido.get_name()) # access as method
self.assertEqual("Fido", fido.name) # access as property
# ------------------------------------------------------------------
class Dog4(object):
def __init__(self):
self._name = None
@property
def name(self):
return self._name
@name.setter
def name(self, a_name):
self._name = a_name
def test_creating_properties_with_decorators_is_slightly_easier(self):
fido = self.Dog4()
fido.name = "Fido"
self.assertEqual("Fido", fido.name)
# ------------------------------------------------------------------
class Dog5(object):
def __init__(self, initial_name):
self._name = initial_name
@property
def name(self):
return self._name
def test_init_provides_initial_values_for_instance_variables(self):
fido = self.Dog5("Fido")
self.assertEqual("Fido", fido.name)
def test_args_must_match_init(self):
self.assertRaises(TypeError, self.Dog5) # Evaluates self.Dog5()
# THINK ABOUT IT:
# Why is this so?
def test_different_objects_have_difference_instance_variables(self):
fido = self.Dog5("Fido")
rover = self.Dog5("Rover")
self.assertEqual(False, rover.name == fido.name)
# ------------------------------------------------------------------
class Dog6(object):
def __init__(self, initial_name):
self._name = initial_name
def get_self(self):
return self
def __str__(self):
return self._name
def __repr__(self):
return "<Dog named '" + self._name + "'>"
def test_inside_a_method_self_refers_to_the_containing_object(self):
fido = self.Dog6("Fido")
self.assertEqual(fido, fido.get_self()) # Not a string!
def test_str_provides_a_string_version_of_the_object(self):
fido = self.Dog6("Fido")
self.assertEqual('Fido', str(fido))
def test_str_is_used_explicitly_in_string_interpolation(self):
fido = self.Dog6("Fido")
self.assertEqual("My dog is Fido", "My dog is " + str(fido))
def test_repr_provides_a_more_complete_string_version(self):
fido = self.Dog6("Fido")
self.assertEqual("<Dog named 'Fido'>", repr(fido))
def test_all_objects_support_str_and_repr(self):
seq = [1, 2, 3]
self.assertEqual('[1, 2, 3]', str(seq))
self.assertEqual("[1, 2, 3]", repr(seq))
self.assertEqual("STRING", str("STRING"))
self.assertEqual("'STRING'", repr("STRING"))
| mit | 4,466,670,656,665,157,600 | 29.764331 | 100 | 0.540373 | false |
FabriceSalvaire/python-project-template | RootModule/Config/Messages.py | 1 | 1373 | ####################################################################################################
#
# @Project@ - @ProjectDescription@.
# Copyright (C) Fabrice Salvaire 2013
#
####################################################################################################
####################################################################################################
about_laptop_control_panel = """
@ProjectDescription@..
"""
####################################################################################################
system_information_message_pattern = """
<h2>RootModule %(babel_version)s</h2>
<h2>Host %(node)s</h2>
<h3>Hardware</h3>
<ul>
<li>Machine: %(machine)s</li>
<li>Architecture: %(architecture)s</li>
<li>CPU: %(cpu)s</li>
<li>Number of cores: %(number_of_cores)u</li>
<li>Memory Size: %(memory_size_mb)u MB</li>
</ul>
<h3>OpenGL</h3>
<ul>
<li>Render: %(gl_renderer)s</li>
<li>Version: %(gl_version)s</li>
<li>Vendor: %(gl_vendor)s</li>
</ul>
<h3>Software Versions</h3>
<ul>
<li>OS: %(os)s %(distribution)s</li>
<li>Python %(python_version)s</li>
<li>Qt %(qt_version)s</li>
<li>PyQt %(pyqt_version)s</li>
</ul>
"""
####################################################################################################
#
# End
#
####################################################################################################
| gpl-3.0 | -953,366,365,162,987,300 | 28.847826 | 100 | 0.353241 | false |
mouthwateringmedia/python-docx | example-makedocument.py | 1 | 3666 | #!/usr/bin/env python2.6
'''
This file makes a docx (Office 2007) file from scratch, showing off most of python-docx's features.
If you need to make documents from scratch, use this file as a basis for your work.
Part of Python's docx module - http://github.com/mikemaccana/python-docx
See LICENSE for licensing information.
'''
from docx import *
if __name__ == '__main__':
# Default set of relationshipships - these are the minimum components of a document
relationships = relationshiplist()
# Make a new document tree - this is the main part of a Word document
document = newdocument()
# This xpath location is where most interesting content lives
docbody = document.xpath('/w:document/w:body', namespaces=nsprefixes)[0]
# Append two headings and a paragraph
docbody.append(heading('''Welcome to Python's docx module''',1) )
docbody.append(heading('Make and edit docx in 200 lines of pure Python',2))
    docbody.append(paragraph('The module was created when I was looking for Python support for MS Word .doc files on PyPI and Stackoverflow. Unfortunately, the only solutions I could find used:'))
# Add a numbered list
for point in ['''COM automation''','''.net or Java''','''Automating OpenOffice or MS Office''']:
docbody.append(paragraph(point,style='ListNumber'))
docbody.append(paragraph('''For those of us who prefer something simpler, I made docx.'''))
docbody.append(heading('Making documents',2))
docbody.append(paragraph('''The docx module has the following features:'''))
# Add some bullets
for point in ['Paragraphs','Bullets','Numbered lists','Multiple levels of headings','Tables','Document Properties']:
docbody.append(paragraph(point,style='ListBullet'))
docbody.append(paragraph('Tables are just lists of lists, like this:'))
# Append a table
docbody.append(table([['A1','A2','A3'],['B1','B2','B3'],['C1','C2','C3']]))
docbody.append(heading('Editing documents',2))
docbody.append(paragraph('Thanks to the awesomeness of the lxml module, we can:'))
for point in ['Search and replace','Extract plain text of document','Add and delete items anywhere within the document']:
docbody.append(paragraph(point,style='ListBullet'))
# Add an image
relationships,picpara = picture(relationships,'image1.png','This is a test description')
docbody.append(picpara)
# Search and replace
print 'Searching for something in a paragraph ...',
if search(docbody, 'the awesomeness'): print 'found it!'
else: print 'nope.'
print 'Searching for something in a heading ...',
if search(docbody, '200 lines'): print 'found it!'
else: print 'nope.'
print 'Replacing ...',
docbody = replace(docbody,'the awesomeness','the goshdarned awesomeness')
print 'done.'
# Add a pagebreak
docbody.append(pagebreak(type='page', orient='portrait'))
docbody.append(heading('Ideas? Questions? Want to contribute?',2))
docbody.append(paragraph('''Email <[email protected]>'''))
# Create our properties, contenttypes, and other support files
coreprops = coreproperties(title='Python docx demo',subject='A practical example of making docx from Python',creator='Mike MacCana',keywords=['python','Office Open XML','Word'])
appprops = appproperties()
contenttypes = contenttypes()
websettings = websettings()
wordrelationships = wordrelationships(relationships)
# Save our document
savedocx(document,coreprops,appprops,contenttypes,websettings,wordrelationships,'Welcome to the Python docx module.docx') | mit | 3,625,672,895,550,942,700 | 45.417722 | 198 | 0.698036 | false |
juju/python-libjuju | juju/provisioner.py | 1 | 11307 | import os
import re
import shlex
import tempfile
import uuid
from subprocess import CalledProcessError
import paramiko
from .client import client
arches = [
[re.compile(r"amd64|x86_64"), "amd64"],
[re.compile(r"i?[3-9]86"), "i386"],
[re.compile(r"(arm$)|(armv.*)"), "armhf"],
[re.compile(r"aarch64"), "arm64"],
[re.compile(r"ppc64|ppc64el|ppc64le"), "ppc64el"],
[re.compile(r"s390x?"), "s390x"],
]
def normalize_arch(rawArch):
"""Normalize the architecture string."""
for arch in arches:
if arch[0].match(rawArch):
return arch[1]
DETECTION_SCRIPT = """#!/bin/bash
set -e
os_id=$(grep '^ID=' /etc/os-release | tr -d '"' | cut -d= -f2)
if [ "$os_id" = 'centos' ]; then
os_version=$(grep '^VERSION_ID=' /etc/os-release | tr -d '"' | cut -d= -f2)
echo "centos$os_version"
else
lsb_release -cs
fi
uname -m
grep MemTotal /proc/meminfo
cat /proc/cpuinfo
"""
INITIALIZE_UBUNTU_SCRIPT = """set -e
(id ubuntu &> /dev/null) || useradd -m ubuntu -s /bin/bash
umask 0077
temp=$(mktemp)
echo 'ubuntu ALL=(ALL) NOPASSWD:ALL' > $temp
install -m 0440 $temp /etc/sudoers.d/90-juju-ubuntu
rm $temp
su ubuntu -c 'install -D -m 0600 /dev/null ~/.ssh/authorized_keys'
export authorized_keys="{}"
if [ ! -z "$authorized_keys" ]; then
su ubuntu -c 'echo $authorized_keys >> ~/.ssh/authorized_keys'
fi
"""
class SSHProvisioner:
"""Provision a manually created machine via SSH."""
user = ""
host = ""
private_key_path = ""
def __init__(self, user, host, private_key_path):
self.host = host
self.user = user
self.private_key_path = private_key_path
def _get_ssh_client(self, host, user, key):
"""Return a connected Paramiko ssh object.
:param str host: The host to connect to.
:param str user: The user to connect as.
:param str key: The private key to authenticate with.
:return: object: A paramiko.SSHClient
:raises: :class:`paramiko.ssh_exception.SSHException` if the
connection failed
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pkey = None
# Read the private key into a paramiko.RSAKey
if os.path.exists(key):
with open(key, 'r') as f:
pkey = paramiko.RSAKey.from_private_key(f)
#######################################################################
# There is a bug in some versions of OpenSSH 4.3 (CentOS/RHEL5) where #
# the server may not send the SSH_MSG_USERAUTH_BANNER message except #
# when responding to an auth_none request. For example, paramiko will #
# attempt to use password authentication when a password is set, but #
# the server could deny that, instead requesting keyboard-interactive.#
# The hack to workaround this is to attempt a reconnect, which will #
# receive the right banner, and authentication can proceed. See the #
# following for more info: #
# https://github.com/paramiko/paramiko/issues/432 #
# https://github.com/paramiko/paramiko/pull/438 #
#######################################################################
try:
ssh.connect(host, port=22, username=user, pkey=pkey)
except paramiko.ssh_exception.SSHException as e:
if 'Error reading SSH protocol banner' == str(e):
# Once more, with feeling
ssh.connect(host, port=22, username=user, pkey=pkey)
else:
# Reraise the original exception
raise e
return ssh
def _run_command(self, ssh, cmd, pty=True):
"""Run a command remotely via SSH.
:param object ssh: The SSHClient
:param str cmd: The command to execute
:param list cmd: The `shlex.split` command to execute
:param bool pty: Whether to allocate a pty
:return: tuple: The stdout and stderr of the command execution
:raises: :class:`CalledProcessError` if the command fails
"""
if isinstance(cmd, str):
cmd = shlex.split(cmd)
if type(cmd) is not list:
cmd = [cmd]
cmds = ' '.join(cmd)
stdin, stdout, stderr = ssh.exec_command(cmds, get_pty=pty)
retcode = stdout.channel.recv_exit_status()
if retcode > 0:
output = stderr.read().strip()
raise CalledProcessError(returncode=retcode, cmd=cmd,
output=output)
return (
stdout.read().decode('utf-8').strip(),
stderr.read().decode('utf-8').strip()
)
def _init_ubuntu_user(self):
"""Initialize the ubuntu user.
:return: bool: If the initialization was successful
:raises: :class:`paramiko.ssh_exception.AuthenticationException`
if the authentication fails
"""
ssh = None
try:
# Run w/o allocating a pty, so we fail if sudo prompts for a passwd
ssh = self._get_ssh_client(
self.host,
self.user,
self.private_key_path,
)
stdout, stderr = self._run_command(ssh, "sudo -n true", pty=False)
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
if ssh:
ssh.close()
# Infer the public key
public_key = None
public_key_path = "{}.pub".format(self.private_key_path)
if not os.path.exists(public_key_path):
raise FileNotFoundError(
"Public key '{}' doesn't exist.".format(public_key_path)
)
with open(public_key_path, "r") as f:
public_key = f.readline()
script = INITIALIZE_UBUNTU_SCRIPT.format(public_key)
try:
ssh = self._get_ssh_client(
self.host,
self.user,
self.private_key_path,
)
self._run_command(
ssh,
["sudo", "/bin/bash -c " + shlex.quote(script)],
pty=True
)
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
ssh.close()
return True
def _detect_hardware_and_os(self, ssh):
"""Detect the target hardware capabilities and OS series.
:param object ssh: The SSHClient
        :return: dict: The detected series, arch, cpu-cores and mem of the target
"""
info = {
'series': '',
'arch': '',
'cpu-cores': '',
'mem': '',
}
stdout, stderr = self._run_command(
ssh,
["sudo", "/bin/bash -c " + shlex.quote(DETECTION_SCRIPT)],
pty=True,
)
lines = stdout.split("\n")
info['series'] = lines[0].strip()
info['arch'] = normalize_arch(lines[1].strip())
memKb = re.split(r'\s+', lines[2])[1]
# Convert megabytes -> kilobytes
info['mem'] = round(int(memKb) / 1024)
        # Detect available CPUs: count each physical package's "cpu cores"
        # entry once, keyed by its "physical id"
        recorded = {}
        cpu_cores = 0
        physical_id = ""
        for line in lines[3:]:
            if line.find("physical id") == 0:
                physical_id = line.split(":")[1].strip()
            elif line.find("cpu cores") == 0:
                cores = line.split(":")[1].strip()
                if physical_id not in recorded.keys():
                    cpu_cores += int(cores)
                    recorded[physical_id] = True
        info['cpu-cores'] = cpu_cores
return info
def provision_machine(self):
"""Perform the initial provisioning of the target machine.
        :return: client.AddMachineParams: the parameters describing the new machine
:raises: :class:`paramiko.ssh_exception.AuthenticationException`
if the upload fails
"""
params = client.AddMachineParams()
if self._init_ubuntu_user():
try:
ssh = self._get_ssh_client(
self.host,
self.user,
self.private_key_path
)
hw = self._detect_hardware_and_os(ssh)
params.series = hw['series']
params.instance_id = "manual:{}".format(self.host)
params.nonce = "manual:{}:{}".format(
self.host,
str(uuid.uuid4()), # a nop for Juju w/manual machines
)
params.hardware_characteristics = {
'arch': hw['arch'],
'mem': int(hw['mem']),
'cpu-cores': int(hw['cpu-cores']),
}
params.addresses = [{
'value': self.host,
'type': 'ipv4',
'scope': 'public',
}]
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
ssh.close()
return params
async def install_agent(self, connection, nonce, machine_id):
"""
:param object connection: Connection to Juju API
:param str nonce: The nonce machine specification
:param str machine_id: The id assigned to the machine
        :return: None
"""
# The path where the Juju agent should be installed.
data_dir = "/var/lib/juju"
# Disabling this prevents `apt-get update` from running initially, so
# charms will fail to deploy
disable_package_commands = False
client_facade = client.ClientFacade.from_connection(connection)
results = await client_facade.ProvisioningScript(
data_dir=data_dir,
disable_package_commands=disable_package_commands,
machine_id=machine_id,
nonce=nonce,
)
self._run_configure_script(results.script)
def _run_configure_script(self, script):
"""Run the script to install the Juju agent on the target machine.
:param str script: The script returned by the ProvisioningScript API
:raises: :class:`paramiko.ssh_exception.AuthenticationException`
if the upload fails
"""
_, tmpFile = tempfile.mkstemp()
with open(tmpFile, 'w') as f:
f.write(script)
try:
# get ssh client
ssh = self._get_ssh_client(
self.host,
"ubuntu",
self.private_key_path,
)
# copy the local copy of the script to the remote machine
sftp = paramiko.SFTPClient.from_transport(ssh.get_transport())
sftp.put(
tmpFile,
tmpFile,
)
# run the provisioning script
stdout, stderr = self._run_command(
ssh,
"sudo /bin/bash {}".format(tmpFile),
)
except paramiko.ssh_exception.AuthenticationException as e:
raise e
finally:
os.remove(tmpFile)
ssh.close()
| apache-2.0 | -8,809,007,503,847,249,000 | 30.761236 | 79 | 0.533386 | false |
cmarzullo/saltscaffold | setup.py | 1 | 1417 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from pypandoc import convert
read_md = lambda f: convert(f, 'rst')
except ImportError:
print("warning: pypandoc module not found, could not convert Markdown to RST")
read_md = lambda f: open(f, 'r').read()
config = {
'description': 'sets up files and directories for a new salt formula',
'long_description': read_md('README.md'),
'author': 'Christopher Marzullo',
'url': 'https://github.com/cmarzullo/saltsaffold',
'author_email': '[email protected]',
'version': '3.0.5',
'install_requires': ['nose','mako'],
'packages': ['saltscaffold'],
'package_data': {
'': [
'skel/*.md',
'skel/*.txt',
'skel/*.sls',
'skel/.gitignore',
'skel/.kitchen.yml',
'skel/.kitchen-ci.yml',
'skel/Gemfile',
'skel/formula/*.sls',
'skel/formula/map.jinja',
'skel/formula/defaults.yml',
'skel/formula/files/config.conf',
'skel/test/integration/default/serverspec/_spec.rb',
'skel/test/mockup/*.sls'
]
},
'scripts': [],
'name': 'Saltscaffold',
'entry_points': {
'console_scripts': [
'saltscaffold = saltscaffold.__main__:main',
]
}
}
setup(**config)
| gpl-3.0 | -3,921,111,098,714,544,600 | 28.520833 | 82 | 0.547636 | false |
sanguinariojoe/FreeCAD | src/Mod/Arch/importWebGL.py | 9 | 44100 | #***************************************************************************
#* Copyright (c) 2013 Yorik van Havre <[email protected]> *
#* Copyright (c) 2020 Travis Apple <[email protected]> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
#
# REFS:
# https://github.com/mrdoob/three.js/blob/master/examples/webgl_interactive_buffergeometry.html
# https://threejs.org/examples/#webgl_buffergeometry_lines
# https://forum.freecadweb.org/viewtopic.php?t=51245
# https://forum.freecadweb.org/viewtopic.php?t=29487
# https://threejs.org/examples/#webgl_raycast_sprite
#
# Params for export()
# 'colors' is of the form: {'Body': [1,0,0], 'Body001': [1,1,0], 'Body002': [1,0,1] }
# 'camera' is of the form: "PerspectiveCamera {\n viewportMapping ADJUST_CAMERA\n position 30.242626 -51.772324 85.63475\n orientation -0.4146691 0.088459305 -0.90566254 4.7065201\nnearDistance 53.126431\n farDistance 123.09125\n aspectRatio 1\n focalDistance 104.53851\n heightAngle 0.78539819\n\n}"
# The 'camera' string for the active document may be generated from: import OfflineRenderingUtils; OfflineRenderingUtils.getCamera(FreeCAD.ActiveDocument.FileName);
#
# Development reload oneliner:
# def re(): from importlib import reload;import importWebGL;reload(importWebGL);o=FreeCAD.getDocument("YourDocName");importWebGL.export([o.getObject("YourBodyName")],u"C:/path/to/your/file.htm");
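#
# Example invocation (illustrative only -- the document, object and colour names are placeholders,
# and it assumes export() accepts the 'colors' and 'camera' keyword arguments described above):
# import importWebGL, OfflineRenderingUtils
# doc = FreeCAD.ActiveDocument
# cam = OfflineRenderingUtils.getCamera(doc.FileName)
# importWebGL.export([doc.getObject("Body")], u"C:/path/to/your/file.htm", colors={'Body': [1,0,0]}, camera=cam)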
"""FreeCAD WebGL Exporter"""
import FreeCAD,Mesh,Draft,Part,OfflineRenderingUtils,json,six
import textwrap
if FreeCAD.GuiUp:
import FreeCADGui
from DraftTools import translate
else:
FreeCADGui = None
def translate(ctxt, txt): return txt
if open.__module__ in ['__builtin__','io']: pythonopen = open
## @package importWebGL
# \ingroup ARCH
# \brief FreeCAD WebGL Exporter
#
# This module provides tools to export HTML files containing the
# exported objects in WebGL format and a simple three.js-based viewer.
disableCompression = False # Compress object data before sending to JS
base = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!#$%&()*+-:;/=>?@[]^_,.{|}~`' # safe str chars for js in all cases
baseFloat = ',.-0123456789'
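# The two alphabets above define the optional compression format: integers are
# packed as fixed-width little-endian numbers over 'base', floats are re-encoded
# via 'baseFloat', and the template's baseDecode()/floatDecode() undo both steps.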
def getHTMLTemplate():
return textwrap.dedent("""\
<!DOCTYPE html>
<html lang="en">
<head>
<title>$pagetitle</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, user-scalable=no, minimum-scale=1.0, maximum-scale=1.0">
<meta name="generator" content="FreeCAD $version">
<style>
* {
margin: 0;
padding: 0;
}
body {
background: #ffffff; /* Old browsers */
background: -moz-linear-gradient(top, #e3e9fc 0%, #ffffff 70%, #e2dab3 100%); /* FF3.6-15 */
background: -webkit-linear-gradient(top, #e3e9fc 0%,#ffffff 70%,#e2dab3 100%); /* Chrome10-25, Safari5.1-6 */
background: linear-gradient(to bottom, #e3e9fc 0%,#ffffff 70%,#e2dab3 100%); /* W3C, IE10+, FF16+, Chrome26+, Opera12+, Safari7+ */
width: 100vw;
height: 100vh;
}
canvas { display: block; }
#mainCanvas {
width: 100%;
height: 100%;
}
#arrowCanvas {
position: absolute;
left: 0px;
bottom: 0px;
width: 150px;
height: 150px;
z-index: 100;
}
select { width: 170px; }
</style>
</head>
<body>
<canvas id="mainCanvas"></canvas>
<canvas id="arrowCanvas"></canvas>
<script type="module">
// Direct from mrdoob: https://www.jsdelivr.com/package/npm/three
import * as THREE from 'https://cdn.jsdelivr.net/npm/[email protected]/build/three.module.js';
import { OrbitControls } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/controls/OrbitControls.js';
import { GUI } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/libs/dat.gui.module.js';
import { Line2 } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/lines/Line2.js';
import { LineMaterial } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/lines/LineMaterial.js';
import { LineGeometry } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/lines/LineGeometry.js';
import { EdgeSplitModifier } from 'https://cdn.jsdelivr.net/npm/[email protected]/examples/jsm/modifiers/EdgeSplitModifier.js';
const data = $data;
// Z is up for FreeCAD
THREE.Object3D.DefaultUp = new THREE.Vector3(0, 0, 1);
const defaultWireColor = new THREE.Color('rgb(0,0,0)');
const defaultWireLineWidth = 2; // in pixels
const raycasterObj = []; // list of obj that can mouseover highlight
const canvas = document.querySelector('#mainCanvas');
const scene = new THREE.Scene();
const renderer = new THREE.WebGLRenderer({
alpha: true,
antialias: true,
canvas: canvas
}); // Clear bg so we can set it with css
renderer.setClearColor(0x000000, 0);
let renderRequested = false;
// HemisphereLight gives different colors of light from the top
// and bottom simulating reflected light from the 'ground' and
// 'sky'
scene.add(new THREE.HemisphereLight(0xC7E8FF, 0xFFE3B3, 0.4));
const dLight1 = new THREE.DirectionalLight(0xffffff, 0.4);
dLight1.position.set(5, -2, 3);
scene.add(dLight1);
const dLight2 = new THREE.DirectionalLight(0xffffff, 0.4);
dLight2.position.set(-5, 2, 3);
scene.add(dLight2);
if (data.compressed) {
const base = data.base;
const baseFloat = data.baseFloat;
function baseDecode(input) {
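                        // The first character gives the fixed width of each
                        // encoded element; every following chunk of that width
                        // is a little-endian base-N integer, where N is the
                        // length of the shared 'base' alphabet.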
const baseCt = base.length;
const output = [];
const len = parseInt(input[0]); // num chars of each element
for (let i = 1; i < input.length; i += len) {
const str = input.substring(i, i + len).trim();
let val = 0;
for (let s = 0; s < str.length; s++) {
const ind = base.indexOf(str[s]);
val += ind * Math.pow(baseCt, s);
}
output.push(val);
}
return output;
}
function floatDecode(input) {
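                        // Each group of 4 'base' characters decodes to one
                        // integer, which is re-expanded into 7 characters of
                        // the 13-character 'baseFloat' alphabet (digits, comma,
                        // period, minus); the chunks are concatenated and
                        // trailing comma padding is stripped to recover the
                        // float list.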
const baseCt = base.length;
const baseFloatCt = baseFloat.length;
let numString = '';
for (let i = 0; i < input.length; i += 4) {
const b90chunk = input.substring(i, i + 4).trim();
let quotient = 0;
for (let s = 0; s < b90chunk.length; s++) {
const ind = base.indexOf(b90chunk[s]);
quotient += ind * Math.pow(baseCt, s);
}
let buffer = '';
for (let s = 0; s < 7; s++) {
buffer = baseFloat[quotient % baseFloatCt] + buffer;
quotient = parseInt(quotient / baseFloatCt);
}
numString += buffer;
}
let trailingCommas = 0;
for (let s = 1; s < 7; s++) {
if (numString[numString.length - s] == baseFloat[0]) {
trailingCommas++;
}
}
numString = numString.substring(0, numString.length - trailingCommas);
return numString;
}
// Decode from base90 and distribute the floats
for (const obj of data.objects) {
obj.floats = JSON.parse('[' + floatDecode(obj.floats) + ']');
obj.verts = baseDecode(obj.verts).map(x => obj.floats[x]);
obj.facets = baseDecode(obj.facets);
obj.wires = obj.wires.map(w => baseDecode(w).map(x => obj.floats[x]));
obj.facesToFacets = obj.facesToFacets.map(x => baseDecode(x));
}
}
// Get bounds for global clipping
const globalMaxMin = [{min: null, max: null},
{min: null, max: null},
{min: null, max: null}];
for (const obj of data.objects) {
for (let v = 0; v < obj.verts.length; v++) {
if (globalMaxMin[v % 3] === null
|| obj.verts[v] < globalMaxMin[v % 3].min) {
globalMaxMin[v % 3].min = obj.verts[v];
}
if (globalMaxMin[v % 3].max === null
|| obj.verts[v] > globalMaxMin[v % 3].max) {
globalMaxMin[v % 3].max = obj.verts[v];
}
}
}
let bigrange = 0;
// add a little extra
for (const i of globalMaxMin) {
const range = i.max - i.min;
if (range > bigrange) {
bigrange = range;
}
i.min -= range * 0.01;
i.max += range * 0.01;
}
const camCenter = new THREE.Vector3(
0.5 * (globalMaxMin[0].max - globalMaxMin[0].min) + globalMaxMin[0].min,
0.5 * (globalMaxMin[1].max - globalMaxMin[1].min) + globalMaxMin[1].min,
0.5 * (globalMaxMin[2].max - globalMaxMin[2].min) + globalMaxMin[2].min );
const viewSize = 1.5 * bigrange; // make the view area a little bigger than the object
const aspectRatio = canvas.clientWidth / canvas.clientHeight;
const originalAspect = aspectRatio;
function initCam(camera) {
// XXX this needs to treat the perspective and orthographic
// cameras differently
camera.position.set(
data.camera.position_x,
data.camera.position_y,
data.camera.position_z);
camera.lookAt(camCenter);
camera.updateMatrixWorld();
}
let cameraType = data.camera.type;
const persCamera = new THREE.PerspectiveCamera(
50, aspectRatio, 1, 100000);
initCam(persCamera);
const orthCamera = new THREE.OrthographicCamera(
-aspectRatio * viewSize / 2, aspectRatio * viewSize / 2,
viewSize / 2, -viewSize / 2, -100000, 100000);
initCam(orthCamera);
function assignMesh(positions, color, opacity, faces) {
const baseGeometry = new THREE.BufferGeometry();
baseGeometry.setAttribute('position', new THREE.BufferAttribute(
positions, 3));
// EdgeSplitModifier is used to combine verts so that smoothing normals can be generated WITHOUT removing the hard edges of the design
// REF: https://threejs.org/examples/?q=edge#webgl_modifier_edgesplit - https://github.com/mrdoob/three.js/pull/20535
const edgeSplit = new EdgeSplitModifier();
const cutOffAngle = 20;
const geometry = edgeSplit.modify(
baseGeometry, cutOffAngle * Math.PI / 180);
geometry.computeVertexNormals();
geometry.computeBoundingSphere();
const material = new THREE.MeshLambertMaterial({
color: color,
side: THREE.DoubleSide,
vertexColors: false,
flatShading: false,
opacity: opacity,
transparent: opacity != 1.0,
fog: false
});
const meshobj = new THREE.Mesh(geometry, material);
meshobj.name = meshobj.uuid;
faces.push(meshobj.uuid);
scene.add(meshobj);
raycasterObj.push(meshobj);
}
const objects = [];
for (const obj of data.objects) {
// Each face gets its own material because they each can
// have different colors
const faces = [];
if (obj.facesToFacets.length > 0) {
for (let f=0; f < obj.facesToFacets.length; f++) {
const facecolor = obj.faceColors.length > 0 ? obj.faceColors[f] : obj.color;
const positions = new Float32Array(obj.facesToFacets[f].length * 9);
for (let a=0; a < obj.facesToFacets[f].length; a++) {
for (let b=0; b < 3; b++) {
for (let c=0; c < 3; c++) {
positions[9 * a + 3 * b + c] = obj.verts[3 * obj.facets[3 * obj.facesToFacets[f][a] + b ] + c ];
}
}
}
assignMesh(positions, facecolor, obj.opacity, faces);
}
} else {
// No facesToFacets means that there was a tessellate()
// mismatch inside FreeCAD. Use all facets in object to
// create this mesh
const positions = new Float32Array(obj.facets.length * 3);
for (let a=0; a < obj.facets.length; a++) {
for (let b=0; b < 3; b++) {
positions[3 * a + b] = obj.verts[3 * obj.facets[a] + b];
}
}
assignMesh(positions, obj.color, obj.opacity, faces);
}
// Wires
// Cannot have lines wider than 1px in WebGL due to browser limitations, so the Line2 workaround lib is used
// REF: https://threejs.org/examples/?q=fat#webgl_lines_fat - https://jsfiddle.net/brLk6aud/1/
// This material is shared by all wires in this object
const wirematerial = new LineMaterial( {
color: defaultWireColor,
linewidth: defaultWireLineWidth,
dashed: false, dashSize: 1, gapSize: 1, dashScale: 3
} );
wirematerial.resolution.set(
canvas.clientWidth * window.devicePixelRatio,
canvas.clientHeight * window.devicePixelRatio);
const wires = [];
for (const w of obj.wires) {
const wiregeometry = new LineGeometry();
wiregeometry.setPositions(w);
const wire = new Line2(wiregeometry, wirematerial);
wire.computeLineDistances();
wire.scale.set(1, 1, 1);
wire.name = wire.uuid;
scene.add(wire);
wires.push(wire.name);
}
objects.push({
data: obj,
faces: faces,
wires: wires,
wirematerial: wirematerial
});
}
// ---- GUI Init ----
const gui = new GUI({ width: 300 });
const guiparams = {
wiretype: 'Normal',
wirewidth: defaultWireLineWidth,
wirecolor: '#' + defaultWireColor.getHexString(),
clippingx: 100,
clippingy: 100,
clippingz: 100,
cameraType: cameraType,
navright: function() { navChange([1, 0, 0]); },
navtop: function() { navChange([0, 0, 1]); },
navfront: function() { navChange([0, -1, 0]); }
};
// ---- Wires ----
const wiretypes = { Normal: 'Normal', Dashed: 'Dashed', None: 'None' };
const wireFolder = gui.addFolder('Wire');
wireFolder.add(guiparams, 'wiretype', wiretypes).name('Wire Display').onChange(wireChange);
wireFolder.add(guiparams, 'wirewidth').min(1).max(5).step(1).name('Wire Width').onChange(wireChange);
wireFolder.addColor(guiparams, 'wirecolor').name('Wire Color').onChange(wireChange);
function wireChange() {
for (const obj of objects) {
const m = obj.wirematerial;
if (m.dashed) {
if (guiparams.wiretype != 'Dashed') {
m.dashed = false;
delete m.defines.USE_DASH;
}
} else {
if (guiparams.wiretype == 'Dashed') {
m.dashed = true;
// Dashed lines require this as of r122. delete if not dashed
m.defines.USE_DASH = ""; // https://discourse.threejs.org/t/dashed-line2-material/10825
}
}
if (guiparams.wiretype == 'None') {
m.visible = false;
} else {
m.visible = true;
}
m.linewidth = guiparams.wirewidth;
m.color = new THREE.Color(guiparams.wirecolor);
m.needsUpdate = true;
}
requestRender();
}
wireChange();
// ---- Clipping ----
const clippingFolder = gui.addFolder('Clipping');
clippingFolder.add(guiparams, 'clippingx').min(0).max(100).step(1).name('X-Axis Clipping').onChange(clippingChange);
clippingFolder.add(guiparams, 'clippingy').min(0).max(100).step(1).name('Y-Axis Clipping').onChange(clippingChange);
clippingFolder.add(guiparams, 'clippingz').min(0).max(100).step(1).name('Z-Axis Clipping').onChange(clippingChange);
const clipPlaneX = new THREE.Plane(new THREE.Vector3( -1, 0, 0 ), 0);
const clipPlaneY = new THREE.Plane(new THREE.Vector3( 0, -1, 0 ), 0);
const clipPlaneZ = new THREE.Plane(new THREE.Vector3( 0, 0, -1 ), 0);
function clippingChange() {
if (guiparams.clippingx < 100 || guiparams.clippingy < 100 || guiparams.clippingz < 100) {
if (renderer.clippingPlanes.length == 0) {
renderer.clippingPlanes.push(clipPlaneX, clipPlaneY, clipPlaneZ);
}
}
clipPlaneX.constant = (globalMaxMin[0].max - globalMaxMin[0].min) * guiparams.clippingx / 100.0 + globalMaxMin[0].min;
clipPlaneY.constant = (globalMaxMin[1].max - globalMaxMin[1].min) * guiparams.clippingy / 100.0 + globalMaxMin[1].min;
clipPlaneZ.constant = (globalMaxMin[2].max - globalMaxMin[2].min) * guiparams.clippingz / 100.0 + globalMaxMin[2].min;
requestRender();
}
// ---- Camera & Navigation ----
const camFolder = gui.addFolder('Camera');
const cameraTypes = { Perspective: 'Perspective', Orthographic: 'Orthographic' };
camFolder.add(guiparams, 'cameraType', cameraTypes).name('Camera type').onChange(cameraChange);
camFolder.add(guiparams, 'navright').name('View Right');
camFolder.add(guiparams, 'navtop').name('View Top');
camFolder.add(guiparams, 'navfront').name('View Front');
function navChange(v) {
const t = new THREE.Vector3();
new THREE.Box3().setFromObject(scene).getSize(t);
persControls.object.position.set(
v[0] * t.x * 2 + camCenter.x,
v[1] * t.y * 2 + camCenter.y,
v[2] * t.z * 2 + camCenter.z);
persControls.target = camCenter;
persControls.update();
orthControls.object.position.set(
v[0] * t.x + camCenter.x,
v[1] * t.y + camCenter.y,
v[2] * t.z + camCenter.z);
orthControls.target = camCenter;
orthControls.update();
// controls.update() implicitly calls requestRender()
}
function cameraChange(v) {
cameraType = v;
requestRender();
}
const guiObjects = gui.addFolder('Objects');
for (const obj of objects) {
// Ignore objects with no vertices
if (obj.data.verts.length > 0) {
const guiObjData = {
obj: obj, color: obj.data.color, opacity: obj.data.opacity };
const guiObject = guiObjects.addFolder(obj.data.name);
guiObject.addColor(guiObjData, 'color').name('Color').onChange(GUIObjectChange);
guiObject.add(guiObjData, 'opacity').min(0.0).max(1.0).step(0.05).name('Opacity').onChange(GUIObjectChange);
}
}
function GUIObjectChange(v) {
for (const f of this.object.obj.faces) {
const m = scene.getObjectByName(f).material;
if (this.property == 'color') {
m.color.setStyle(v);
}
if (this.property == 'opacity') {
m.opacity = v;
m.transparent = (v != 1.0);
}
}
if (this.property == 'opacity') {
const m = this.object.obj.wirematerial;
m.opacity = v;
m.transparent = (v != 1.0);
}
requestRender();
}
// Make simple orientation arrows and box - REF: http://jsfiddle.net/b97zd1a3/16/
const arrowCanvas = document.querySelector('#arrowCanvas');
const arrowRenderer = new THREE.WebGLRenderer({
alpha: true,
canvas: arrowCanvas
}); // clear
arrowRenderer.setClearColor(0x000000, 0);
arrowRenderer.setSize(arrowCanvas.clientWidth * window.devicePixelRatio,
arrowCanvas.clientHeight * window.devicePixelRatio,
false);
const arrowScene = new THREE.Scene();
const arrowCamera = new THREE.PerspectiveCamera(
50, arrowCanvas.clientWidth / arrowCanvas.clientHeight, 1, 500 );
arrowCamera.up = persCamera.up; // important!
const arrowPos = new THREE.Vector3(0, 0, 0);
arrowScene.add(new THREE.ArrowHelper(
new THREE.Vector3(1, 0, 0), arrowPos, 60, 0x7F2020, 20, 10));
arrowScene.add(new THREE.ArrowHelper(
new THREE.Vector3(0, 1, 0), arrowPos, 60, 0x207F20, 20, 10));
arrowScene.add(new THREE.ArrowHelper(
new THREE.Vector3(0, 0, 1), arrowPos, 60, 0x20207F, 20, 10));
arrowScene.add(new THREE.Mesh(
new THREE.BoxGeometry(40, 40, 40),
new THREE.MeshLambertMaterial(
{ color: 0xaaaaaa, flatShading: false })
));
arrowScene.add(new THREE.HemisphereLight(0xC7E8FF, 0xFFE3B3, 1.2));
// Controls
const persControls = new OrbitControls(persCamera, renderer.domElement);
persControls.target = camCenter; // rotate around center of parts
// persControls.enablePan = false;
// persControls.enableDamping = true;
persControls.update();
const orthControls = new OrbitControls(orthCamera, renderer.domElement);
orthControls.target = camCenter; // rotate around center of parts
// orthControls.enablePan = false;
// orthControls.enableDamping = true;
orthControls.update();
function render() {
renderRequested = false;
persControls.update();
if (cameraType == 'Perspective') {
arrowCamera.position.copy(persCamera.position);
arrowCamera.position.sub(persControls.target);
}
orthControls.update();
if (cameraType == 'Orthographic') {
arrowCamera.position.copy(orthCamera.position);
arrowCamera.position.sub(orthControls.target);
}
arrowCamera.lookAt(arrowScene.position);
arrowCamera.position.setLength(200);
if (cameraType == 'Perspective') {
renderer.render(scene, persCamera);
}
if (cameraType == 'Orthographic') {
renderer.render(scene, orthCamera);
}
arrowRenderer.render(arrowScene, arrowCamera);
};
function requestRender() {
if (!renderRequested) {
renderRequested = true;
requestAnimationFrame(render);
}
}
persControls.addEventListener('change', requestRender);
orthControls.addEventListener('change', requestRender);
renderer.domElement.addEventListener('mousemove', onMouseMove);
window.addEventListener('resize', onMainCanvasResize, false);
onMainCanvasResize();
requestRender();
function onMainCanvasResize() {
const pixelRatio = window.devicePixelRatio;
const width = canvas.clientWidth * pixelRatio | 0;
const height = canvas.clientHeight * pixelRatio | 0;
const needResize = canvas.width !== width || canvas.height !== height;
const aspect = canvas.clientWidth / canvas.clientHeight;
if (needResize) {
renderer.setSize(width, height, false);
// See https://stackoverflow.com/questions/39373113/three-js-resize-window-not-scaling-properly
const change = originalAspect / aspect;
const newSize = viewSize * change;
orthCamera.left = -aspect * newSize / 2;
orthCamera.right = aspect * newSize / 2;
orthCamera.top = newSize / 2;
orthCamera.bottom = -newSize / 2;
orthCamera.updateProjectionMatrix();
persCamera.aspect = canvas.clientWidth / canvas.clientHeight;
persCamera.updateProjectionMatrix();
}
for (const obj of objects) {
obj.wirematerial.resolution.set(width, height);
}
requestRender();
}
// XXX use mouse click to toggle the gui for the selected object?
function onMouseMove(e) {
let c = false;
if (cameraType == 'Orthographic') {
c = orthCamera;
}
if (cameraType == 'Perspective') {
c = persCamera;
}
if (!c) {
return;
}
const raycaster = new THREE.Raycaster();
raycaster.setFromCamera(new THREE.Vector2(
(e.clientX / canvas.clientWidth) * 2 - 1,
-(e.clientY / canvas.clientHeight) * 2 + 1),
c);
const intersects = raycaster.intersectObjects(raycasterObj);
let chosen = '';
for (const i of intersects) {
const m = i.object.material;
if (m.opacity > 0) {
if (m.emissive.getHex() == 0x000000) {
m.emissive.setHex( 0x777777 );
m.needsUpdate = true;
requestRender();
}
chosen = i.object.name;
break;
}
}
for (const r of raycasterObj) {
if (r.name == chosen) {
continue;
}
if (r.material.emissive.getHex() != 0x000000) {
r.material.emissive.setHex(0x000000);
r.material.needsUpdate = true;
requestRender();
}
}
}
</script>
</body>
</html>
""")
def export( exportList, filename, colors = None, camera = None ):
"""Exports objects to an html file"""
global disableCompression, base, baseFloat
data = { 'camera':{}, 'file':{}, 'objects':[] }
if not FreeCADGui and not camera:
camera = OfflineRenderingUtils.getCamera(FreeCAD.ActiveDocument.FileName)
if camera:
# REF: https://github.com/FreeCAD/FreeCAD/blob/master/src/Mod/Arch/OfflineRenderingUtils.py
camnode = OfflineRenderingUtils.getCoinCamera(camera)
cameraPosition = camnode.position.getValue().getValue()
data['camera']['type'] = 'Orthographic'
if 'PerspectiveCamera' in camera: data['camera']['type'] = 'Perspective'
data['camera']['focalDistance'] = camnode.focalDistance.getValue()
data['camera']['position_x'] = cameraPosition[0]
data['camera']['position_y'] = cameraPosition[1]
data['camera']['position_z'] = cameraPosition[2]
else:
v = FreeCADGui.ActiveDocument.ActiveView
data['camera']['type'] = v.getCameraType()
data['camera']['focalDistance'] = v.getCameraNode().focalDistance.getValue()
data['camera']['position_x'] = v.viewPosition().Base.x
data['camera']['position_y'] = v.viewPosition().Base.y
data['camera']['position_z'] = v.viewPosition().Base.z
# Take the objects out of groups
objectslist = Draft.get_group_contents(exportList, walls=True, addgroups=False)
# objectslist = Arch.pruneIncluded(objectslist)
for obj in objectslist:
# Pull all obj data before we dig down the links
label = obj.Label
color = '#cccccc'
opacity = 1.0
if FreeCADGui:
color = Draft.getrgb(obj.ViewObject.ShapeColor, testbw = False)
opacity = int((100 - obj.ViewObject.Transparency)/5) / 20 # 0 to 1 in steps of 0.05
elif colors:
if label in colors:
color = Draft.getrgb(colors[label], testbw = False)
validObject = False
if obj.isDerivedFrom('Mesh::Feature'):
mesh = obj.Mesh
validObject = True
if obj.isDerivedFrom('Part::Feature'):
objShape = obj.Shape
validObject = True
if obj.isDerivedFrom('App::Link'):
linkPlacement = obj.LinkPlacement
while True: # drill down to get to the actual obj
if obj.isDerivedFrom("App::Link"):
if obj.ViewObject.OverrideMaterial: color = Draft.getrgb(obj.ViewObject.ShapeMaterial.DiffuseColor, testbw = False)
obj = obj.LinkedObject
if hasattr(obj, "__len__"):
FreeCAD.Console.PrintMessage(label + ": Sub-Links are Unsupported.\n")
break
elif obj.isDerivedFrom('Part::Feature'):
objShape = obj.Shape.copy(False)
objShape.Placement = linkPlacement
validObject = True
break
elif obj.isDerivedFrom("Mesh::Feature"):
mesh = obj.Mesh.copy()
mesh.Placement = linkPlacement
validObject = True
break
if not validObject: continue
objdata = { 'name': label, 'color': color, 'opacity': opacity, 'verts':'', 'facets':'', 'wires':[], 'faceColors':[], 'facesToFacets':[], 'floats':[] }
if obj.isDerivedFrom('Part::Feature'):
deviation = 0.5
if FreeCADGui:
deviation = obj.ViewObject.Deviation
# obj.ViewObject.DiffuseColor is length=1 when all faces are the same color, length=len(faces) for when they're not
if len(obj.ViewObject.DiffuseColor) == len(objShape.Faces):
for fc in obj.ViewObject.DiffuseColor:
objdata['faceColors'].append( Draft.getrgb(fc, testbw = False) )
# get verts and facets for ENTIRE object
shapeData = objShape.tessellate( deviation )
mesh = Mesh.Mesh(shapeData)
if len(objShape.Faces) > 1:
# Map each Facet created by tessellate() to a Face so that it can be colored correctly using faceColors
# This is done by matching the results of a tessellate() on EACH FACE to the overall tessellate stored in shapeData
# if there is any error in matching these two then we display the whole object as one face and forgo the face colors
for f in objShape.Faces:
faceData = f.tessellate( deviation )
found = True
for fv in range( len(faceData[0]) ): # face verts. List of type Vector()
found = False
for sv in range( len(shapeData[0]) ): #shape verts
if faceData[0][fv] == shapeData[0][sv]: # do not use isEqual() here
faceData[0][fv] = sv # replace with the index of shapeData[0]
found = True
break
if not found: break
if not found:
FreeCAD.Console.PrintMessage("Facet to Face Mismatch.\n")
objdata['facesToFacets'] = []
break
# map each of the face facets to the shape facets and make a list of shape facet indices that belong to this face
facetList = []
for ff in faceData[1]: # face facets
found = False
for sf in range( len(shapeData[1]) ): #shape facets
if faceData[0][ff[0]] in shapeData[1][sf] and faceData[0][ff[1]] in shapeData[1][sf] and faceData[0][ff[2]] in shapeData[1][sf]:
facetList.append(sf)
found = True
break
if not found: break
if not found:
FreeCAD.Console.PrintMessage("Facet List Mismatch.\n")
objdata['facesToFacets'] = []
break
objdata['facesToFacets'].append( baseEncode(facetList) )
wires = [] # Add wires
for f in objShape.Faces:
for w in f.Wires:
wo = Part.Wire(Part.__sortEdges__(w.Edges))
wire = []
for v in wo.discretize(QuasiDeflection = 0.005):
wire.append( '{:.5f}'.format(v.x) ) # use strings to avoid 0.00001 written as 1e-05
wire.append( '{:.5f}'.format(v.y) )
wire.append( '{:.5f}'.format(v.z) )
wires.append( wire )
if not disableCompression:
for w in range( len(wires) ):
for wv in range( len(wires[w]) ):
found = False
for f in range( len(objdata['floats']) ):
if objdata['floats'][f] == wires[w][wv]:
wires[w][wv] = f
found = True
break
if not found:
objdata['floats'].append( wires[w][wv] )
wires[w][wv] = len(objdata['floats'])-1
wires[w] = baseEncode(wires[w])
objdata['wires'] = wires
vIndex = {}
verts = []
for p in range( len(mesh.Points) ):
vIndex[ mesh.Points[p].Index ] = p
verts.append( '{:.5f}'.format(mesh.Points[p].Vector.x) )
verts.append( '{:.5f}'.format(mesh.Points[p].Vector.y) )
verts.append( '{:.5f}'.format(mesh.Points[p].Vector.z) )
# create floats list to compress verts and wires being written into the JS
if not disableCompression:
for v in range( len(verts) ):
found = False
for f in range( len(objdata['floats']) ):
if objdata['floats'][f] == verts[v]:
verts[v] = f
found = True
break
if not found:
objdata['floats'].append( verts[v] )
verts[v] = len(objdata['floats'])-1
objdata['verts'] = baseEncode(verts)
facets = []
for f in mesh.Facets:
for i in f.PointIndices:
facets.append( vIndex[i] )
objdata['facets'] = baseEncode(facets)
# compress floats
if not disableCompression:
# use ratio of 7x base13 to 4x base90 because 13^7 ~ 90^4
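# Quick sanity check of that ratio (assuming baseFloat is the 13-character set of
# digits plus '.', ',' and '-' that can appear in the joined float string):
#   13**7 = 62,748,517  <  90**4 = 65,610,000
# so every 7-character chunk of the float string fits losslessly into 4 base-90 chars.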
fullstr = json.dumps(objdata['floats'], separators=(',', ':'))
fullstr = fullstr.replace('[', '').replace(']', '').replace('"', '')
floatStr = ''
baseFloatCt = len(baseFloat)
baseCt = len(base)
for fs in range( 0, len(fullstr), 7 ): # chunks of 7 chars
str7 = fullstr[fs:(fs+7)]
quotient = 0
for s in range( len(str7) ):
quotient += baseFloat.find(str7[s]) * pow(baseFloatCt, (6-s))
for v in range(4):
floatStr += base[ quotient % baseCt ]
quotient = int(quotient / baseCt)
objdata['floats'] = floatStr
data['objects'].append( objdata )
html = getHTMLTemplate()
html = html.replace('$pagetitle',FreeCAD.ActiveDocument.Label)
version = FreeCAD.Version()
html = html.replace('$version',version[0] + '.' + version[1] + '.' + version[2])
# Tell the JS side whether the data is compressed
data['compressed'] = not disableCompression
data['base'] = base
data['baseFloat'] = baseFloat
html = html.replace('$data', json.dumps(data, separators=(',', ':')) ) # Shape Data
if six.PY2:
outfile = pythonopen(filename, "wb")
else:
outfile = pythonopen(filename, "w")
outfile.write( html )
outfile.close()
FreeCAD.Console.PrintMessage( translate("Arch", "Successfully written") + ' ' + filename + "\n" )
def baseEncode( arr ):
"""Compresses an array of ints into a base90 string"""
global disableCompression, base
if disableCompression: return arr
if len(arr) == 0: return ''
longest = 0
output = []
baseCt = len(base)
for v in range( len(arr) ):
buffer = ''
quotient = arr[v]
while True:
buffer += base[ quotient % baseCt ]
quotient = int(quotient / baseCt)
if quotient == 0: break
output.append( buffer )
if len(buffer) > longest: longest = len(buffer)
output = [('{:>'+str(longest)+'}').format(x) for x in output] # pad each element
return str(longest) + ('').join(output)
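# A minimal round-trip sketch (hypothetical input; assumes compression is enabled and
# `base` is the module-level base-90 alphabet mirrored by baseDecode() in the JS template):
#
#   encoded = baseEncode([3, 1, 270])
#   width = int(encoded[0])              # leading digit = padded width of each element
#   chunks = [encoded[i:i + width].strip() for i in range(1, len(encoded), width)]
#   values = [sum(base.index(c) * len(base) ** k for k, c in enumerate(chunk))
#             for chunk in chunks]       # -> [3, 1, 270], as the JS viewer reconstructs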
| lgpl-2.1 | 5,400,546,240,878,188,000 | 48.273743 | 311 | 0.474444 | false |
dhamonex/zoneminder-rpm-database-init | src/zm_dbinit/mysql_configuration.py | 1 | 1650 | # -*- coding: utf-8 -*-
import configparser
import os.path, shutil, os
class MySQLConfiguration:
""" Handles MySQL Configuration file """
ClientSection = "client"
def __init__(self, userprompt, mysqlConfigFile):
self.configfile = mysqlConfigFile
self.config = configparser.SafeConfigParser()
self.prompt = userprompt
def readConfigIfExists(self):
if os.path.isfile(self.configfile):
self.config.read(self.configfile)
return True
return False
def backupOldConfigFileIfExists(self):
if os.path.isfile(self.configfile):
shutil.copy(self.configfile, self.configfile + ".backup")
print("copied old " + self.configfile + " to " + self.configfile + ".backup")
def checkFile(self):
if not self.readConfigIfExists():
self.createConfigFile()
try:
user = self.config.get(MySQLConfiguration.ClientSection, "user")
password = self.config.get(MySQLConfiguration.ClientSection, "password")
if user == "" or password == "":
self.createConfigFile()
except configparser.Error:
self.createConfigFile()
def createConfigFile(self):
self.readConfigIfExists()
self.backupOldConfigFileIfExists()
self.config.add_section(MySQLConfiguration.ClientSection)
self.config.set(MySQLConfiguration.ClientSection, "user", "root")
self.config.set(MySQLConfiguration.ClientSection, "password", self.prompt.askForPassword("Enter mysql root password"))
with open(self.configfile, "w") as openFile:
self.config.write(openFile)
print("generated/updated " + self.configfile)
| gpl-2.0 | 100,483,313,955,220,320 | 29 | 122 | 0.681818 | false |
foursquare/luigi | luigi/contrib/hadoop.py | 1 | 37779 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Run Hadoop Mapreduce jobs using Hadoop Streaming. To run a job, you need
to subclass :py:class:`luigi.contrib.hadoop.JobTask` and implement the
``mapper`` and ``reducer`` methods. See :doc:`/example_top_artists` for
an example of how to run a Hadoop job.
"""
from __future__ import print_function
import abc
import datetime
import glob
import logging
import os
import pickle
import random
import re
import shutil
import signal
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import subprocess
import sys
import tempfile
import warnings
from hashlib import md5
from itertools import groupby
from luigi import six
from luigi import configuration
import luigi
import luigi.task
import luigi.contrib.gcs
import luigi.contrib.hdfs
import luigi.contrib.s3
from luigi.contrib import mrrunner
if six.PY2:
from itertools import imap as map
try:
# See benchmark at https://gist.github.com/mvj3/02dca2bcc8b0ef1bbfb5
import ujson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
_attached_packages = []
TRACKING_RE = re.compile(r'(tracking url|the url to track the job):\s+(?P<url>.+)$')
class hadoop(luigi.task.Config):
pool = luigi.OptionalParameter(
default=None,
description=(
'Hadoop pool to use for Hadoop tasks. To specify pools per task, '
'see BaseHadoopJobTask.pool'
),
)
def attach(*packages):
"""
Attach a python package to hadoop map reduce tarballs to make those packages available
on the hadoop cluster.
"""
_attached_packages.extend(packages)
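# A minimal usage sketch (the module name is hypothetical): attach() is typically called
# at import time in the file defining your JobTask so the dependency ends up in packages.tar:
#
#   import my_text_utils                       # assumed helper package used by the mapper
#   luigi.contrib.hadoop.attach(my_text_utils)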
def dereference(f):
if os.path.islink(f):
# by joining with the dirname we are certain to get the absolute path
return dereference(os.path.join(os.path.dirname(f), os.readlink(f)))
else:
return f
def get_extra_files(extra_files):
result = []
for f in extra_files:
if isinstance(f, str):
src, dst = f, os.path.basename(f)
elif isinstance(f, tuple):
src, dst = f
else:
raise Exception('extra_files element must be a str or a (src, dst) tuple, got: %r' % (f,))
if os.path.isdir(src):
src_prefix = os.path.join(src, '')
for base, dirs, files in os.walk(src):
for f in files:
f_src = os.path.join(base, f)
f_src_stripped = f_src[len(src_prefix):]
f_dst = os.path.join(dst, f_src_stripped)
result.append((f_src, f_dst))
else:
result.append((src, dst))
return result
def create_packages_archive(packages, filename):
"""
Create a tar archive which will contain the files for the packages listed in packages.
"""
import tarfile
tar = tarfile.open(filename, "w")
def add(src, dst):
logger.debug('adding to tar: %s -> %s', src, dst)
tar.add(src, dst)
def add_files_for_package(sub_package_path, root_package_path, root_package_name):
for root, dirs, files in os.walk(sub_package_path):
if '.svn' in dirs:
dirs.remove('.svn')
for f in files:
if not f.endswith(".pyc") and not f.startswith("."):
add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)
for package in packages:
# Put a submodule's entire package in the archive. This is the
# magic that usually packages everything you need without
# having to attach packages/modules explicitly
if not getattr(package, "__path__", None) and '.' in package.__name__:
package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')
n = package.__name__.replace(".", "/")
if getattr(package, "__path__", None):
# TODO: (BUG) picking only the first path does not
# properly deal with namespaced packages in different
# directories
p = package.__path__[0]
if p.endswith('.egg') and os.path.isfile(p):
raise Exception('egg files not supported!!!')
# Add the entire egg file
# p = p[:p.find('.egg') + 4]
# add(dereference(p), os.path.basename(p))
else:
# include __init__ files from parent projects
root = []
for parent in package.__name__.split('.')[0:-1]:
root.append(parent)
module_name = '.'.join(root)
directory = '/'.join(root)
add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
directory + "/__init__.py")
add_files_for_package(p, p, n)
# include egg-info directories that are parallel:
for egg_info_path in glob.glob(p + '*.egg-info'):
logger.debug(
'Adding package metadata to archive for "%s" found at "%s"',
package.__name__,
egg_info_path
)
add_files_for_package(egg_info_path, p, n)
else:
f = package.__file__
if f.endswith("pyc"):
f = f[:-3] + "py"
if n.find(".") == -1:
add(dereference(f), os.path.basename(f))
else:
add(dereference(f), n + ".py")
tar.close()
def flatten(sequence):
"""
A simple generator which flattens a sequence.
Only one level is flattened.
.. code-block:: python
(1, (2, 3), 4) -> (1, 2, 3, 4)
"""
for item in sequence:
if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
for i in item:
yield i
else:
yield item
class HadoopRunContext(object):
def __init__(self):
self.job_id = None
self.application_id = None
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def kill_job(self, captured_signal=None, stack_frame=None):
if self.application_id:
logger.info('Job interrupted, killing application %s' % self.application_id)
subprocess.call(['yarn', 'application', '-kill', self.application_id])
elif self.job_id:
logger.info('Job interrupted, killing job %s', self.job_id)
subprocess.call(['mapred', 'job', '-kill', self.job_id])
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
class HadoopJobError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(HadoopJobError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def __str__(self):
return self.message
def run_and_track_hadoop_job(arglist, tracking_url_callback=None, env=None):
"""
Runs the job by invoking the command from the given arglist.
Finds tracking urls from the output and attempts to fetch errors using those urls if the job fails.
Throws HadoopJobError with information about the error
(including stdout and stderr from the process)
on failure and returns normally otherwise.
:param arglist:
:param tracking_url_callback:
:param env:
:return:
"""
logger.info('%s', subprocess.list2cmdline(arglist))
def write_luigi_history(arglist, history):
"""
Writes history to a file in the job's output directory in JSON format.
Currently just for tracking the job ID in a configuration where
no history is stored in the output directory by Hadoop.
"""
history_filename = configuration.get_config().get('core', 'history-filename', '')
if history_filename and '-output' in arglist:
output_dir = arglist[arglist.index('-output') + 1]
f = luigi.contrib.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open('w')
f.write(json.dumps(history))
f.close()
def track_process(arglist, tracking_url_callback, env=None):
# Dump stdout to a temp file, poll stderr and log it
temp_stdout = tempfile.TemporaryFile('w+t')
proc = subprocess.Popen(arglist, stdout=temp_stdout, stderr=subprocess.PIPE, env=env, close_fds=True, universal_newlines=True)
# We parse the output to try to find the tracking URL.
# This URL is useful for fetching the logs of the job.
tracking_url = None
job_id = None
application_id = None
err_lines = []
with HadoopRunContext() as hadoop_context:
while proc.poll() is None:
err_line = proc.stderr.readline()
err_lines.append(err_line)
err_line = err_line.strip()
if err_line:
logger.info('%s', err_line)
err_line = err_line.lower()
tracking_url_match = TRACKING_RE.search(err_line)
if tracking_url_match:
tracking_url = tracking_url_match.group('url')
try:
tracking_url_callback(tracking_url)
except Exception as e:
logger.error("Error in tracking_url_callback, disabling! %s", e)
def tracking_url_callback(x):
return None
if err_line.find('running job') != -1:
# hadoop jar output
job_id = err_line.split('running job: ')[-1]
if err_line.find('submitted hadoop job:') != -1:
# scalding output
job_id = err_line.split('submitted hadoop job: ')[-1]
if err_line.find('submitted application ') != -1:
application_id = err_line.split('submitted application ')[-1]
hadoop_context.job_id = job_id
hadoop_context.application_id = application_id
# Read the rest + stdout
err = ''.join(err_lines + [an_err_line for an_err_line in proc.stderr])
temp_stdout.seek(0)
out = ''.join(temp_stdout.readlines())
if proc.returncode == 0:
write_luigi_history(arglist, {'job_id': job_id})
return (out, err)
# Try to fetch error logs if possible
message = 'Streaming job failed with exit code %d. ' % proc.returncode
if not tracking_url:
raise HadoopJobError(message + 'Also, no tracking url found.', out, err)
try:
task_failures = fetch_task_failures(tracking_url)
except Exception as e:
raise HadoopJobError(message + 'Additionally, an error occurred when fetching data from %s: %s' %
(tracking_url, e), out, err)
if not task_failures:
raise HadoopJobError(message + 'Also, could not fetch output from tasks.', out, err)
else:
raise HadoopJobError(message + 'Output from tasks below:\n%s' % task_failures, out, err)
if tracking_url_callback is None:
def tracking_url_callback(x): return None
return track_process(arglist, tracking_url_callback, env)
def fetch_task_failures(tracking_url):
"""
Uses mechanize to fetch the actual task logs from the task tracker.
This is highly opportunistic, and we might not succeed.
So we set a low timeout and hope it works.
If it does not, it's not the end of the world.
TODO: Yarn has a REST API that we should probably use instead:
http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
"""
import mechanize
timeout = 3.0
failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
logger.debug('Fetching data from %s', failures_url)
b = mechanize.Browser()
b.open(failures_url, timeout=timeout)
links = list(b.links(text_regex='Last 4KB')) # For some reason text_regex='All' doesn't work... no idea why
links = random.sample(links, min(10, len(links))) # Fetch a random subset of all failed tasks, so as not to be biased towards the early fails
error_text = []
for link in links:
task_url = link.url.replace('&start=-4097', '&start=-100000') # Increase the offset
logger.debug('Fetching data from %s', task_url)
b2 = mechanize.Browser()
try:
r = b2.open(task_url, timeout=timeout)
data = r.read()
except Exception as e:
logger.debug('Error fetching data from %s: %s', task_url, e)
continue
# Try to get the hex-encoded traceback back from the output
for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
error_text.append('---------- %s:' % task_url)
error_text.append(exc.split('=')[-1].decode('hex'))
return '\n'.join(error_text)
class JobRunner(object):
run_job = NotImplemented
class HadoopJobRunner(JobRunner):
"""
Takes care of uploading & executing a Hadoop job using Hadoop streaming.
TODO: add code to support Elastic Mapreduce (using boto) and local execution.
"""
def __init__(self, streaming_jar, modules=None, streaming_args=None,
libjars=None, libjars_in_hdfs=None, jobconfs=None,
input_format=None, output_format=None,
end_job_with_atomic_move_dir=True, archives=None):
def get(x, default):
return x is not None and x or default
self.streaming_jar = streaming_jar
self.modules = get(modules, [])
self.streaming_args = get(streaming_args, [])
self.libjars = get(libjars, [])
self.libjars_in_hdfs = get(libjars_in_hdfs, [])
self.archives = get(archives, [])
self.jobconfs = get(jobconfs, {})
self.input_format = input_format
self.output_format = output_format
self.end_job_with_atomic_move_dir = end_job_with_atomic_move_dir
self.tmp_dir = False
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
packages = [luigi] + self.modules + job.extra_modules() + list(_attached_packages)
# find the module containing the job
packages.append(__import__(job.__module__, None, None, 'dummy'))
# find the path to out runner.py
runner_path = mrrunner.__file__
# assume source is next to compiled
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
base_tmp_dir = configuration.get_config().get('core', 'tmp-dir', None)
if base_tmp_dir:
warnings.warn("The core.tmp-dir configuration item is"
" deprecated, please use the TMPDIR"
" environment variable if you wish"
" to control where luigi.contrib.hadoop may"
" create temporary files and directories.")
self.tmp_dir = os.path.join(base_tmp_dir, 'hadoop_job_%016x' % random.getrandbits(64))
os.makedirs(self.tmp_dir)
else:
self.tmp_dir = tempfile.mkdtemp()
logger.debug("Tmp dir: %s", self.tmp_dir)
# build arguments
config = configuration.get_config()
python_executable = config.get('hadoop', 'python-executable', 'python')
runner_arg = 'mrrunner.pex' if job.package_binary is not None else 'mrrunner.py'
command = '{0} {1} {{step}}'.format(python_executable, runner_arg)
map_cmd = command.format(step='map')
cmb_cmd = command.format(step='combiner')
red_cmd = command.format(step='reduce')
output_final = job.output().path
# atomic output: replace output with a temporary work directory
if self.end_job_with_atomic_move_dir:
illegal_targets = (
luigi.contrib.s3.S3FlagTarget, luigi.contrib.gcs.GCSFlagTarget)
if isinstance(job.output(), illegal_targets):
raise TypeError("end_job_with_atomic_move_dir is not supported"
" for {}".format(illegal_targets))
output_hadoop = '{output}-temp-{time}'.format(
output=output_final,
time=datetime.datetime.now().isoformat().replace(':', '-'))
else:
output_hadoop = output_final
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', self.streaming_jar]
# 'libjars' is a generic option, so place it first
libjars = [libjar for libjar in self.libjars]
for libjar in self.libjars_in_hdfs:
run_cmd = luigi.contrib.hdfs.load_hadoop_cmd() + ['fs', '-get', libjar, self.tmp_dir]
logger.debug(subprocess.list2cmdline(run_cmd))
subprocess.call(run_cmd)
libjars.append(os.path.join(self.tmp_dir, os.path.basename(libjar)))
if libjars:
arglist += ['-libjars', ','.join(libjars)]
# 'archives' is also a generic option
archives = []
extra_archives = job.extra_archives()
if self.archives:
archives = self.archives
if extra_archives:
archives += extra_archives
if archives:
arglist += ['-archives', ','.join(archives)]
# Add static files and directories
extra_files = get_extra_files(job.extra_files())
files = []
for src, dst in extra_files:
dst_tmp = '%s_%09d' % (dst.replace('/', '_'), random.randint(0, 999999999))
files += ['%s#%s' % (src, dst_tmp)]
# -files doesn't support subdirectories, so we need to create the dst_tmp -> dst manually
job.add_link(dst_tmp, dst)
if files:
arglist += ['-files', ','.join(files)]
jobconfs = job.jobconfs()
for k, v in six.iteritems(self.jobconfs):
jobconfs.append('%s=%s' % (k, v))
for conf in jobconfs:
arglist += ['-D', conf]
arglist += self.streaming_args
# Add additional non-generic per-job streaming args
extra_streaming_args = job.extra_streaming_arguments()
for (arg, value) in extra_streaming_args:
if not arg.startswith('-'): # safety first
arg = '-' + arg
arglist += [arg, value]
arglist += ['-mapper', map_cmd]
if job.combiner != NotImplemented:
arglist += ['-combiner', cmb_cmd]
if job.reducer != NotImplemented:
arglist += ['-reducer', red_cmd]
packages_fn = 'mrrunner.pex' if job.package_binary is not None else 'packages.tar'
files = [
runner_path if job.package_binary is None else None,
os.path.join(self.tmp_dir, packages_fn),
os.path.join(self.tmp_dir, 'job-instance.pickle'),
]
for f in filter(None, files):
arglist += ['-file', f]
if self.output_format:
arglist += ['-outputformat', self.output_format]
if self.input_format:
arglist += ['-inputformat', self.input_format]
allowed_input_targets = (
luigi.contrib.hdfs.HdfsTarget,
luigi.contrib.s3.S3Target,
luigi.contrib.gcs.GCSTarget)
for target in luigi.task.flatten(job.input_hadoop()):
if not isinstance(target, allowed_input_targets):
raise TypeError('target must one of: {}'.format(
allowed_input_targets))
arglist += ['-input', target.path]
allowed_output_targets = (
luigi.contrib.hdfs.HdfsTarget,
luigi.contrib.s3.S3FlagTarget,
luigi.contrib.gcs.GCSFlagTarget)
if not isinstance(job.output(), allowed_output_targets):
raise TypeError('output must be one of: {}'.format(
allowed_output_targets))
arglist += ['-output', output_hadoop]
# submit job
if job.package_binary is not None:
shutil.copy(job.package_binary, os.path.join(self.tmp_dir, 'mrrunner.pex'))
else:
create_packages_archive(packages, os.path.join(self.tmp_dir, 'packages.tar'))
job.dump(self.tmp_dir)
run_and_track_hadoop_job(arglist, tracking_url_callback=job.set_tracking_url)
if self.end_job_with_atomic_move_dir:
luigi.contrib.hdfs.HdfsTarget(output_hadoop).move_dir(output_final)
self.finish()
def finish(self):
# FIXME: check for isdir?
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.debug('Removing directory %s', self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def __del__(self):
self.finish()
class DefaultHadoopJobRunner(HadoopJobRunner):
"""
The default job runner just reads from config and sets stuff.
"""
def __init__(self):
config = configuration.get_config()
streaming_jar = config.get('hadoop', 'streaming-jar')
super(DefaultHadoopJobRunner, self).__init__(streaming_jar=streaming_jar)
# TODO: add more configurable options
class LocalJobRunner(JobRunner):
"""
Will run the job locally.
This is useful for debugging and also unit testing. Tries to mimic Hadoop Streaming.
TODO: integrate with JobTask
"""
def __init__(self, samplelines=None):
self.samplelines = samplelines
def sample(self, input_stream, n, output):
for i, line in enumerate(input_stream):
if n is not None and i >= n:
break
output.write(line)
def group(self, input_stream):
output = StringIO()
lines = []
for i, line in enumerate(input_stream):
parts = line.rstrip('\n').split('\t')
blob = md5(str(i).encode('ascii')).hexdigest() # pseudo-random blob to make sure the input isn't sorted
lines.append((parts[:-1], blob, line))
for _, _, line in sorted(lines):
output.write(line)
output.seek(0)
return output
def run_job(self, job):
map_input = StringIO()
for i in luigi.task.flatten(job.input_hadoop()):
self.sample(i.open('r'), self.samplelines, map_input)
map_input.seek(0)
if job.reducer == NotImplemented:
# Map only job; no combiner, no reducer
map_output = job.output().open('w')
job.run_mapper(map_input, map_output)
map_output.close()
return
# run job now...
map_output = StringIO()
job.run_mapper(map_input, map_output)
map_output.seek(0)
if job.combiner == NotImplemented:
reduce_input = self.group(map_output)
else:
combine_input = self.group(map_output)
combine_output = StringIO()
job.run_combiner(combine_input, combine_output)
combine_output.seek(0)
reduce_input = self.group(combine_output)
reduce_output = job.output().open('w')
job.run_reducer(reduce_input, reduce_output)
reduce_output.close()
class BaseHadoopJobTask(luigi.Task):
pool = luigi.OptionalParameter(default=None, significant=False, positional=False)
# This value can be set to change the default batching increment. Default is 1 for backwards compatibility.
batch_counter_default = 1
final_mapper = NotImplemented
final_combiner = NotImplemented
final_reducer = NotImplemented
mr_priority = NotImplemented
package_binary = None
_counter_dict = {}
task_id = None
def _get_pool(self):
""" Protected method """
if self.pool:
return self.pool
if hadoop().pool:
return hadoop().pool
@abc.abstractmethod
def job_runner(self):
pass
def jobconfs(self):
jcs = []
jcs.append('mapred.job.name=%s' % str(self))
jcs.append('luigi.task_id=%s' % self.task_id)
if self.mr_priority != NotImplemented:
jcs.append('mapred.job.priority=%s' % self.mr_priority())
pool = self._get_pool()
if pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs.append('mapred.fairscheduler.pool=%s' % pool)
elif scheduler_type == 'capacity':
jcs.append('mapred.job.queue.name=%s' % pool)
return jcs
def init_local(self):
"""
Implement any work needed to set up internal data structures etc. here.
You can add extra input using the requires_local/input_local methods.
Anything you set on the object will be pickled and available on the Hadoop nodes.
"""
pass
def init_hadoop(self):
pass
# available formats are "python" and "json".
data_interchange_format = "python"
def run(self):
# The best solution is to store them as lazy `cached_property`, but it
# has an extraneous dependency. And `property` is slow (it needs to be
# recalculated every time it is called), so we save them as attributes
# directly.
self.serialize = DataInterchange[self.data_interchange_format]['serialize']
self.internal_serialize = DataInterchange[self.data_interchange_format]['internal_serialize']
self.deserialize = DataInterchange[self.data_interchange_format]['deserialize']
self.init_local()
self.job_runner().run_job(self)
def requires_local(self):
"""
Default impl - override this method if you need any local input to be accessible in init().
"""
return []
def requires_hadoop(self):
return self.requires() # default impl
def input_local(self):
return luigi.task.getpaths(self.requires_local())
def input_hadoop(self):
return luigi.task.getpaths(self.requires_hadoop())
def deps(self):
# Overrides the default implementation
return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local())
def on_failure(self, exception):
if isinstance(exception, HadoopJobError):
return """Hadoop job failed with message: {message}
stdout:
{stdout}
stderr:
{stderr}
""".format(message=exception.message, stdout=exception.out, stderr=exception.err)
else:
return super(BaseHadoopJobTask, self).on_failure(exception)
DataInterchange = {
"python": {"serialize": str,
"internal_serialize": repr,
"deserialize": eval},
"json": {"serialize": json.dumps,
"internal_serialize": json.dumps,
"deserialize": json.loads}
}
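# A small illustration (values are made up) of how the two formats treat a (key, value)
# record emitted by a mapper, depending on data_interchange_format:
#
#   rec = ('user_1', 42)
#   DataInterchange['python']['internal_serialize'](rec)   # "('user_1', 42)"  (repr/eval round trip)
#   DataInterchange['json']['internal_serialize'](rec)     # '["user_1", 42]'  (json.dumps/loads round trip)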
class JobTask(BaseHadoopJobTask):
jobconf_truncate = 20000
n_reduce_tasks = 25
reducer = NotImplemented
def jobconfs(self):
jcs = super(JobTask, self).jobconfs()
if self.reducer == NotImplemented:
jcs.append('mapred.reduce.tasks=0')
else:
jcs.append('mapred.reduce.tasks=%s' % self.n_reduce_tasks)
if self.jobconf_truncate >= 0:
jcs.append('stream.jobconf.truncate.limit=%i' % self.jobconf_truncate)
return jcs
def init_mapper(self):
pass
def init_combiner(self):
pass
def init_reducer(self):
pass
def _setup_remote(self):
self._setup_links()
def job_runner(self):
# We recommend that you define a subclass, override this method and set up your own config
"""
Get the MapReduce runner for this job.
If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used.
Otherwise, the LocalJobRunner which streams all data through the local machine
will be used (great for testing).
"""
outputs = luigi.task.flatten(self.output())
for output in outputs:
if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
warnings.warn("Job is using one or more non-HdfsTarget outputs" +
" so it will be run in local mode")
return LocalJobRunner()
else:
return DefaultHadoopJobRunner()
def reader(self, input_stream):
"""
Reader is a method which iterates over input lines and outputs records.
The default implementation yields one argument containing the line for each line in the input."""
for line in input_stream:
yield line,
def writer(self, outputs, stdout, stderr=sys.stderr):
"""
Writer format is a method which iterates over the output records
from the reducer and formats them for output.
The default implementation outputs tab separated items.
"""
for output in outputs:
try:
output = flatten(output)
if self.data_interchange_format == "json":
# Only dump one json string, and skip another one, maybe key or value.
output = filter(lambda x: x, output)
else:
# JSON is already serialized, so we put `self.serialize` in a else statement.
output = map(self.serialize, output)
print("\t".join(output), file=stdout)
except BaseException:
print(output, file=stderr)
raise
def mapper(self, item):
"""
Re-define to process an input item (usually a line of input data).
Defaults to identity mapper that sends all lines to the same reducer.
"""
yield None, item
combiner = NotImplemented
def incr_counter(self, *args, **kwargs):
"""
Increments a Hadoop counter.
Since counters can be a bit slow to update, this batches the updates.
"""
threshold = kwargs.get("threshold", self.batch_counter_default)
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
key = (group_name,)
else:
group, name, count = args
key = (group, name)
ct = self._counter_dict.get(key, 0)
ct += count
if ct >= threshold:
new_arg = list(key) + [ct]
self._incr_counter(*new_arg)
ct = 0
self._counter_dict[key] = ct
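# A minimal usage sketch (group/counter names are made up), e.g. from inside a mapper:
#
#   self.incr_counter('MyJob', 'lines_seen', 1)                  # batched via batch_counter_default
#   self.incr_counter('MyJob', 'lines_seen', 1, threshold=100)   # flush to Hadoop every 100 counts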
def _flush_batch_incr_counter(self):
"""
Increments any unflushed counter values.
"""
for key, count in six.iteritems(self._counter_dict):
if count == 0:
continue
args = list(key) + [count]
self._incr_counter(*args)
self._counter_dict[key] = 0
def _incr_counter(self, *args):
"""
Increments a Hadoop counter.
Note that this seems to be a bit slow, ~1 ms
Don't overuse this function by updating very frequently.
"""
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
print('reporter:counter:%s,%s' % (group_name, count), file=sys.stderr)
else:
group, name, count = args
print('reporter:counter:%s,%s,%s' % (group, name, count), file=sys.stderr)
def extra_modules(self):
return [] # can be overridden in subclass
def extra_files(self):
"""
Can be overridden in a subclass.
Each element is either a string, or a pair of two strings (src, dst).
* `src` can be a directory (in which case everything will be copied recursively).
* `dst` can include subdirectories (foo/bar/baz.txt etc)
Uses Hadoop's -files option so that the same file is reused across tasks.
"""
return []
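# A hedged example of overriding this in a subclass (paths are hypothetical):
#
#   def extra_files(self):
#       return [
#           '/etc/myjob/stopwords.txt',           # shipped next to the task as 'stopwords.txt'
#           ('/opt/myjob/lookup', 'lookup'),      # whole directory, copied recursively
#       ]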
def extra_streaming_arguments(self):
"""
Extra arguments to Hadoop command line.
Return here a list of (parameter, value) tuples.
"""
return []
def extra_archives(self):
"""List of paths to archives """
return []
def add_link(self, src, dst):
if not hasattr(self, '_links'):
self._links = []
self._links.append((src, dst))
def _setup_links(self):
if hasattr(self, '_links'):
missing = []
for src, dst in self._links:
d = os.path.dirname(dst)
if d:
try:
os.makedirs(d)
except OSError:
pass
if not os.path.exists(src):
missing.append(src)
continue
if not os.path.exists(dst):
# If the combiner runs, the file might already exist,
# so no reason to create the link again
os.link(src, dst)
if missing:
raise HadoopJobError(
'Missing files for distributed cache: ' +
', '.join(missing))
def dump(self, directory=''):
"""
Dump instance to file.
"""
with self.no_unpicklable_properties():
file_name = os.path.join(directory, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace(b'(c__main__', "(c" + module_name)
open(file_name, "wb").write(d)
else:
pickle.dump(self, open(file_name, "wb"))
def _map_input(self, input_stream):
"""
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
the arguments will be split into key and value.
"""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter()
def _reduce_input(self, inputs, reducer, final=NotImplemented):
"""
Iterate over input, collect values with the same key, and call the reducer for each unique key.
"""
for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])):
for output in reducer(self.deserialize(key), (v[1] for v in values)):
yield output
if final != NotImplemented:
for output in final():
yield output
self._flush_batch_incr_counter()
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout)
def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the reducer on the hadoop node.
"""
self.init_hadoop()
self.init_reducer()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.reducer, self.final_reducer)
self.writer(outputs, stdout)
def run_combiner(self, stdin=sys.stdin, stdout=sys.stdout):
self.init_hadoop()
self.init_combiner()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.combiner, self.final_combiner)
self.internal_writer(outputs, stdout)
def internal_reader(self, input_stream):
"""
Reader which uses python eval on each part of a tab separated string.
Yields a tuple of python objects.
"""
for input_line in input_stream:
yield list(map(self.deserialize, input_line.split("\t")))
def internal_writer(self, outputs, stdout):
"""
Writer which outputs the python repr for each item.
"""
for output in outputs:
print("\t".join(map(self.internal_serialize, output)), file=stdout)
| apache-2.0 | -1,246,452,687,684,869,000 | 34.674221 | 143 | 0.583075 | false |
glemaitre/UnbalancedDataset | examples/over-sampling/plot_smote.py | 2 | 2231 | """
=====
SMOTE
=====
An illustration of the SMOTE method and its variant.
"""
# Authors: Fernando Nogueira
# Christos Aridas
# Guillaume Lemaitre <[email protected]>
# License: MIT
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from imblearn.over_sampling import SMOTE
print(__doc__)
def plot_resampling(ax, X, y, title):
c0 = ax.scatter(X[y == 0, 0], X[y == 0, 1], label="Class #0", alpha=0.5)
c1 = ax.scatter(X[y == 1, 0], X[y == 1, 1], label="Class #1", alpha=0.5)
ax.set_title(title)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.spines['left'].set_position(('outward', 10))
ax.spines['bottom'].set_position(('outward', 10))
ax.set_xlim([-6, 8])
ax.set_ylim([-6, 6])
return c0, c1
# Generate the dataset
X, y = make_classification(n_classes=2, class_sep=2, weights=[0.3, 0.7],
n_informative=3, n_redundant=1, flip_y=0,
n_features=20, n_clusters_per_class=1,
n_samples=80, random_state=10)
# Instantiate a PCA object for the sake of easy visualisation
pca = PCA(n_components=2)
# Fit and transform x to visualise inside a 2D feature space
X_vis = pca.fit_transform(X)
# Apply regular SMOTE
kind = ['regular', 'borderline1', 'borderline2', 'svm']
sm = [SMOTE(kind=k) for k in kind]
X_resampled = []
y_resampled = []
X_res_vis = []
for method in sm:
X_res, y_res = method.fit_sample(X, y)
X_resampled.append(X_res)
y_resampled.append(y_res)
X_res_vis.append(pca.transform(X_res))
# Six subplots in a 3x2 grid, unpack the axes array immediately
f, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2)
# Remove axis for second plot
ax2.axis('off')
ax_res = [ax3, ax4, ax5, ax6]
c0, c1 = plot_resampling(ax1, X_vis, y, 'Original set')
for i in range(len(kind)):
plot_resampling(ax_res[i], X_res_vis[i], y_resampled[i],
'SMOTE {}'.format(kind[i]))
ax2.legend((c0, c1), ('Class #0', 'Class #1'), loc='center',
ncol=1, labelspacing=0.)
plt.tight_layout()
plt.show()
| mit | 2,950,088,382,133,793,000 | 27.974026 | 76 | 0.619005 | false |
bewantbe/eogRichExif | eogRichExif.py | 1 | 12212 | '''
eogRichExif
An eog (Eye of GNOME Image Viewer) plugin which shows detailed Exif info in the side pane.
Thanks to the eogMetaEdit plugin.
'''
'''
eogRichExif is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
eogRichExif is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with eogRichExif. If not, see <http://www.gnu.org/licenses/>.
'''
from gi.repository import GObject, Gtk, Eog
from os.path import join, basename
from urllib.parse import urlparse
import xml.sax.saxutils
import pyexiv2
import math
class eogRichExif(GObject.Object, Eog.WindowActivatable):
# Override EogWindowActivatable's window property
# This is the EogWindow this plugin instance has been activated for
window = GObject.property(type=Eog.Window)
Debug = False
def __init__(self):
# will be executed when activating
GObject.Object.__init__(self)
def do_activate(self):
if self.Debug:
print('The answer landed on my rooftop, whoa')
# get sidebar
self.sidebar = self.window.get_sidebar()
# need to track file changes in the EoG thumbview (any better idea?)
self.thumbview = self.window.get_thumb_view()
# the EogImage selected in the thumbview
self.thumbImage = None
self.cb_ids = {}
self.plugin_window = None
# Python and GTK
# https://python-gtk-3-tutorial.readthedocs.org/en/latest/introduction.html
# http://www.pygtk.org/pygtk2tutorial/sec-Notebooks.html
# http://gnipsel.com/glade/
builder = Gtk.Builder()
builder.add_from_file(join(self.plugin_info.get_data_dir(),\
"eogRichExif.glade"))
self.plugin_window = builder.get_object('eogRichExif')
self.label_exif = builder.get_object('label_exif')
# add dialog to the sidebar
Eog.Sidebar.add_page(self.sidebar, "RichExif", self.plugin_window)
self.cb_ids['selection-changed'] = {}
self.cb_ids['selection-changed'][self.thumbview] = \
self.thumbview.connect('selection-changed', \
self.selection_changed_cb, self)
def do_deactivate(self):
'''remove all the callbacks stored in dict self.cb_ids '''
if self.Debug:
print('The answer fell off my rooftop, woot')
for S in self.cb_ids:
for W, id in self.cb_ids[S].items():
W.disconnect(id)
# Load metadata
@staticmethod
def selection_changed_cb(thumb, self):
if self.Debug:
print("--- dbg: in selection_changed_cb ---")
# Get file path
self.thumbImage = self.thumbview.get_first_selected_image()
Event = Gtk.get_current_event()
self.filePath = None
self.fileURL = None
if self.thumbImage != None:
self.fileURL = self.thumbImage.get_uri_for_display()
# https://docs.python.org/2/library/urlparse.html
self.filePath = urlparse(self.fileURL).path
if self.Debug:
print('loading thumb meta: \n ', self.filePath, '\n URL: ', self.fileURL)
else:
if self.Debug:
print('Fail to load metadata!')
return False
# Read metadata
# http://python3-exiv2.readthedocs.org/en/latest/tutorial.html
self.metadata = pyexiv2.ImageMetadata(self.filePath)
try:
self.metadata.read()
except:
self.metadata = None
self.label_exif.set_markup("Cannot read metadata.\n self.filePath=%s" % self.filePath)
return
# try:
self.set_info()
# except KeyError as e:
# self.label_exif.set_markup("Metadata incomplete?\n Error: {0}\n".format(e))
# return False to let any other callbacks execute as well
return False
def set_info(self):
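		# Build a Pango-markup summary of the image's Exif metadata (camera, timestamps,
		# exposure, white balance, focus, lens, GPS, thumbnails) and show it in the side-pane label.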
def is_integer(a):
if math.fabs(a-math.floor(a+0.5)) < 1e-5:
return True
else:
return False
st_markup = '%s\n' % self.filePath;
if 'Exif.Image.Model' in self.metadata:
image_make = ''
if 'Exif.Image.Make' in self.metadata:
image_make = xml.sax.saxutils.escape(self.metadata['Exif.Image.Make'].value) + '\n '
image_model = xml.sax.saxutils.escape(self.metadata['Exif.Image.Model'].value)
st_markup += '<b>Camera:</b>\n %s%s\n' % (image_make, image_model)
# Time
NO_TIME = '0000:00:00 00:00:00'
s_time_tag = [
[NO_TIME, 'Exif.Image.DateTime', 'DateTime'],
[NO_TIME, 'Exif.Image.DateTimeOriginal', 'DateTimeOriginal'],
[NO_TIME, 'Exif.Photo.DateTimeOriginal', 'DateTimeOriginal'],
[NO_TIME, 'Exif.Image.DateTimeDigitized', 'DateTimeDigitized'],
[NO_TIME, 'Exif.Photo.DateTimeDigitized', 'DateTimeDigitized']]
for idx, ttag in enumerate(s_time_tag):
if ttag[1] in self.metadata:
s_time_tag[idx][0] = self.metadata[ttag[1]].value
		# remove nonsense data
s_time_tag = list(filter(lambda x: x[0]!=NO_TIME, s_time_tag))
if len(set([r[0] for r in s_time_tag])) > 1: # time are different
for ttag in s_time_tag:
st_markup += '<b>%s:</b>\n<tt> %s</tt>\n' % (ttag[2], ttag[0].strftime('%Y-%m-%d %H:%M:%S'))
elif len(s_time_tag) == 0:
st_markup += '<b>DateTime:</b>\n<tt> ??</tt>\n'
else: # unique time
st_markup += '<b>DateTime:</b>\n<tt> %s</tt>\n' % (s_time_tag[0][0].strftime('%Y-%m-%d %H:%M:%S'))
# ExposureTime
if 'Exif.Photo.ExposureTime' in self.metadata:
st_exposure_time = self.metadata['Exif.Photo.ExposureTime'].human_value
else:
st_exposure_time = '?? s'
# FNumber
if 'Exif.Photo.FNumber' in self.metadata:
f_number = self.metadata['Exif.Photo.FNumber'].human_value
elif 'Exif.Photo.ApertureValue' in self.metadata:
f_number = self.metadata['Exif.Photo.ApertureValue'].human_value
else:
f_number = 'F??'
# ISO
iso = ''
if 'Exif.Photo.ISOSpeedRatings' in self.metadata:
iso = self.metadata['Exif.Photo.ISOSpeedRatings'].human_value
else:
if 'Exif.Nikon3.ISOSettings' in self.metadata:
iso = self.metadata['Exif.Nikon3.ISOSettings'].human_value
if 'Exif.NikonIi.ISO' in self.metadata:
iso = self.metadata['Exif.NikonIi.ISO'].human_value
# extra ISO
if 'Exif.NikonIi.ISOExpansion' in self.metadata:
iso_ext = self.metadata['Exif.NikonIi.ISOExpansion'].human_value
if 'off' in iso_ext.lower():
iso += '' # do nothing
else:
iso += '(%s)' % iso_ext
st_markup += '<b>Exposure:</b>\n'
st_markup += '<tt> %s, %s</tt>\n' % (st_exposure_time, f_number)
st_markup += '<tt> ISO %s</tt>\n' % (iso)
# Focal Length
if 'Exif.Photo.FocalLength' in self.metadata:
st_focal_length = "%.1f mm" % self.metadata['Exif.Photo.FocalLength'].value.__float__()
else:
st_focal_length = "?? mm"
if 'Exif.Photo.FocalLengthIn35mmFilm' in self.metadata:
st_focal_length_35mm = "%.1f mm (35mm)" % self.metadata['Exif.Photo.FocalLengthIn35mmFilm'].value.__float__()
else:
st_focal_length_35mm = '?? mm (35mm)'
st_markup += '<tt> %s</tt>\n' % (st_focal_length)
st_markup += '<tt> %s</tt>\n' % (st_focal_length_35mm)
if 'Exif.Photo.Flash' in self.metadata:
st_markup += '<b>Flash:</b>\n'
st_markup += ' %s\n' % self.metadata['Exif.Photo.Flash'].human_value
def sign(a):
return (a > 0) - (a < 0)
# White Balance
st_markup += '<b>WhiteBalance:</b>\n'
if 'Exif.Nikon3.WhiteBalance' in self.metadata:
wb_extra = self.metadata['Exif.Nikon3.WhiteBalance'].human_value.strip()
if 'Exif.Nikon3.WhiteBalanceBias' in self.metadata:
v = self.metadata['Exif.Nikon3.WhiteBalanceBias'].value
wb_extra += ', Bias: %s:%d, %s:%d' % (('A','_','B')[sign(v[0])+1], abs(v[0]), ('M','_','G')[sign(v[1])+1], abs(v[1]))
st_markup += ' %s\n' % wb_extra
elif 'Exif.CanonPr.WhiteBalanceRed' in self.metadata:
wb_extra = self.metadata['Exif.Photo.WhiteBalance'].human_value.strip()
v_r = self.metadata['Exif.CanonPr.WhiteBalanceRed'].value
v_b = self.metadata['Exif.CanonPr.WhiteBalanceBlue'].value
wb_extra += ', Bias: R:%d, B:%d' % (v_r, v_b)
# not sure the logic
if 'Manual' in wb_extra:
v_t = self.metadata['Exif.CanonPr.ColorTemperature'].value
wb_extra += ', %dK' % v_t
st_markup += ' %s\n' % wb_extra
else:
if 'Exif.Photo.WhiteBalance' in self.metadata:
wb = self.metadata['Exif.Photo.WhiteBalance'].human_value
else:
wb = ''
st_markup += ' %s\n' % wb
# Focus Mode
if 'Exif.Nikon3.Focus' in self.metadata:
st_markup += '<b>Focus Mode:</b>\n'
st_markup += ' %s\n' % self.metadata['Exif.Nikon3.Focus'].value.strip()
if 'Exif.NikonAf2.ContrastDetectAF' in self.metadata:
st_cdaf = self.metadata['Exif.NikonAf2.ContrastDetectAF'].human_value
if 'on' in st_cdaf.lower():
st_markup += ' ContrastDetectAF:\n %s\n' % st_cdaf
if 'Exif.NikonAf2.PhaseDetectAF' in self.metadata:
st_pdaf = self.metadata['Exif.NikonAf2.PhaseDetectAF'].human_value
if 'on' in st_pdaf.lower():
st_markup += ' PhaseDetectAF:\n %s\n' % st_pdaf
if 'Exif.Sony1.FocusMode' in self.metadata:
st_markup += '<b>Focus Mode:</b>\n'
st_markup += ' %s\n' % self.metadata['Exif.Sony1.FocusMode'].human_value.strip()
st_markup += ' %s\n' % self.metadata['Exif.Sony1.AFMode'].human_value.strip()
if 'Exif.CanonCs.FocusMode' in self.metadata:
st_markup += '<b>Focus Mode:</b>\n'
st_markup += ' %s\n' % self.metadata['Exif.CanonCs.FocusMode'].human_value.strip()
st_markup += ' FocusType: %s\n' % self.metadata['Exif.CanonCs.FocusType'].human_value.strip()
st_markup += '<b>Extra settings:</b>\n'
s_tag_name_extra = [
('Exif.Photo.ExposureBiasValue', 'Exposure Bias Value'),
('Exif.Photo.ExposureProgram', 'Exposure Program'),
('Exif.Photo.MeteringMode', 'Metering Mode'),
('Exif.Photo.SceneCaptureType', 'Scene Capture Type'),
('Exif.Photo.ColorSpace', 'Color Space'),
# Nikon
('Exif.Nikon3.ActiveDLighting', 'DLighting'),
('Exif.NikonVr.VibrationReduction', 'Vibration Reduction'),
('Exif.Nikon3.NoiseReduction', 'Noise Reduction'),
('Exif.Nikon3.HighISONoiseReduction', 'High ISO Noise Reduction'),
('Exif.Nikon3.ShootingMode', 'Shooting Mode'),
# Canon
('Exif.CanonFi.NoiseReduction', 'Noise Reduction'),
# Sony
('Exif.Sony1.AutoHDR', 'Auto HDR'),
('Exif.Sony1.LongExposureNoiseReduction', 'LongExposureNoiseReduction')
]
for tag_name in s_tag_name_extra:
if tag_name[0] in self.metadata:
st_markup += ' <i>%s:</i>\n %s\n' % \
(tag_name[1], self.metadata[tag_name[0]].human_value)
st_markup += '<b>Lens:</b>\n'
s_tag_name_lens = [
('Exif.NikonLd3.FocalLength', 'Focal Length'),
('Exif.NikonLd3.AFAperture', 'AFAperture'),
('Exif.NikonLd3.FocusDistance', 'Focus Distance'),
]
for tag_name in s_tag_name_lens:
if tag_name[0] in self.metadata:
st_markup += ' <i>%s:</i> %s\n' % \
(tag_name[1], self.metadata[tag_name[0]].human_value)
st_markup += '<b>Lens Model:</b>\n'
if 'Exif.Nikon3.Lens' in self.metadata:
st_markup += ' %s\n' % self.metadata['Exif.Nikon3.Lens'].human_value
if 'Exif.Canon.LensModel' in self.metadata:
st_markup += ' %s\n' % self.metadata['Exif.Canon.LensModel'].human_value
if 'Exif.Photo.LensModel' in self.metadata:
st_markup += ' %s\n' % self.metadata['Exif.Photo.LensModel'].human_value
if 'Exif.GPSInfo.GPSLatitudeRef' in self.metadata:
lr = self.metadata['Exif.GPSInfo.GPSLatitudeRef'].value
lv = self.metadata['Exif.GPSInfo.GPSLatitude'].value
ar = self.metadata['Exif.GPSInfo.GPSLongitudeRef'].value
av = self.metadata['Exif.GPSInfo.GPSLongitude'].value
st_markup += '<b>GPS:</b>\n %.0f° %.0f\' %.2f" %s,\n %.0f° %.0f\' %.2f" %s,\n' % \
(float(lv[0]), float(lv[1]), float(lv[2]), lr, \
float(av[0]), float(av[1]), float(av[2]), ar)
st_markup += ' %s %s.\n' % (self.metadata['Exif.GPSInfo.GPSAltitude'].human_value,\
self.metadata['Exif.GPSInfo.GPSAltitudeRef'].human_value)
previews = self.metadata.previews
st_markup += '<b>Number of thumbnails:</b>\n <tt>%d</tt>\n' % len(previews)
# if 'NIKON' in image_make:
# if ('Exif.Photo.UserComment' in self.metadata):
# st_markup += '<b>UserComment:</b>\n <tt>%s</tt>\n' % self.metadata['Exif.Photo.UserComment'].human_value
self.label_exif.set_markup(st_markup)
| gpl-3.0 | -9,212,282,907,351,276,000 | 36.453988 | 121 | 0.661507 | false |
razz0/DataMining | apriori_sequential.py | 1 | 8210 | """Implementation of the Apriori algorithm for sequential patterns, F(k-1) x F(k-1) variant.
Model sequences like ((1, 2, 3), (4, 5), (4, 6)).
To get course sequences with empty elements as (0,):
course_seqs = [x.course_sequence for x in s.students]
course_seqs2 = [tuple([seq or (0,) for seq in x.course_sequence]) for x in s.students]
"""
from collections import defaultdict
from pprint import pprint
import copy
def flatten(sequence):
"""Flatten events in sequence elements to list of events"""
return [event for element in sequence for event in element]
def is_subsequence(seq1, seq2):
"""Check if seq1 is a subsequence of seq2
>>> is_subsequence(((2,), (3, 5)), ((2, 4), (3, 5, 6), (8,)))
True
>>> is_subsequence(((1,), (2,)), ((1, 2), (3, 4)))
False
>>> is_subsequence(((2,), (4,)), ((2, 4), (2, 4), (2, 5)))
True
"""
seq = copy.deepcopy(seq1)
for element in seq2:
if seq and set(seq[0]) <= set(element):
seq = seq[1:]
return True if not seq else False
def support_count(sequence, seq_list):
"""
    Count the support count for a sequence.
    :param sequence: candidate sequence to measure support for
    :param seq_list: list of all data sequences
>>> simple_seqs = [((1,), (2, 3)), ((2,), (3,)), ((2, 4,),), ((4,),)]
>>> [support_count(((item,),), simple_seqs) for item in range(1, 5)]
[1, 3, 2, 2]
"""
return len([seq for seq in seq_list if is_subsequence(sequence, seq)])
def _sequential_candidate_generation(sequences, k):
"""
Generate candidate sequences of length k.
:param sequences: list of sequences containing elements containing events
:param k: > 1
>>> pprint(_sequential_candidate_generation([(('A',),), (('B',),), (('C',),)], 2))
[(('A',), ('A',)),
(('A',), ('B',)),
(('A', 'B'),),
(('A',), ('C',)),
(('A', 'C'),),
(('B',), ('A',)),
(('B',), ('B',)),
(('B',), ('C',)),
(('B', 'C'),),
(('C',), ('A',)),
(('C',), ('B',)),
(('C',), ('C',))]
>>> _sequential_candidate_generation([(('A', 'B'),), (('A', 'C'),), (('B',), ('C',))], 3)
[(('A', 'B'), ('C',))]
>>> _sequential_candidate_generation([(('A',), ('B',)), (('A', 'C'),), (('B', 'C'),), (('C', 'C'),)], 3)
[(('A',), ('B', 'C')), (('A', 'C', 'C'),), (('B', 'C', 'C'),)]
>>> pprint(_sequential_candidate_generation([((1,),), ((2,),), ((3,),)], 2))
[((1,), (1,)),
((1,), (2,)),
((1, 2),),
((1,), (3,)),
((1, 3),),
((2,), (1,)),
((2,), (2,)),
((2,), (3,)),
((2, 3),),
((3,), (1,)),
((3,), (2,)),
((3,), (3,))]
>>> _sequential_candidate_generation([((1,), (2,)), ((2,), (3,))], 3)
[((1,), (2,), (3,))]
"""
new_candidates = []
for index1, seq1 in enumerate(sequences):
for index2, seq2 in enumerate(sequences):
if k == 2:
# Assume we get 1-sequences like we should
new_candidates.append((seq1[0], seq2[0],))
if seq1[0] < seq2[0]:
new_candidates.append(((seq1[0] + seq2[0]),))
elif k > 2:
seq1_flattened = flatten(seq1)
seq2_flattened = flatten(seq2)
if index1 == index2:
continue
if seq1_flattened[1:] == seq2_flattened[:-1]:
new_sequence = copy.deepcopy(seq1)
if len(seq2[-1]) > 1:
new_sequence = new_sequence[:-1] + (new_sequence[-1] + (seq2_flattened[-1],),)
else:
new_sequence += (seq2[-1],)
new_candidates.append(new_sequence)
return new_candidates
def get_subsequences(sequence):
"""
Get length k-1 subsequences of length k sequence
>>> get_subsequences((('A', 'B'), ('C',)))
[(('A', 'B'),), (('A',), ('C',)), (('B',), ('C',))]
>>> get_subsequences((('A', 'B'), ('C',), ('D', 'E')))
[(('A', 'B'), ('C',), ('D',)), (('A', 'B'), ('C',), ('E',)), (('A', 'B'), ('D', 'E')), (('A',), ('C',), ('D', 'E')), (('B',), ('C',), ('D', 'E'))]
:rtype : tuple
:return:
"""
subseqs = []
for i in reversed(range(0, len(sequence))):
element = sequence[i]
for j in reversed(range(0, len(element))):
event = element[j]
if len(element) == 1:
subseq = sequence[:i] + sequence[(i + 1):]
else:
subseq = list(sequence)
subseq[i] = subseq[i][:j] + subseq[i][(j + 1):]
subseqs.append(tuple(subseq))
return subseqs
def apriori_sequential(sequences, minsup, fixed_k=None, verbose=False):
"""
Apriori method for sequential patterns
    :param sequences: list of sequences (tuples of element tuples)
    :param minsup: minimum support as a fraction of the number of sequences
    :param fixed_k: if set, only frequent sequences of length fixed_k are returned
>>> seqs = [((1, 2, 4), (2, 3), (5,)), \
((1, 2), (2, 3, 4)), \
((1, 2), (2, 3, 4), (2, 4, 5)), \
((2,), (3, 4), (4, 5)), \
((1, 3), (2, 4, 5))]
>>> pprint(apriori_sequential(seqs, 0.8))
[{((1,),): 0.80000000000000004},
{((2,),): 1.0},
{((3,),): 1.0},
{((4,),): 1.0},
{((5,),): 0.80000000000000004},
{((1,), (2,)): 0.80000000000000004},
{((2,), (3,)): 0.80000000000000004},
{((2, 4),): 0.80000000000000004},
{((3,), (5,)): 0.80000000000000004}]
>>> seqs = [((1,), (), (), (2,), (), (), (3,)), \
((1, 2,), (), (2,3 ), (2,), (), (3,), ()), \
((1,), (2,), (), (2,), (3,), (3,), (2, 3, 4))]
"""
k = 1
N = len(sequences)
frequent_sequences = [[], []] # k index, zero always empty
support = defaultdict(int)
if verbose:
print 'Initializing length 1 frequent sequences...'
for seq in sequences:
events = sorted(set(flatten(seq)))
for event in events:
event_seq = ((event,),)
if event_seq not in support:
support[event_seq] = support_count(event_seq, sequences)
#print "k==1, event seq: %s - support: %s" % (event_seq, support[event_seq])
if support[event_seq] >= N * minsup and event_seq not in frequent_sequences[1]:
frequent_sequences[1].append(event_seq)
if verbose:
print 'Initialized %s 1-sequences' % len(frequent_sequences[1])
print 'Generating longer frequent sequences...'
pruned_candidates = ['dummy', 'dummy']
while pruned_candidates and len(pruned_candidates) > 1 and (not fixed_k or k < fixed_k):
k += 1
candidate_seqs = _sequential_candidate_generation(frequent_sequences[k - 1], k)
if verbose:
print 'k=%s - candidate sequence count %s' % (k, len(candidate_seqs),)
if not candidate_seqs:
break
pruned_candidates = []
for can_seq in candidate_seqs:
subseqs = get_subsequences(can_seq)
if all([subseq in frequent_sequences[k - 1] for subseq in subseqs]) and can_seq not in pruned_candidates:
pruned_candidates.append(can_seq)
for pruned_index, pruned_seq in enumerate(pruned_candidates):
if verbose and k > 3 and len(pruned_candidates) > 50 \
and pruned_index % (1 + len(pruned_candidates) / 5) == 0:
print 'Candidate %s / %s' % (pruned_index, len(pruned_candidates))
for seq in sequences:
if is_subsequence(pruned_seq, seq):
support[pruned_seq] += 1
frequent_sequences.append([seq for seq in pruned_candidates if support[seq] >= N * minsup])
if fixed_k:
try:
freq_items = [{freqseq: support[freqseq] / float(N)} for freqseq in frequent_sequences[fixed_k]]
except IndexError:
return []
else:
freq_items = [{freqseq: support[freqseq] / float(N)} for freq_k in frequent_sequences for freqseq in freq_k]
return freq_items
if __name__ == "__main__":
print 'Running doctests'
import doctest
res = doctest.testmod()
if not res[0]:
print 'OK!'
| gpl-2.0 | 133,243,926,120,616,290 | 32.92562 | 150 | 0.487576 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/test/test_json/test_decode.py | 1 | 4355 | import decimal
from io import StringIO
from collections import OrderedDict
from test.test_json import PyTest, CTest
class TestDecode:
def test_decimal(self):
rval = self.loads('1.1', parse_float=decimal.Decimal)
self.assertTrue(isinstance(rval, decimal.Decimal))
self.assertEqual(rval, decimal.Decimal('1.1'))
def test_float(self):
rval = self.loads('1', parse_int=float)
self.assertTrue(isinstance(rval, float))
self.assertEqual(rval, 1.0)
def test_empty_objects(self):
self.assertEqual(self.loads('{}'), {})
self.assertEqual(self.loads('[]'), [])
self.assertEqual(self.loads('""'), "")
def test_object_pairs_hook(self):
s = '{"xkd":1, "kcw":2, "art":3, "hxm":4, "qrt":5, "pad":6, "hoy":7}'
p = [("xkd", 1), ("kcw", 2), ("art", 3), ("hxm", 4),
("qrt", 5), ("pad", 6), ("hoy", 7)]
self.assertEqual(self.loads(s), eval(s))
self.assertEqual(self.loads(s, object_pairs_hook=lambda x: x), p)
self.assertEqual(self.json.load(StringIO(s),
object_pairs_hook=lambda x: x), p)
od = self.loads(s, object_pairs_hook=OrderedDict)
self.assertEqual(od, OrderedDict(p))
self.assertEqual(type(od), OrderedDict)
# the object_pairs_hook takes priority over the object_hook
self.assertEqual(self.loads(s, object_pairs_hook=OrderedDict,
object_hook=lambda x: None),
OrderedDict(p))
# check that empty object literals work (see #17368)
self.assertEqual(self.loads('{}', object_pairs_hook=OrderedDict),
OrderedDict())
self.assertEqual(self.loads('{"empty": {}}',
object_pairs_hook=OrderedDict),
OrderedDict([('empty', OrderedDict())]))
def test_decoder_optimizations(self):
# Several optimizations were made that skip over calls to
# the whitespace regex, so this test is designed to try and
# exercise the uncommon cases. The array cases are already covered.
rval = self.loads('{ "key" : "value" , "k":"v" }')
self.assertEqual(rval, {"key":"value", "k":"v"})
def check_keys_reuse(self, source, loads):
rval = loads(source)
(a, b), (c, d) = sorted(rval[0]), sorted(rval[1])
self.assertIs(a, c)
self.assertIs(b, d)
def test_keys_reuse(self):
s = '[{"a_key": 1, "b_\xe9": 2}, {"a_key": 3, "b_\xe9": 4}]'
self.check_keys_reuse(s, self.loads)
decoder = self.json.decoder.JSONDecoder()
self.check_keys_reuse(s, decoder.decode)
self.assertFalse(decoder.memo)
def test_extra_data(self):
s = '[1, 2, 3]5'
msg = 'Extra data'
self.assertRaisesRegex(self.JSONDecodeError, msg, self.loads, s)
def test_invalid_escape(self):
s = '["abc\\y"]'
msg = 'escape'
self.assertRaisesRegex(self.JSONDecodeError, msg, self.loads, s)
def test_invalid_input_type(self):
msg = 'the JSON object must be str'
for value in [1, 3.14, [], {}, None]:
self.assertRaisesRegex(TypeError, msg, self.loads, value)
def test_string_with_utf8_bom(self):
# see #18958
bom_json = "[1,2,3]".encode('utf-8-sig').decode('utf-8')
with self.assertRaises(self.JSONDecodeError) as cm:
self.loads(bom_json)
self.assertIn('BOM', str(cm.exception))
with self.assertRaises(self.JSONDecodeError) as cm:
self.json.load(StringIO(bom_json))
self.assertIn('BOM', str(cm.exception))
# make sure that the BOM is not detected in the middle of a string
bom_in_str = '"{}"'.format(''.encode('utf-8-sig').decode('utf-8'))
self.assertEqual(self.loads(bom_in_str), '\ufeff')
self.assertEqual(self.json.load(StringIO(bom_in_str)), '\ufeff')
def test_negative_index(self):
d = self.json.JSONDecoder()
self.assertRaises(ValueError, d.raw_decode, 'a'*42, -50000)
def test_deprecated_encode(self):
with self.assertWarns(DeprecationWarning):
self.loads('{}', encoding='fake')
class TestPyDecode(TestDecode, PyTest): pass
class TestCDecode(TestDecode, CTest): pass
| apache-2.0 | 4,445,666,602,125,950,000 | 41.281553 | 77 | 0.583008 | false |
kishori82/MetaPathways_Python.3.0 | utilities/compute_status.py | 1 | 18299 | #!/usr/bin/python
# File created on 27 Jan 2012.
from __future__ import division
__author__ = "Kishori M Konwar"
__copyright__ = "Copyright 2013, MetaPathways"
__credits__ = ["r"]
__version__ = "1.0"
__maintainer__ = "Kishori M Konwar"
__status__ = "Release"
try:
import sys, os, re, math, gzip
from glob import glob
from os import makedirs, sys, remove, rename, path
from optparse import OptionParser
except:
print """ Could not load some user defined module functions"""
print """ Make sure your typed 'source MetaPathwaysrc'"""
print """ """
sys.exit(3)
DBLIST = ["COG-14-2016-10-20", "kegg-uniprot-2016-10-20", "metacyc-2016-10-31", "refseq-2016-10-06-rel-78", "eggnog-v4-2016-10-30"]
MAX_NUM = 10000000
def printf(fmt, *args):
sys.stdout.write(fmt % args)
sys.stdout.flush()
def eprintf(fmt, *args):
sys.stderr.write(fmt % args)
sys.stderr.flush()
class FastaRecord():
def __init__(self, longname, sequence):
self.longname = longname
self.sequence = sequence
fields = [ x.strip() for x in self.longname.split(' ') ]
if len(fields) > 0:
self.name = fields[0]
else:
self.name = None
class FastaReader():
"""Parses a fasta record from a string or file."""
stop = False
START_PATTERN = re.compile(r'^>')
name = None
future_name =None
sequence=""
def __init__(self, fasta_filename):
try:
self.file = open(fasta_filename, 'r')
except IOError:
print "Cannot open fasta file " + fasta_filename
def __iter__(self):
return self
def close(self):
self.file.close()
def next(self):
if self.stop:
raise StopIteration
try:
if not self.name:
self.name = self.file.readline().strip()
line = self.file.readline()
except:
line = None
if not line:
self.stop = True
raise StopIteration
fragments = []
while line and not self.START_PATTERN.search(line):
fragments.append(line.strip())
line = self.file.readline()
# print line
if self.future_name:
self.name = self.future_name
if line:
self.future_name = line.strip()
self.sequence =''.join(fragments)
self.seqname = self.name
return FastaRecord(self.name, self.sequence)
usage= sys.argv[0] + """ -f folder [-s stage ...] [-t type] [-m max_samples] [-n max_items] """
parser = None
def createParser():
global parser
epilog = """
    This script reports the processing status of MetaPathways samples by examining the outputs of each pipeline stage (input, ORF prediction, B/LAST, parsing, annotation, PGDB)
"""
epilog = re.sub(r'[ \t\f\v]+',' ', epilog)
parser = OptionParser(usage=usage, epilog=epilog)
parser.add_option("-f", dest="folders", action='append',
                      help='add a folder to be examined; it expects input and output folders under this folder')
parser.add_option("-s", dest="stages", default=[], action='append',
help=''' INPUT : 1\n
ORFs : 2\n
B/LAST : 3\n
PARSE : 4\n
ANNOT : 5\n
PGDB : 6\n
                               add the stage to be examined (use 3:<dbname> or 4:<dbname> for database-specific stages)''')
parser.add_option("-t", dest="type", default='1', choices=['1', '2', '3', '4'],
help=''' present : 1
isNonEmpty : 2
num lines : 3
file size : 4
                               turns on the cumulative mode''')
parser.add_option("-c", action="store_false", dest="cumul", default=True,
help="print the preceeding stages")
parser.add_option("-m", dest="top", type='int', default=100000000,
help='max number of samples to read [default : 100000000]')
parser.add_option("-n", dest="max_num", type='int', default=100000000,
help='max number of items to count to save time [default : 100000000]')
def valid_arguments(opts, args):
state = True
if opts.folders == None :
print 'ERROR: Did not specify any folder'
state = False
return state
def isAminoAcidSequence(sequence):
if sequence:
count = 0
list = [ 'a', 't', 'c', 'g', 'A', 'T', 'C', 'G']
for x in sequence:
if x in list:
count+=1
if count/len(sequence) < 0.80:
return True
else:
return False
return True
def filter_sequence(sequence):
if isAminoAcidSequence(sequence):
return sequence
sequence = re.sub(r'[^atcgATCG]','-', sequence.strip())
subsequences = sequence.split('-')
max_length = 0;
longest_sequence = "";
for seq in subsequences:
if len(seq) > max_length :
longest_sequence = seq
max_length = len(seq)
return longest_sequence
class FastaRecord(object):
def __init__(self, name, sequence):
self.name = name
self.sequence = sequence
# return FastaRecord(title, sequence)
def read_fasta_records(input_file):
records = []
sequence=""
name=""
while 1:
line = input_file.readline()
if line == "":
if sequence!="" and name!="":
records.append(FastaRecord(name, sequence))
return records
if line=='\n':
continue
line = line.rstrip()
if line.startswith(">") :
if sequence!="" and name!="":
records.append(FastaRecord(name, sequence))
name = line.rstrip()
sequence =""
else:
sequence = sequence + line.rstrip()
return records
def numLinesPf(filename):
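    # Count the ORF entries in a Pathway Tools .pf file (plain or gzipped) by counting
    # lines that start with NAME; stops early after MAX_NUM entries.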
global MAX_NUM
count = 0
commPATT = re.compile(r'^NAME')
fh = None
if path.exists(filename):
fh = open(filename)
elif path.exists(filename + ".gz"):
fh = gzip.open(filename + ".gz")
else:
return 0
for line in fh:
if commPATT.search(line):
count += 1
if count > MAX_NUM:
break
fh.close()
return count
def numLines(filename):
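    # Count non-comment lines in a file (plain or gzipped); stops early after MAX_NUM lines.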
global MAX_NUM
count = 0
commPATT = re.compile(r'^#')
fh = None
if path.exists(filename):
fh = open(filename)
elif path.exists(filename + ".gz"):
fh = gzip.open(filename + ".gz")
else:
return 0
for line in fh:
if not commPATT.search(line):
count += 1
if count > MAX_NUM:
break
fh.close()
return count
def numSeqFasta(file):
""" process one fasta sequence at a time """
global MAX_NUM
fastareader= FastaReader(file)
count = 0
for record in fastareader:
seqname = record.name
seq = record.sequence
length = len(seq)
count += 1
if count > MAX_NUM:
break
fastareader.close()
return count
def maxSizeFasta(file):
""" process one fasta sequence at a time """
fastareader= FastaReader(file)
max_length=0
count = 0
for record in fastareader:
if count > 10000:
break
seqname = record.name
seq = record.sequence
length = len(seq)
count += 1
if length > max_length:
max_length =length
fastareader.close()
return max_length
def avgSizeFasta(file):
""" process one fasta sequence at a time """
fastareader= FastaReader(file)
tot_length=0
count = 0
for record in fastareader:
if count > 10000:
break
seqname = record.name
seq = record.sequence
length = len(seq)
tot_length += length
count += 1
fastareader.close()
avg = tot_length/count
return avg
def extractSampleName(name):
sample_name = name
sample_name = re.sub(r'^.*/','',sample_name, re.I)
sample_name = re.sub(r'^.*\\','',sample_name, re.I)
sample_name = re.sub(r'\.fasta$','',sample_name, re.I)
sample_name = re.sub(r'\.fna$','',sample_name, re.I)
sample_name = re.sub(r'\.faa$','',sample_name, re.I)
sample_name = re.sub(r'\.fas$','',sample_name, re.I)
sample_name = re.sub(r'\.fa$','',sample_name, re.I)
return sample_name
def add_samples(folder, folders_samples, top):
files = glob( folder + '/input/*.fasta')
count = top
for file in sorted(files):
if count==0:
break
count -= 1
sample_name = extractSampleName(file)
if not folder in folders_samples:
folders_samples[folder] = {}
folders_samples[folder][sample_name] = {}
def check_file(file):
if not path.exists(file):
return path.exists(file + '.gz')
return path.exists(file)
def isNotEmpty(file):
size = 0
if path.exists(file):
try:
size = path.getsize(file)
except:
pass
return size
def check_type1(folders_samples, folder, stages, top):
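    # Type 1 check: mark each requested stage output of every sample as 'Present' or 'Absent'.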
count = top
for sample in sorted(folders_samples[folder].keys()):
if count==0:
break
count -=1
if '1' in stages:
status = check_file(folder + '/input/' + sample+'.fasta')
if status:
folders_samples[folder][sample]['1'] = 'Present'
else:
folders_samples[folder][sample]['1'] = 'Absent'
if '2' in stages:
filename =folder + '/output/' + sample+ '/orf_prediction/' + sample + '.qced.faa'
status = check_file(filename)
if status:
folders_samples[folder][sample]['2'] = 'Present'
else:
folders_samples[folder][sample]['2'] = 'Absent'
for db in get_db_names(stages, '3'):
filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout"
status = check_file(filename)
if status:
folders_samples[folder][sample]['3:' +db] = 'Present'
else:
folders_samples[folder][sample]['3:' + db] = 'Absent'
for db in get_db_names(stages, '4'):
filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout.parsed.txt"
status = check_file(filename)
if status:
folders_samples[folder][sample]['4:' + db] = 'Present'
else:
folders_samples[folder][sample]['4:' + db] = 'Absent'
def check_type2(folders_samples, folder, stages, top):
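    # Type 2 check: record the size in bytes of each requested stage output file.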
count = top
for sample in sorted(folders_samples[folder].keys()):
if count==0:
break
count -= 1
if '1' in stages:
filename = folder + '/input/' + sample+'.fasta'
size = isNotEmpty(filename)
folders_samples[folder][sample]['1'] = size
if '2' in stages:
filename =folder + '/output/' + sample+ '/orf_prediction/' + sample + '.qced.faa'
size = isNotEmpty(filename)
folders_samples[folder][sample]['2'] = size
for db in get_db_names(stages, '3'):
filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout"
size = isNotEmpty(filename)
if size:
folders_samples[folder][sample]['3:' + db ] = int(size)
else:
folders_samples[folder][sample]['3:' + db] = int(size)
for db in get_db_names(stages, '4'):
filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout.parsed.txt"
size = isNotEmpty(filename)
if size:
folders_samples[folder][sample]['4:' + db ] = int(size)
else:
folders_samples[folder][sample]['4:' + db ] = int(size)
def check_type3(folders_samples, folder, stages, top):
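    # Type 3 check: record counts for each requested stage output (FASTA sequences for
    # the input, data lines for tabular outputs, NAME entries for the ptools .pf file).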
count = top
i = 1
for sample in sorted(folders_samples[folder].keys()):
if count==0:
break
count -= 1
eprintf(" %3d\t%s\n",i, sample)
i+=1
if '1' in stages:
filename = folder + '/input/' + sample+'.fasta'
size = numSeqFasta(filename)
folders_samples[folder][sample]['1'] = int(size)
if '2' in stages:
filename =folder + '/output/' + sample+ '/orf_prediction/' + sample + '.qced.faa'
size = numLines(filename)
folders_samples[folder][sample]['2'] = size
for db in get_db_names(stages, '3'):
filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout"
size = numLines(filename)
folders_samples[folder][sample]['3:' + db ] = int(size)
for db in get_db_names(stages, '4'):
filename =folder + '/output/' + sample+ '/blast_results/' + sample + "." + db +".LASTout.parsed.txt"
size = numLines(filename)
folders_samples[folder][sample]['4:' + db ] = int(size)
if '5' in stages:
filename =folder + '/output/' + sample+ '/results/annotation_table/' + sample + ".ORF_annotation_table.txt"
size = numLines(filename)
folders_samples[folder][sample]['5'] = int(size)
if '6' in stages:
filename =folder + '/output/' + sample+ '/results/annotation_table/' + sample + ".functional_and_taxonomic_table.txt"
size = numLines(filename)
folders_samples[folder][sample]['6'] = int(size)
if '7' in stages:
filename =folder + '/output/' + sample+ '/ptools/' + "0.pf"
size = numLinesPf(filename)
folders_samples[folder][sample]['7'] = int(size)
def get_db_names(stages, c):
threePATT = re.compile(r'^' + c + ':')
stage_suffix = {}
for stage in stages.keys():
result = threePATT.search(stage)
if result:
fields = [x.strip() for x in stage.split(':') ]
stage_suffix[fields[1]] = True
return sorted(stage_suffix.keys())
def check_type(folder_samples, folder, stages, type, top):
if type=='1':
check_type1(folder_samples, folder, stages, top)
return
if type=='2':
check_type2(folder_samples, folder, stages, top)
return
if type=='3':
check_type3(folder_samples, folder, stages, top)
return
def print_status(folders_samples, folder, sample, stage, type) :
# print folders_samples
printf('\t' + str(folders_samples[folder][sample][stage]))
# the main function
SIZE = 1000
def main(argv, errorlogger = None, runstatslogger = None):
global parser
(opts, args) = parser.parse_args(argv)
if not valid_arguments(opts, args):
print usage
sys.exit(0)
stages = {}
for stage in opts.stages:
stages[stage] = True
type = opts.type
    # adding the samples
folders_samples = {}
for folder in opts.folders:
add_samples(folder, folders_samples, opts.top)
#MAX_NUM
global MAX_NUM
MAX_NUM = opts.max_num
# stage processing
count = opts.top
for folder in opts.folders:
eprintf("%s\n",folder)
check_type(folders_samples, folder, stages, opts.type, opts.top)
printf("#FOLDER\tSAMPLE")
if '1' in stages:
printf("\tINPUT_FILE")
if '2' in stages:
printf("\tORF_FILE")
for db in get_db_names(stages, '3'):
if '3:'+db in stages:
printf("\t" + db + ".LASTout")
for db in get_db_names(stages, '4'):
if '4:'+db in stages:
printf("\t" + db + ".LASTout.parsed.txt")
if '5' in stages:
printf("\tORF_ANNOTATION")
if '6' in stages:
printf("\tTAXONOMIC_FUNCTIONAL")
if '7' in stages:
printf("\tPTOOLS_ORF")
printf("\n")
status1 = ''
status2 = ''
if type=='1':
status1 = 'Y/N'
status2 = 'Y/N'
if type=='2':
status1 = 'Size'
status2 = 'Size'
if type=='3':
status1 = 'Avg Len'
status2 = 'Num Lines'
printf("#Name\tName")
if '1' in stages:
printf("\t"+ status1 + "(1)")
if '2' in stages:
printf("\t"+ status2 + "(2)")
for db in get_db_names(stages, '3'):
if '3:'+db in stages:
printf("\t"+ status2 + "(3)")
for db in get_db_names(stages, '4'):
if '4:'+db in stages:
printf("\t"+ status2 + "(4)" )
if '5' in stages:
printf("\t"+ status2 + "(5)")
if '6' in stages:
printf("\t"+ status2 + "(6)")
if '7' in stages:
printf("\t"+ status2 + "(7)")
printf("\n")
count = opts.top
for folder in opts.folders:
for sample in sorted(folders_samples[folder].keys()):
if count ==0:
break
count -= 1
printf("%s\t%s",folder, sample)
if '1' in stages:
print_status(folders_samples, folder, sample, '1', opts.type)
if '2' in stages:
print_status(folders_samples, folder, sample, '2', opts.type)
for db in get_db_names(stages, '3'):
if '3:'+db in stages:
print_status(folders_samples, folder, sample, '3:'+ db, opts.type)
for db in get_db_names(stages, '4'):
if '4:'+db in stages:
print_status(folders_samples, folder, sample, '4:'+ db, opts.type)
if '5' in stages:
print_status(folders_samples, folder, sample, '5', opts.type)
if '6' in stages:
print_status(folders_samples, folder, sample, '6', opts.type)
if '7' in stages:
print_status(folders_samples, folder, sample, '7', opts.type)
printf("\n")
# print folders_samples
# python ~/MetaPathways_Python.3.0/utilities/compute_status.py -f . -s 1 -s 2 -s 3:COG-14-2016-10-20 -s 3:kegg-uniprot-2016-10-20 -s 3:metacyc-2016-10-31 -s 3:refseq-2016-10-06-rel-78 -s 3:eggnog-v4-2016-10-30 -s 4:COG-14-2016-10-20 -s 4:kegg-uniprot-2016-10-20 -s 4:metacyc-2016-10-31 -s 4:refseq-2016-10-06-rel-78 -s 4:eggnog-v4-2016-10-30 -s 5 -s 6 -s 7 -t 3 -m 4 -n 400
# the main function of metapaths
if __name__ == "__main__":
createParser()
main(sys.argv[1:])
| mit | 3,113,348,630,334,160,000 | 25.713869 | 384 | 0.539155 | false |
YYKI/blog-webapp | www/apis.py | 1 | 2900 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'syuson'
'''
JSON API definition
'''
import json, logging, inspect, functools
class APIError(Exception):
"""
the base APIError which contains error(required), data(optional) and message(optional)
"""
def __init__(self, error, data='', message=''):
super(APIError, self).__init__(message)
self.error = error
self.data = data
self.message = message
class APIValueError(APIError):
"""
Indicate the input value has error or invalid. The data specifies the error field of input form.
"""
def __init__(self, field, message=''):
super(APIValueError, self).__init__('value:invalid', field, message)
class APIResourceNotFoundError(APIError):
"""
Indicate the resource was not found. The data specifies the resource name.
"""
def __init__(self, field, message=''):
super(APIResourceNotFoundError, self).__init__('value:notfound', field, message)
class APIPermissionError(APIError):
"""
Indicate the api has no permission.
"""
def __init__(self, message=''):
super(APIPermissionError, self).__init__('permission:forbidden', 'permission', message)
self.arg = message
class Page(object):
"""docstring for Page"""
def __init__(self, item_count, page_index=1, page_size=3):
'''
Init Pagination by item_count, page_index and page_size.
        >>> p1 = Page(100, 1, 10)
>>> p1.page_count
10
>>> p1.offset
0
>>> p1.limit
10
>>> p2 = Page(90, 9, 10)
>>> p2.page_count
9
>>> p2.offset
80
>>> p2.limit
10
>>> p3 = Page(91, 10, 10)
>>> p3.page_count
10
>>> p3.offset
90
>>> p3.limit
10
'''
self.item_count = item_count
self.page_size = page_size
self.page_count = item_count // page_size + (1 if item_count % page_size > 0 else 0)
if (item_count == 0) or (page_index > self.page_count):
self.offset = 0
self.limit = 0
self.page_index = 1
else:
self.page_index = page_index
self.offset = self.page_size * (page_index - 1)
self.limit = self.page_size
self.has_next = self.page_index < self.page_count
self.has_previous = self.page_index > 1
print('item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (
self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit))
def __str__(self):
return 'item_count: %s, page_count: %s, page_index: %s, page_size: %s, offset: %s, limit: %s' % (
self.item_count, self.page_count, self.page_index, self.page_size, self.offset, self.limit)
__repr__ = __str__
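    # Example (sketch): with the default page_size of 3, Page(26, 2) yields
    # page_count 9, offset 3 and limit 3.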
| gpl-3.0 | -8,884,925,662,730,124,000 | 27.712871 | 105 | 0.561034 | false |
faylau/oVirt3.3WebAPITest | src/TestData/Host/ITC03010304_CreateHost_NameVerify.py | 1 | 2575 | #encoding:utf-8
__authors__ = ['"Liu Fei" <[email protected]>']
__version__ = "V0.1"
'''
# ChangeLog:
#---------------------------------------------------------------------------------
# Version Date Desc Author
#---------------------------------------------------------------------------------
# V0.1       2014/10/17    Initial version                              Liu Fei
#---------------------------------------------------------------------------------
'''
'''-----------------------------------------------------------------------------------------
@note: Pre-TestData
-----------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------
@note: Test-Data
-----------------------------------------------------------------------------------------'''
# Host name: (1) contains special characters; (2) exceeds 255 characters.
host_name_list = ['node-ITC03010304-~!@#$%^',
'node-ITC03010304-abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz01234'
]
xml_host_info = '''
<data_driver>
<host>
<name>%s</name>
<address>10.1.167.4</address>
<root_password>qwer1234</root_password>
</host>
<host>
<name>%s</name>
<address>10.1.167.4</address>
<root_password>qwer1234</root_password>
</host>
</data_driver>
''' % (host_name_list[0], host_name_list[1])
'''-----------------------------------------------------------------------------------------
@note: Post-TestData
-----------------------------------------------------------------------------------------'''
xml_del_option = '''
<action>
<force>true</force>
<async>false</async>
</action>
'''
'''-----------------------------------------------------------------------------------------
@note: ExpectedResult
-----------------------------------------------------------------------------------------'''
expected_status_code = 400
expected_info_list = [
'''
<fault>
<reason>Operation Failed</reason>
<detail>[Host name must be formed of alphanumeric characters, numbers or "-_."]</detail>
</fault>
''',
'''
<fault>
<reason>Operation Failed</reason>
<detail>[size must be between 1 and 255]</detail>
</fault>
'''
] | apache-2.0 | 5,178,600,399,952,293,000 | 35.608696 | 276 | 0.366337 | false |
rabrahm/ceres | utils/FastRotators/spfr.py | 1 | 18831 | from pylab import *
import pyfits
from PyAstronomy import pyasl
import scipy
from scipy import interpolate
from scipy import ndimage
from scipy import signal
import pickle
from matplotlib.backends.backend_pdf import PdfPages
import os
#from pyevolve import G1DList
#from pyevolve import GSimpleGA
from multiprocessing import Pool
import time
def download_models(webpage='http://svo2.cab.inta-csic.es/theory/models/coelho/high/data/',dest='../../data/'):
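	# Download the Coelho (2014) high-resolution model grid (Teff 6000-10000 K, logg 2.5-4.5,
	# [Fe/H] -1.0 to +0.2) from the SVO theory server into dest/COELHO2014.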
os.system('mkdir '+dest+'/COELHO2014')
cwd = os.getcwd()
os.chdir(dest+'/COELHO2014')
tf = np.arange(6000,10001,250)
gf = np.arange(2.5,4.6,0.5)
#gf = np.array([2.5])
zf = np.array([-1.,-0.5,0.0,0.2])
for t in tf:
for g in gf:
for z in zf:
modname = get_modname(t,g,z)
if z<0:
sz = 'm'
else:
sz = 'p'
sz = sz+str(float(np.absolute(z))).replace('.','')+'p00/'
os.system('wget ' + webpage+sz+modname+'.fits')
os.system('wget ' + webpage+sz+modname+'plc.fits')
os.chdir(cwd)
return True
def n_Edlen(l):
sigma = 1e4 / l
sigma2 = sigma*sigma
n = 1 + 1e-8 * (8342.13 + 2406030 / (130-sigma2) + 15997/(38.9-sigma2))
return n
def n_Morton(l):
sigma = 1e4 / l
sigma2 = sigma*sigma
n = 1 + 6.4328e-5 + 2.94981e-2 / (146.-sigma2) + 2.5540e-4/(41.-sigma2)
return n
def ToAir(l):
return (l / n_Edlen(l))
def ToVacuum(l):
cond = 1
l_prev = l.copy()
while(cond):
l_new = n_Edlen(l_prev) * l
if (max(np.absolute(l_new - l_prev)) < 1e-10): cond = 0
l_prev = l_new
return l_prev
def get_modname(t,g,z):
st = str(int(t))
if t<10000:
st = '0'+st
sg = '+'+str(np.around(g,1))
if z < 0:
sz = 'm'
else:
sz = 'p'
z=float(z)
sz = sz + str(np.around(np.absolute(z),1))
sz = sz.replace('.','')
return 't'+st+'_g'+sg+'_'+sz+'p00_hr'
def get_model(t,g,z,model_path='../../data/COELHO2014/'):
modname = model_path + get_modname(t,g,z)
try:
out = pyfits.getdata(modname+'.fits')
except:
out = pyfits.getdata(modname+'plc.fits')
return out
def get_near(x,vec):
if x == vec[0]:
mmin = vec[0]
mmax = vec[1]
elif x == vec[-1]:
mmin = vec[-2]
mmax = vec[-1]
else:
tvec = vec - x
In = np.where(tvec < 0)[0]
mmin = tvec[In].max() + x
Ix = np.where(tvec >= 0)[0]
mmax = tvec[Ix].min() + x
return mmin,mmax
def trilinear_interpolation(t,g,z,model_path='../../data/COELHO2014/'):
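	# Trilinearly interpolate the model grid in (Teff, logg, [Fe/H]) to the requested
	# parameters; returns the model wavelength array and the interpolated flux.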
teffs = np.arange(6000,10001,250)
loggs = np.arange(2.5,4.6,0.5)
fehs = np.array([-1.,-0.5,0.0,0.2])
x0,x1 = get_near(t,teffs)
y0,y1 = get_near(g,loggs)
z0,z1 = get_near(z,fehs)
xd = (t-x0)/(x1-x0)
yd = (g-y0)/(y1-y0)
zd = (z-z0)/(z1-z0)
try:
hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'.fits')
except:
hd = pyfits.getheader(model_path+get_modname(x0,y0,z0)+'plc.fits')
c000 = get_model(x0,y0,z0,model_path)
c001 = get_model(x0,y0,z1,model_path)
c010 = get_model(x0,y1,z0,model_path)
c100 = get_model(x1,y0,z0,model_path)
c110 = get_model(x1,y1,z0,model_path)
c101 = get_model(x1,y0,z1,model_path)
c011 = get_model(x0,y1,z1,model_path)
c111 = get_model(x1,y1,z1,model_path)
wav = np.arange(len(c111))*hd['CDELT1'] + hd['CRVAL1']
c00 = c000*(1-xd) + c100*xd
c01 = c001*(1-xd) + c101*xd
c10 = c010*(1-xd) + c110*xd
c11 = c011*(1-xd) + c111*xd
c0 = c00*(1-yd) + c10*yd
c1 = c01*(1-yd) + c11*yd
c = c0*(1-zd) + c1*zd
return wav,c
def normalize_model(w,f):
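	# Estimate the continuum by iteratively fitting a 6th-order polynomial and clipping
	# points that fall well below the fit (absorption lines); returns the coefficients.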
ow = w.copy()
of = f.copy()
#plot(w,f)
while True:
#medflts = scipy.signal.medfilt(f,1001)
coef = np.polyfit(w,f,6)
fited = np.polyval(coef,w)
res = f - fited
I = np.where(res > -np.sqrt(np.var(res)))[0]
w,f = w[I],f[I]
if len(w) < 0.3* len(ow):
break
#plot(ow,np.polyval(coef,ow))
#show()
return coef
def spec_ccf(sw,sf,mw,mf,vi,vf,dv):
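	# Cross-correlate the observed spectrum (sw, sf) with the inverted model (mw, mf)
	# over velocities from vi to vf km/s in steps of dv; returns (velocities, CCF).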
mf = mf -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(mw,mf,k=1)
v = vi
retccf = []
vels = []
while v<=vf:
swt = sw * (1 + v/299792.458)
mft = interpolate.splev(swt,tck)
#if v == 0:
# plot(swt,mft)
# plot(swt,sft)
# show()
mft -= np.mean(mft)
sft = sf - np.mean(sf)
#sft = sf.copy()
#print np.sum(mft**2),np.sum(sft**2)
retccf.append(np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2)))
vels.append(v)
v+=dv
return np.array(vels),np.array(retccf)
def ccf_fft(swt,sft,mwt,mft):
mf = mft -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(np.log(mwt),mf,k=1)
sw = np.log(swt)
tck2 = interpolate.splrep(sw,sft,k=1)
nsw = np.linspace(sw[0], sw[-1], 5000)
sf = interpolate.splev(nsw,tck2)
mf = interpolate.splev(nsw,tck)
sf -= np.mean(sf)
mf -= np.mean(mf)
plot(nsw,sf)
plot(nsw,mf)
show()
retccf = np.fft.ifft(np.conj(np.fft.fft(sf))*np.fft.fft(mf))
retccf = np.hstack((retccf[2500:],retccf[:2500]))
retvels = np.arange(len(retccf)) - 0.5*len(retccf)
retvels *= (nsw[1]-nsw[0])
retvels = 299792.458*(np.exp(retvels)-1.)
return retvels, retccf
def ccf_simple(sw,sf,mw,mf,rv):
mf = mf -1
mf = -mf
#plot(mw,mf)
tck = interpolate.splrep(mw,mf,k=1)
swt = sw * (1 + rv/299792.458)
mft = interpolate.splev(swt,tck)
mft -= np.mean(mft)
sft = sf - np.mean(sf)
return np.sum(mft*sft)/np.sqrt(np.sum(mft**2)*np.sum(sft**2))
def clean_strong_lines(mw,sc,mode=1):
if mode==1:
#""""
I = np.where((mw>6520)&(mw<6600))[0]
sc[I] = 1.
I = np.where((mw>5888)&(mw<5897))[0]
sc[I] = 1.
I = np.where((mw>4310)&(mw<4360))[0]
sc[I] = 1.
I = np.where((mw>4840)&(mw<4880))[0]
sc[I] = 1.
I = np.where((mw>4070)&(mw<4130))[0]
sc[I] = 1.
I = np.where((mw>3875)&(mw<3900))[0]
sc[I] = 1.
I = np.where((mw>3920)&(mw<3945))[0]
sc[I] = 1.
I = np.where((mw>3955)&(mw<3980))[0]
sc[I] = 1.
I = np.where(mw<3850)[0]
sc[I] = 1.
#"""
if mode==2:
#""""
I = np.where((mw>6550)&(mw<6570))[0]
sc[I] = 1.
I = np.where((mw>5888)&(mw<5897))[0]
sc[I] = 1.
I = np.where((mw>4320)&(mw<4350))[0]
sc[I] = 1.
I = np.where((mw>4850)&(mw<4870))[0]
sc[I] = 1.
I = np.where((mw>4090)&(mw<4110))[0]
sc[I] = 1.
I = np.where((mw>3875)&(mw<3900))[0]
sc[I] = 1.
I = np.where((mw>3920)&(mw<3945))[0]
sc[I] = 1.
I = np.where((mw>3955)&(mw<3980))[0]
sc[I] = 1.
I = np.where(mw<3850)[0]
sc[I] = 1.
#"""
return sc
def RVforFR(wavs,flxs,teff=6700,logg=4.0,feh=-1.0,vsini=100.,model_path='../../data/COELHO2014/',vmin=-1000.,vmax=1000.,vstep=10.):
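	# Estimate the radial velocity of a fast rotator: cross-correlate each order with a
	# rotationally broadened model (teff, logg, feh, vsini), average the CCFs weighted by
	# the number of useful pixels, and fit a Gaussian. Returns the Gaussian parameters,
	# the velocity grid, the averaged CCF and the fitted profile.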
def fitfunc(p,x):
ret = p[3] + p[0] * np.exp(-.5*((x-p[1])/p[2])**2)
return ret
errfunc = lambda p,x,y: np.ravel( (fitfunc(p,x)-y) )
#sc = get_model(teff,logg,feh)
#hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
#wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
teff = float(teff)
try:
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
except:
mw,sc = trilinear_interpolation(teff,logg,feh,model_path)
for order in range(len(flxs)):
flxs[order] = clean_strong_lines(wavs[order],flxs[order])
sc = clean_strong_lines(mw,sc)
II = np.where(sc != 1)[0]
JJ = np.where(sc == 1)[0]
coef = normalize_model(mw[II],sc[II])
sc /= np.polyval(coef,mw)
sc[JJ] = 1.
mw = ToVacuum(mw)
weis1 = []
ccftot = []
for i in range(wavs.shape[0]):
#plot(wavs[i],flxs[i])
scf = flxs[i]
scw = wavs[i]
J = np.where(scf!=0)[0]
scw,scf = scw[J],scf[J]
I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini)
#plot(mw[I],tmf)
J = np.where(scf!=1)[0]
if len(J)>100:
ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep)
#plot(ccv,ccf)
#show()
#ccf = np.array(ccf)
wei1 = len(np.where(scf!=1)[0])**2
weis1.append(wei1)
if len(ccftot)==0:
ccftot = ccf.copy()*wei1
else:
ccftot = np.vstack((ccftot,ccf.copy()*wei1))
#show()
weis1 = np.array(weis1)
ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1)
p0 = [ccftot.min(),ccv[np.argmin(ccftot)],vsini,ccftot[0]]
p1, success = scipy.optimize.leastsq(errfunc,p0, args=(ccv,ccftot))
return p1,ccv,ccftot,fitfunc(p1,ccv)
def calc_bss2(vels,xc,coef, bot_i=0.15, bot_f=0.4, top_i=0.6, top_f=0.9, dt=0.01):
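	# Bisector span of the CCF: median bisector velocity over the lower depth levels
	# (bot_i..bot_f) minus that over the upper levels (top_i..top_f); -999.0 on failure.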
try:
I1 = np.where((vels>coef[1]-3*coef[2]) & (vels<coef[1]) )[0]
I2 = np.where((vels<coef[1]+3*coef[2]) & (vels>coef[1]) )[0]
I3 = np.where(vels<coef[1]-4*coef[2])[0]
I4 = np.where(vels>coef[1]+4*coef[2])[0]
I = np.hstack((I3,I4))
base = np.median(xc[I])
xc = base - xc
xc /= xc.max()
v1,x1 = vels[I1],xc[I1]
v2,x2 = vels[I2],xc[I2]
#plot(v1,x1)
#plot(v2,x2)
#show()
dp = top_f
vect = []
while dp >= top_i:
lb = np.where(x1>dp)[0][0]
m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1])
n = v1[lb] - m*x1[lb]
bs1 = m*dp+n
lb = np.where(x2>dp)[0][-1]
m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1])
n = v2[lb] - m*x2[lb]
bs2 = m*dp+n
vect.append(0.5*(bs2+bs1))
dp-=dt
vect = np.array(vect)
dp = bot_f
vecb = []
while dp >= bot_i:
lb = np.where(x1>dp)[0][0]
m = (v1[lb] - v1[lb-1])/(x1[lb]-x1[lb-1])
n = v1[lb] - m*x1[lb]
bs1 = m*dp+n
lb = np.where(x2>dp)[0][-1]
m = (v2[lb] - v2[lb+1])/(x2[lb]-x2[lb+1])
n = v2[lb] - m*x2[lb]
bs2 = m*dp+n
vecb.append(0.5*(bs2+bs1))
dp-=dt
vecb = np.array(vecb)
return np.median(vecb) - np.median(vect)
except:
return -999.0
"""
def lnlike(theta, W, F, Ferr):
mw,sc = trilinear_interpolation(int(theta[0]),theta[1],theta[2])
sct = clean_strong_lines(mw,sc.copy())
#plot(mw,sc)
#show()
coef = normalize_model(mw,sct)
sc /= np.polyval(coef,mw)
#print gfd
mw = ToVacuum(mw)
mw *= 1 + theta[3]/299792.458
totD,totM,totE = np.array([]),np.array([]),np.array([])
for i in range(W.shape[0]):
scf = F[i]
scw = W[i]
scfe = Ferr[i]
J = np.where(scf!=0)[0]
scw,scf,scfe = scw[J],scf[J],scfe[J]
I = np.where((mw>scw[0]-10) & (mw<scw[-1]+10))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, theta[4])
tck = interpolate.splrep(mw[I],tmf,k=1)
tmf = interpolate.splev(scw,tck)
tmf = clean_strong_lines(scw,tmf.copy())
I = np.where(tmf!=1)[0]
#plot(scw,tmf)
#plot(scw[I],tmf[I])
#plot(scw[I],scf[I])
#show()
#print gfd
tmf = tmf[I]
scf = scf[I]
scfe = scfe[I]
tmf /= np.sum(tmf)
tsf = scf/np.sum(scf)
tse = scfe*(np.sum(scf)**2)
totD = np.hstack((totD,tsf))
totM = np.hstack((totM,tmf))
totE = np.hstack((totE,tse))
#plot(scw[I],tsf)
#plot(scw[I],tmf)
#plot(scw[I],tsf + 1./np.sqrt(tse))
#show()
#print fds
#print theta
#show()
#print gvfd
#ret = -np.log(2*np.pi) + np.log(np.sum(np.exp(-0.5*((y-model)/yerr)**2)/yerr))
#ret = -0.5*(np.sum(inv_sigma2*(F-model)**2 - np.log(inv_sigma2)))
ret = -0.5*(np.sum(totE*(totD-totM)**2 - np.log(totE)))
#for i in range(len(F)):
# errorbar(Y,F[i],yerr=Ferr[i],fmt='b')
#for j in model:
# plot(Y,j,'r')
#show()
#print theta, ret
if np.isnan(ret):
return -np.inf
else:
return ret
def lnprior(theta):
if 6000 < theta[0] < 9000 and 3.0 < theta[1] < 4.5 and -1 < theta[2] < 0.2 and -500 < theta[3] < 500 and 1. < theta[4] < 500.:
return 0.0
return -np.inf
def lnprob(theta, W,F,Ferr):
lp = lnprior(theta)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(theta,W,F,Ferr)
"""
def multiccf(pars):
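	# Objective for the grid search: return the minimum of the weighted-average CCF
	# between the observed orders (module globals wavs/flxs) and a model broadened to
	# pars = (teff, logg, feh, vsini).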
teff,logg,feh,vsini=pars[0],pars[1],pars[2],pars[3]
vmin=-500
vmax=500.
vstep=20.
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
wav = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
try:
sc = get_model(teff,logg,feh)
hd = pyfits.getheader(model_path+get_modname(7000,4.5,0.0)+'.fits')
mw = np.arange(len(sc))*hd['CDELT1'] + hd['CRVAL1']
except:
mw,sc = trilinear_interpolation(teff,logg,feh,model_path)
sc = clean_strong_lines(mw,sc)
II = np.where(sc != 1)[0]
JJ = np.where(sc == 1)[0]
coef = normalize_model(mw[II],sc[II])
sc /= np.polyval(coef,mw)
sc[JJ] = 1.
mw = ToVacuum(mw)
weis1 = []
ccftot = []
for i in range(wavs.shape[0]):
scf = flxs[i].copy()
scw = wavs[i].copy()
J = np.where(scf!=0)[0]
scw,scf = scw[J],scf[J]
I = np.where((mw>scw[0]-100) & (mw<scw[-1]+100))
tmf = pyasl.fastRotBroad(mw[I], sc[I], 0.5, vsini)
#plot(mw[I],tmf)
J = np.where(scf!=1)[0]
if len(J)>100:
ccv,ccf = spec_ccf(scw,scf,mw[I],tmf,vmin,vmax,vstep)
#ccv,ccf = ccf_fft(scw,scf,mw[I],tmf)
#plot(ccv,ccf)
#show()
wei1 = len(np.where(scf!=1)[0])**2
weis1.append(wei1)
if len(ccftot)==0:
ccftot = ccf.copy()*wei1
else:
ccftot = np.vstack((ccftot,ccf.copy()*wei1))
weis1 = np.array(weis1)
ccftot = np.sum(ccftot,axis=0)/ np.sum(weis1)
#print gfds
#ccftot = np.mean(ccftot,axis=0)
#print pars, ccftot.min()
return ccftot.min()
def get_pars_fr(wavst,flxst,model_patht='../../data/COELHO2014/',npools=4,fixG=1.0):
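	# Coarse-to-fine grid search for (Teff, logg, [Fe/H], vsini) that minimizes the CCF
	# minimum, evaluated in parallel with npools workers; logg is fixed to fixG unless
	# fixG == -1. Returns the spline-interpolated best-fit parameters.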
for order in range(len(flxst)):
flxst[order] = clean_strong_lines(wavst[order],flxst[order],mode=1)
t0 = time.time()
global wavs,flxs
global model_path
wavs,flxs=wavst.copy(),flxst.copy()
model_path=model_patht
gt = np.array([6000,7000,8000,9000,10000])
gg = np.array([2.5,3.0,3.5,4.0,4.5])
if fixG != -1:
gg = np.array([fixG])
gz = np.array([-1,-0.5,0.0,0.2])
gr = np.array([10.,50.,100.,150.,200.,250.,300.])
#"""
tr = np.tile(gr,len(gt)*len(gg)*len(gz))
tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz))
tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr))
tt = np.repeat(gt,len(gg)*len(gr)*len(gz))
tot = np.vstack((tt,tg,tz,tr)).T
#for pars in tot:
# pars = [8000,4.0,-0.5,40.0]
# print pars, multiccf(pars)
p = Pool(npools)
vals = np.array((p.map(multiccf, list(tot))))
p.terminate()
I = np.argmin(vals)
best_vals = tot[I]
bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3]
#"""
t1 = time.time()
print bt,bg,bz,br, (t1-t0)/60.,'mins'
#bt,bg,bz,br = 7000.,4.5, 0.2, 100.0
gt = np.arange(bt-1000,bt+1001,250)
I = np.where((gt>=6000) & (gt<=10000))[0]
gt = gt[I]
gr = np.arange(br-60.,br+61.,20.)
I = np.where(gr>=10)[0]
gr = gr[I]
tr = np.tile(gr,len(gt)*len(gg)*len(gz))
tg = np.repeat(np.tile(gg,len(gt)),len(gr)*len(gz))
tz = np.repeat(np.tile(gz,len(gt)*len(gg)),len(gr))
tt = np.repeat(gt,len(gg)*len(gr)*len(gz))
tot = np.vstack((tt,tg,tz,tr)).T
p = Pool(npools)
vals = np.array((p.map(multiccf, list(tot))))
p.terminate()
I = np.argmin(vals)
best_vals = tot[I]
bt,bg,bz,br = best_vals[0],best_vals[1],best_vals[2],best_vals[3]
t2 = time.time()
print bt,bg,bz,br, (t2-t1)/60.,'mins'
#np.savetxt('temp_grid.txt',vals)
if fixG==-1:
grid = np.reshape(vals,(len(gt),len(gg),len(gz),len(gr)))
tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1)
tckg = interpolate.splrep(gg,np.arange(len(gg)),k=1)
tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1)
tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1)
itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1)
itckg = interpolate.splrep(np.arange(len(gg)),gg,k=1)
itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1)
itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1)
st = np.arange(gt[0],gt[-1]+1,10.)
sg = np.arange(gg[0],gg[-1]+0.01,0.1)
sz = np.arange(gz[0],gz[-1]+0.01,0.1)
sr = np.arange(gr[0],gr[-1]+1.,5.)
st = interpolate.splev(st,tckt)
sg = interpolate.splev(sg,tckg)
sz = interpolate.splev(sz,tckz)
sr = interpolate.splev(sr,tckr)
tr2 = np.tile(sr,len(st)*len(sg)*len(sz))
tg2 = np.repeat(np.tile(sg,len(st)),len(sr)*len(sz))
tz2 = np.repeat(np.tile(sz,len(st)*len(sg)),len(sr))
tt2 = np.repeat(st,len(sg)*len(sr)*len(sz))
tot2 = np.vstack((tt2,tg2,tz2,tr2))
zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
I = np.argmin(zi)
minval = tot2[:,I]
mint = interpolate.splev(minval[0],itckt)
ming = interpolate.splev(minval[1],itckg)
minz = interpolate.splev(minval[2],itckz)
minr = interpolate.splev(minval[3],itckr)
else:
grid = np.reshape(vals,(len(gt),len(gz),len(gr)))
tckt = interpolate.splrep(gt,np.arange(len(gt)),k=1)
tckz = interpolate.splrep(gz,np.arange(len(gz)),k=1)
tckr = interpolate.splrep(gr,np.arange(len(gr)),k=1)
itckt = interpolate.splrep(np.arange(len(gt)),gt,k=1)
itckz = interpolate.splrep(np.arange(len(gz)),gz,k=1)
itckr = interpolate.splrep(np.arange(len(gr)),gr,k=1)
st = np.arange(gt[0],gt[-1]+1,10.)
sz = np.arange(gz[0],gz[-1]+0.01,0.1)
sr = np.arange(gr[0],gr[-1]+1.,5.)
st = interpolate.splev(st,tckt)
sz = interpolate.splev(sz,tckz)
sr = interpolate.splev(sr,tckr)
tr2 = np.tile(sr,len(st)*len(sz))
tz2 = np.repeat(np.tile(sz,len(st)),len(sr))
tt2 = np.repeat(st,len(sr)*len(sz))
tot2 = np.vstack((tt2,tz2,tr2))
zi = ndimage.map_coordinates(grid, tot2, order=3, mode='nearest')
I = np.argmin(zi)
minval = tot2[:,I]
mint = interpolate.splev(minval[0],itckt)
ming = fixG
minz = interpolate.splev(minval[1],itckz)
minr = interpolate.splev(minval[2],itckr)
#d = {'grid':grid, 'zi':zi, 'tot2':tot2, 'gt':gt, 'gg':gg, 'gz':gz, 'gr':gr}
#pickle.dump(d,open('temp_dict.pkl'))
return float(mint),float(ming),float(minz),float(minr)
def plot_CCF_FR(xc_dict,path='XC.pdf'):
vels = xc_dict['vels']
xc_av = xc_dict['xc_av']
XCmodelgau = xc_dict['XCmodelgau']
#refvel = xc_dict['refvel']
p1gau = xc_dict['p1gau']
f1 = figure()
pp = PdfPages(path)
ax1 = f1.add_subplot(111)
ax1.plot(vels, xc_av,'b.', label='CCF')
ax1.plot(vels, XCmodelgau,'r-',label='Gaussian fit')
xlabel('Velocity (km/s)')
ylabel('XC')
ax1.axvline(p1gau[1],linestyle=':',color='r')
ax1.axhline(0.0,linestyle='-')
title('Average Cross-Correlation Function + Fit')
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(handles[::-1], labels[::-1],prop={'size':6})
pp.savefig()
pp.close()
clf()
pass
"""
def trans_chromosome(chromosome):
teff = chromosome[0]*100.+chromosome[1]*10.+chromosome[2]
m = (10000.- 6000.)/999.
n = 6000.
teff = teff*m + n
logg = chromosome[3] + chromosome[4]*0.1
m = (4.5 - 3.0)/9.9
n = 3.
logg = logg*m + n
feh = chromosome[5] + chromosome[6]*0.1
m = (0.2 - -1.)/9.9
n = -1.
feh = feh*m + n
vsini = chromosome[7]*10. + chromosome[8]
m = (300. - 10.)/99.
n = 10.
vsini = vsini*m + n
return teff, logg, feh, vsini
global wavs, flxs
def find_pars_GA(wavs,flxs,model_path='../../data/COELHO2014/'):
def eval_func(chromosome):
print list(chromosome)
teff, logg, feh, vsini = trans_chromosome(chromosome)
print teff, logg, feh, vsini
pt,vels,ccf,mod = RVforFR(wavs,flxs,teff=teff,logg=logg,feh=feh,vsini=vsini,model_path=model_path)
score = -ccf.min()
return score
genome = G1DList.G1DList(9)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome, interactiveMode=True)
ga.setGenerations(40)
ga.setMutationRate(0.2)
ga.setPopulationSize(20)
#ga.setCrossoverRate(1.0)
genome.setParams(rangemin=0, rangemax=9)
#ga.setMultiProcessing(True)
ga.evolve(freq_stats=10)
print ga.bestIndividual()
print trans_chromosome(ga.bestIndividual())
"""
| mit | -4,745,925,225,003,760,000 | 24.208835 | 131 | 0.602676 | false |
nexcess/r1soft-admin-console | rac/forms.py | 1 | 3719 | # -*- coding: utf-8 -*-
# Nexcess.net r1soft-admin-console
# Copyright (C) 2015 Nexcess.net L.L.C.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from rac.models import R1softHost, PolicyTemplate
from wtforms_alchemy import model_form_factory
from flask.ext.wtf import Form
from wtforms import IntegerField, BooleanField, StringField, RadioField, SelectMultipleField
from wtforms.validators import NumberRange, InputRequired
import wtforms.widgets
ModelForm = model_form_factory(Form)
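# Renders a SelectMultipleField as a list of checkboxes instead of the default multi-select box.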
class MultiCheckboxField(SelectMultipleField):
widget = wtforms.widgets.ListWidget(prefix_label=False)
option_widget = wtforms.widgets.CheckboxInput()
class R1softHostForm(ModelForm):
class Meta:
model = R1softHost
class PolicyTemplateForm(ModelForm):
class Meta:
model = PolicyTemplate
class HostConfigurationForm(Form):
hard_quota = IntegerField('Manager Disk Space (Hard Quota)',
[NumberRange(min=1, max=99)])
soft_quota = IntegerField('Manager Disk Space (Soft Quota)',
[NumberRange(min=1, max=99)])
task_history_limit = IntegerField('Days to retain Task History',
[NumberRange(min=1, max=365)])
http_enabled = BooleanField('Enabled')
http_port = IntegerField('Port',
[NumberRange(min=1, max=65535)])
http_max_conn = IntegerField('Max Connections',
[NumberRange(min=1, max=9999)])
https_enabled = BooleanField('Enabled')
https_port = IntegerField('Port',
[NumberRange(min=1, max=65535)])
https_keystore = StringField('Keystore Path')
https_max_conn = IntegerField('Max Connections',
[NumberRange(min=1, max=9999)])
class RestoreForm(Form):
base_path = StringField('Base Path')
file_names = SelectMultipleField('Files to Restore',
choices=[])
restore_target = RadioField('Restore Target',
default='original_host',
choices=[
('original_host', 'Original Host'),
('alt_host', 'Alternate Host')])
alt_restore_location = StringField('Alternate Location')
alt_restore_host = StringField('Alternate Host',
[])
alt_restore_port = IntegerField('Alternate Host Port',
[NumberRange(min=1, max=65535)])
overwrite_existing = BooleanField('Overwrite Existing Files',
[InputRequired()])
use_compression = BooleanField('Use Compression',
[InputRequired()])
estimate_size = BooleanField('Estimate Restore Size',
[InputRequired()])
| gpl-2.0 | 8,345,511,016,906,627,000 | 42.752941 | 92 | 0.59317 | false |
teampheenix/StarCraft-Casting-Tool | scctool/matchgrabber/alpha.py | 1 | 6950 | """Provide match grabber for AlphaTL."""
import logging
from datetime import datetime, timedelta, timezone
from urllib.request import urlopen, urlretrieve
import scctool.settings
import scctool.settings.translation
from scctool.matchgrabber.custom import MatchGrabber as MatchGrabberParent
# create logger
module_logger = logging.getLogger(__name__)
_ = scctool.settings.translation.gettext
class MatchGrabber(MatchGrabberParent):
"""Grabs match data from Alpha SC2 Teamleague."""
_provider = "AlphaSC2"
def __init__(self, *args):
"""Init match grabber."""
super().__init__(*args)
self._urlprefix = "https://alpha.tl/match/"
self._apiprefix = "https://alpha.tl/api?match="
def updateCountdown(self, datetime_str):
if not datetime_str or not scctool.settings.config.parser.getboolean(
"Countdown", "matchgrabber_update"):
return
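        # The API timestamp is naive; treat it as UTC and convert to local time before
        # passing it to the countdown tab.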
dt_obj = datetime.strptime(
datetime_str, '%Y-%m-%d %H:%M:%S')
dt_obj = dt_obj.replace(tzinfo=timezone(timedelta(hours=0)))
dt_obj = dt_obj.astimezone()
self._controller.view.countdownTab.setFromTimestamp(dt_obj.timestamp())
def grabData(self, metaChange=False, logoManager=None):
"""Grab match data."""
data = self._getJson()
if(data['code'] != 200):
msg = 'API-Error: ' + data['error']
raise ValueError(msg)
else:
self._rawData = data
overwrite = (metaChange
or self._matchData.getURL().strip()
!= self.getURL().strip())
with self._matchData.emitLock(overwrite,
self._matchData.metaChanged):
self._matchData.setNoSets(5, 1, resetPlayers=overwrite)
self._matchData.setMinSets(3)
self._matchData.setSolo(False)
self._matchData.setNoVetoes(0)
self._matchData.resetLabels()
if overwrite:
self._matchData.resetSwap()
self.updateCountdown(data.get('datetime', ''))
league = data['tournament']
if not isinstance(league, str):
league = "TBD"
league = league.replace('Non-pro', 'Non-Pro')
league = league.replace('Semi-pro', 'Semi-Pro')
                self._matchData.setLeague(self._aliasLeague(league))
for idx, mapname in enumerate(data['maps']):
if not isinstance(mapname, str):
mapname = "TBD"
self._matchData.setMap(idx, mapname)
for team_idx in range(2):
for set_idx, player in enumerate(
data[f'lineup{team_idx + 1}']):
try:
playername = self._aliasPlayer(player['nickname'])
if not isinstance(playername, str):
playername = "TBD"
self._matchData.setPlayer(
self._matchData.getSwappedIdx(team_idx),
set_idx,
playername, str(player['race']))
except Exception:
self._matchData.setPlayer(
self._matchData.getSwappedIdx(team_idx),
set_idx, 'TBD', 'Random')
team = data[f'team{team_idx + 1}']
name, tag = team['name'], team['tag']
if not isinstance(name, str):
name = "TBD"
if not isinstance(tag, str):
tag = ""
self._matchData.setTeam(
self._matchData.getSwappedIdx(team_idx),
self._aliasTeam(name), tag)
for set_idx in range(5):
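                    # data['games'][set_idx] presumably identifies the winning team (1 or 2);
                    # val * 2 - 3 turns that into a -1/+1 map score, defaulting to 0 if missing.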
try:
score = int(data['games'][set_idx]) * 2 - 3
except Exception:
score = 0
self._matchData.setMapScore(
set_idx, score, overwrite, True)
self._matchData.setAllKill(False)
self._matchData.autoSetMyTeam(
swap=scctool.settings.config.parser.getboolean(
"SCT", "swap_myteam"))
if logoManager is not None:
self.downloadLogos(logoManager)
def downloadLogos(self, logoManager):
"""Download team logos."""
if self._rawData is None:
raise ValueError(
"Error: No raw data.")
for idx in range(2):
try:
logo_idx = self._matchData.getSwappedIdx(idx) + 1
oldLogo = logoManager.getTeam(logo_idx)
logo = logoManager.newLogo()
url = self._rawData[f'team{idx + 1}']['logo']
if url:
new_logo = logo.fromURL(
self._rawData[f'team{idx + 1}']['logo'],
localFile=oldLogo.getAbsFile())
if new_logo:
logoManager.setTeamLogo(logo_idx, logo)
else:
module_logger.info("Logo download is not needed.")
except Exception:
module_logger.exception("message")
def downloadBanner(self):
"""Download team logos."""
data_dir = scctool.settings.casting_data_dir
transparent = scctool.settings.config.parser.getboolean(
"SCT", "transparent_match_banner")
if self._rawData is None:
raise ValueError(
"Error: No raw data.")
fname = data_dir + "/matchbanner.png"
url = "https://alpha.tl/announcement/"\
+ str(self.getID())
if transparent:
url = url + "?transparent"
else:
url = url + "?vs"
localFile = scctool.settings.getAbsPath(fname)
needs_download = True
size = 1024 * 400
try:
with open(localFile, "rb") as in_file:
local_byte = in_file.read(size)
file = urlopen(url)
data = file.read(size)
if(data == local_byte):
needs_download = False
except FileNotFoundError:
module_logger.warning("Match banner not found.")
except Exception:
module_logger.exception("message")
if needs_download:
try:
urlretrieve(url, scctool.settings.getAbsPath(fname))
except Exception:
module_logger.exception("message")
else:
module_logger.info('No need to redownload match banner')
| gpl-3.0 | 1,358,352,931,005,396,000 | 37.611111 | 79 | 0.501439 | false |
five-elephants/a_game | pygame_hello.py | 1 | 2613 | #!/usr/bin/env python
import pygame
from pygame.locals import *
import random
class Wandering_text:
def __init__(self, text, color, center, bbox):
self.font = pygame.font.Font(None, 36)
self.surface = self.font.render(text, 1, color)
self.pos = self.surface.get_rect(center=center)
self.bbox = bbox
def draw(self, screen):
screen.blit(self.surface, self.pos)
def update(self):
self.pos.left += (random.random() - 0.5) * 10.0
self.pos.top += (random.random() - 0.5) * 10.0
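        # Bounce the text back inside the bounding box when a random step pushes it past an edge.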
if self.pos.left < self.bbox.left:
self.pos.left = self.bbox.left + (self.bbox.left - self.pos.left)
elif self.pos.right > self.bbox.right:
            self.pos.right = self.bbox.right - (self.pos.right - self.bbox.right)
elif self.pos.top < self.bbox.top:
self.pos.top = self.bbox.top + (self.bbox.top - self.pos.top)
elif self.pos.bottom > self.bbox.bottom:
            self.pos.bottom = self.bbox.bottom - (self.pos.bottom - self.bbox.bottom)
def show_fps(screen, fps):
font = pygame.font.Font(None, 18)
surf = font.render("fps: %.1f" % (fps), 1, (180, 10, 10))
frame = surf.get_rect()
frame.right = screen.get_width()
screen.blit(surf, frame)
def main():
if not pygame.font:
print "Error: no fonts"
return
if not pygame.mixer:
print "Error: no sound"
return
random.seed()
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption('Hello World')
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((250, 250, 250))
screen.blit(background, (0,0))
#if pygame.font:
#font = pygame.font.Font(None, 36)
#text = font.render("Hello World", 1, (10, 10, 10))
#textpos = text.get_rect(centerx=background.get_width()/2)
#background.blit(text, textpos)
hello = Wandering_text("Hello World", (10, 10, 10),
center=(screen.get_width()/2, screen.get_height()/2),
bbox=screen.get_rect())
pygame.display.flip()
clock = pygame.time.Clock()
while True:
clock.tick(60)
for event in pygame.event.get():
if event.type == QUIT:
return
elif event.type == KEYDOWN and event.key == K_ESCAPE:
return
screen.blit(background, (0,0))
hello.update()
hello.draw(screen)
show_fps(screen, clock.get_fps())
pygame.display.flip()
if __name__ == '__main__':
main()
| gpl-3.0 | -6,722,202,947,777,309,000 | 28.693182 | 83 | 0.580176 | false |
CitrineInformatics/refkit | lookup/arxiv.py | 1 | 4957 | """
Functions for working with the arXiv.org api.
"""
import urllib
import requests
from xml.etree import ElementTree
from refkit.util import arxivid
from refkit.metadata import Metadata
from refkit.format import author
def search(lookup):
"""
Search for a reference on arXiv.org given a lookup string. Since the arXiv.org api can return mutiple references
for a single query, this function raises an error in the case that more than one reference was returned.
:param lookup: String with the lookup to search for on arXiv.org
:raises ValueError: If a reference with the specified lookup could not be found on arXiv.org
:returns: Metadata object with information about the reference that was identified
"""
try:
id = arxivid.extract(lookup)
arxivData = _getMetadataFromArxivId(id)
return _saveMetadata(arxivData)
except Exception:
raise ValueError('Could not match query to arXiv')
def _getMetadataFromArxivId(id):
"""
Get metadata from arXiv.org given an arXiv identifier.
:param id: arXiv identifier to look up
:returns: Result from arXiv api query
"""
url = 'http://export.arxiv.org/api/query?id_list=' + id + '&start=0&max_results=2'
return requests.get(url)
def _saveMetadata(data):
"""
Convert the results of an arXiv api call to a Metadata object.
:param data: Results of the arXiv api call
:raises: ValueError if the metadata could not be saved
:returns: Metadata object with the content of data
"""
try:
root = ElementTree.fromstring(data.content)
entry = _getEntry(root)
return _saveMetadataFromEntry(entry)
except Exception:
raise
def _getEntry(root):
"""
Get the node in the xml data that contains the result from the query to save. If multiple entries are found
in the query result, this function raises an error.
:param root: Root of the XML data from the arXiv query
:raises: ValueError is the entry cannot be extracted from the XML data
:returns: Node that contains the results from the query
"""
entry = None
for i in root:
if i.tag.endswith('entry'):
if entry is not None:
raise ValueError('Multiple entries in result')
entry = i
return entry
def _saveMetadataFromEntry(entry):
"""
Save the metadata from an entry returned by an arXiv query.
:param entry: Entry from which to save metadata
:returns: Metadata object with the results in the entry
"""
metadata = Metadata()
metadata.publisher = 'arXiv.org'
_saveValue(metadata, 'title', entry, 'title')
_saveValue(metadata, 'url', entry, 'id')
_saveValue(metadata, 'doi', entry, 'doi')
_saveYear(metadata, entry)
_saveAuthors(metadata, entry)
metadata.tidy()
if len(metadata.author) == 0 and len(metadata.title) == 0:
raise RuntimeError()
return metadata
def _saveValue(metadata, attribute, entry, tag):
"""
Extract a value from an XML object and save it in a Metadata object.
:param metadata: Metadata object to save the value in
:param attribute: Name of the attribute to save the value as in metadata
:param entry: XML entry with the value to save
:param tag: Tag of the value in entry to save
"""
for i in entry:
if i.tag.endswith(tag):
try:
setattr(metadata, attribute, i.text)
            except Exception:
pass
break
def _saveYear(metadata, entry):
"""
Extract the year in which the article was last updated. arXiv api query results include both the published and
updated dates. This function saves the updated year.
:param metadata: Metadata object to save the year in
:param entry: XML entry with the value to save
"""
for i in entry:
if i.tag.endswith('updated'):
try:
setattr(metadata, 'year', i.text.split('-')[0])
except Exception:
pass
break
def _saveAuthors(metadata, entry):
"""
Extract the authors from an XML object and convert them to given and family names.
:param metadata: Metadata object to save the authors in
:param entry: XML entry with the authors to save
"""
for i in entry:
if i.tag.endswith('author'):
try:
metadata.author.append(_getName(i))
except Exception:
pass
def _getName(entry):
"""
Extract the name for an XML object.
:param entry: XML entry with the name to save
:raises: ValueError if a name cannot be found
:returns: Dictionary with the given and family name in the entry
"""
for i in entry:
if i.tag.endswith('name'):
try:
return author.splitName(i.text)
except Exception:
raise
| mit | -8,469,765,585,758,530,000 | 32.268456 | 116 | 0.643938 | false |
aneesahmed/gameplan2 | scrum/urls.py | 1 | 4983 | from django.conf.urls import url
#from . import views
from scrum.views.portfolioView import *
from scrum.views.portfolioView import PortfolioListbyStatus, PortfolioList, PortfolioDetails, PortfolioCreate, PortfolioUpdate, PortfolioDelete
from scrum.views.releaseView import ReleaseList, ReleaseCreate, ReleaseDetails, ReleaseDelete, ReleaseUpdate
from scrum.views.releaseView import UserstoryDetails, UserstoryCreate, UserstoryUpdate, UserstoryDelete
from scrum.views.releaseView import TaskCreate, TaskUpdate, TaskDelete
from scrum.views.releaseView import SprintDetails, SprintCreate, SprintUpdate, SprintDelete
from scrum.views.teamview import TeamList, TeamCreate, TeamUpdate, TeamDetails, TeamDelete
from scrum.views.teamview import TeamResourceCreate, TeamResourceUpdate, TeamResourceDelete
app_name = 'scrum'
urlpatterns = [
url(r'^$', PortfolioList.as_view(), name='index'),
#url(r'^dashboard', Dashboard.as_view(), name='dashboard'),
url(r'^portfolio/list/(?P<status_id>\d+)$', PortfolioListbyStatus.as_view(), name='portfolioListbyStatus'),
url(r'^portfolio/list', PortfolioList.as_view(), name='portfolioList'),
url(r'^portfolio/(?P<pk>\d+)$', PortfolioDetails.as_view(), name='portfolio-detail'),
url(r'^portfolio/add', PortfolioCreate.as_view(), name='portfolio-add'),
url(r'^portfolio/update/(?P<pk>\d+)$', PortfolioUpdate.as_view(), name='portfolio-update'),
url(r'^portfolio/delete/(?P<pk>\d+)$', PortfolioDelete.as_view(), name='portfolio-delete'),
# release Release
url(r'^releaseList', ReleaseList.as_view(), name='releaseList'),
url(r'^release/(?P<pk>\d+)$', ReleaseDetails.as_view(), name='release-detail'),
url(r'^release/add/(?P<portfolio_id>\d+)$', ReleaseCreate.as_view(), name='release-add'),
url(r'^release/update/(?P<pk>\d+)$', ReleaseUpdate.as_view(), name='release-update'),
url(r'^release/delete/(?P<pk>\d+)$', ReleaseDelete.as_view(), name='release-delete'),
# Userstory, userstory
#url(r'^releaseList', ReleaseList.as_view(), name='releaseList'),
url(r'^Userstory/(?P<pk>\d+)$', UserstoryDetails.as_view(), name='userstory-detail'),
url(r'^Userstory/add/(?P<release_id>\d+)$',UserstoryCreate.as_view(), name='userstory-add'),
url(r'^Userstory/update/(?P<pk>\d+)$',UserstoryUpdate.as_view(), name='userstory-update'),
url(r'^Userstory/delete/(?P<pk>\d+)$', UserstoryDelete.as_view(), name='userstory-delete'),
# Task, task
#url(r'^Task/(?P<pk>\d+)$', TaskDetails.as_view(), name='userstory-detail'),
url(r'^Task/add/(?P<userstory_id>\d+)$', TaskCreate.as_view(), name='task-add'),
url(r'^Task/update/(?P<pk>\d+)$', TaskUpdate.as_view(), name='task-update'),
url(r'^Task/delete/(?P<pk>\d+)$', TaskDelete.as_view(), name='task-delete'),
# Sprint, sprint
# release Release
# not needed now may be later:
# url(r'^sprintList', SprintList.as_view(), name='releaseList'),
# not needed now may be later:
url(r'^sprint/(?P<pk>\d+)$', SprintDetails.as_view(), name='sprint-detail'),
url(r'^sprint/add/(?P<release_id>\d+)$', SprintCreate.as_view(), name='sprint-add'),
url(r'^sprint/update/(?P<pk>\d+)$', SprintUpdate.as_view(), name='sprint-update'),
url(r'^sprint/delete/(?P<pk>\d+)$', SprintDelete.as_view(), name='sprint-delete'),
#url(r'^sprint/userstory-select/(?P<pk>\d+)$', Sprint_userstorySelect.as_view(), name='sprint-userstory-select'),
# url(r'^portfolio/Details/(?P<id>\d+)/$', portfolioDetails(), name='portfolioDetails'),
# url(r'^portfolio/Update/(?P<id>\d+)/$', portfolios.portfolioUpdate, name='portfolioUpdate'),
# url(r'^portfolio/Update', portfolios.portfolioUpdate, name='portfolioUpdate'),
# url(r'^portfolio/new$', portfolios.new_portfolio, name='new_portfolio'),
# url(r'^portfolio', portfolios.portfolio, name='portfolio'),
# url(r'^userstoryList', userstories.,
#url(r'^release/(?P<id>\d+)/$', portfolios.releaseUpdate, name='releaseUpdate'),
# url(r'^release/new$', userstories.new_userstory, name='new_release'),
# url(r'^userstory/new$', userstories.new_userstory, name='new_userstory'),
#url(r'^sprint/new$', views.new_sprint, name='new_sprint'),
#url(r'^task/new$', views.new_task, name='new_task'),
#url(r'^team/new$', views.new_team, name='new_release'),
# Team, team
url(r'^team/list', TeamList.as_view(), name='team-list'),
url(r'^team/(?P<pk>\d+)$', TeamDetails.as_view(), name='team-detail'),
url(r'^team/add', TeamCreate.as_view(), name='team-add'),
url(r'^team/update/(?P<pk>\d+)$', TeamUpdate.as_view(), name='team-update'),
url(r'^team/delete/(?P<pk>\d+)$', TeamDelete.as_view(), name='team-delete'),
url(r'^teamresource/add', TeamResourceCreate.as_view(), name='teamresource-add'),
url(r'^teamresource/update/(?P<pk>[\w\-]+)$', TeamResourceUpdate.as_view(), name='teamresource-update'),
url(r'^teamresource/delete/(?P<pk>[\w\-]+)$', TeamResourceDelete.as_view(), name='teamresource-delete'),
]
| apache-2.0 | 4,923,318,147,837,823,000 | 68.208333 | 143 | 0.679109 | false |
xiaoweih/DLV | networks/imageNet.py | 1 | 1666 | import os, struct
from array import array as pyarray
from cvxopt.base import matrix
import numpy as np
import PIL.Image
# FIXME: need actual class names
def LABELS(index):
ls = labels()
if len(ls) > 0:
return ls[index]
else: return range(1000)[index]
def labels():
file = open('networks/imageNet/caffe_ilsvrc12/synset_words.txt', 'r')
data = file.readlines()
ls = []
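    # Each line of synset_words.txt is '<synset id> <label words>'; keep only the label part.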
for line in data:
words = line.split()
ls.append(' '.join(words[1:]))
return ls
def save(layer,image,filename):
"""
"""
import cv2
import copy
image_cv = copy.deepcopy(image)
image_cv = image_cv.transpose(1, 2, 0)
image_cv[:,:,0] += 103.939
image_cv[:,:,1] += 116.779
image_cv[:,:,2] += 123.68
#print(np.amax(image_cv),np.amin(image_cv))
cv2.imwrite(filename, image_cv)
# from matplotlib import pyplot
# import matplotlib as mpl
# fig = pyplot.figure()
# ax = fig.add_subplot(1,1,1)
# # image = image.reshape(3,32,32).transpose(1,2,0)
# imgplot = ax.imshow(image.T, cmap=mpl.cm.Greys)
# imgplot.set_interpolation('nearest')
# ax.xaxis.set_ticks_position('top')
# ax.yaxis.set_ticks_position('left')
# pyplot.savefig(filename)
def show(image):
"""
"""
from matplotlib import pyplot
import matplotlib as mpl
fig = pyplot.figure()
ax = fig.add_subplot(1,1,1)
#image = image.reshape(3,32,32).transpose(1,2,0)
imgplot = ax.imshow(image.T, cmap=mpl.cm.Greys)
imgplot.set_interpolation('nearest')
ax.xaxis.set_ticks_position('top')
ax.yaxis.set_ticks_position('left')
pyplot.show()
| gpl-3.0 | 3,160,891,197,615,542,300 | 23.5 | 73 | 0.612845 | false |
pidydx/grr | grr/lib/aff4.py | 1 | 100047 | #!/usr/bin/env python
"""AFF4 interface implementation.
This contains an AFF4 data model implementation.
"""
import __builtin__
import abc
import itertools
import StringIO
import threading
import time
import zlib
import logging
from grr.lib import access_control
from grr.lib import config_lib
from grr.lib import data_store
from grr.lib import lexer
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
from grr.lib import type_info
from grr.lib import utils
from grr.lib.rdfvalues import aff4_rdfvalues
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import protodict as rdf_protodict
# Factor to convert from seconds to microseconds
MICROSECONDS = 1000000
# Age specifications for opening AFF4 objects.
NEWEST_TIME = "NEWEST_TIME"
ALL_TIMES = "ALL_TIMES"
# Just something to write on an index attribute to make it exist.
EMPTY_DATA = "X"
AFF4_PREFIXES = set(["aff4:", "metadata:"])
class Error(Exception):
pass
class LockError(Error):
pass
class OversizedRead(Error, IOError):
pass
class InstantiationError(Error, IOError):
pass
class ChunkNotFoundError(IOError):
pass
class BadGetAttributeError(Exception):
pass
class MissingChunksError(Exception):
def __init__(self, message, missing_chunks=None):
super(MissingChunksError, self).__init__(message)
self.missing_chunks = missing_chunks or []
class DeletionPool(object):
"""Pool used to optimize deletion of large object hierarchies."""
def __init__(self, token=None):
super(DeletionPool, self).__init__()
if token is None:
raise ValueError("token can't be None")
self._objects_cache = {}
self._children_lists_cache = {}
self._urns_for_deletion = set()
self._token = token
def _ObjectKey(self, urn, mode):
return u"%s:%s" % (mode, utils.SmartUnicode(urn))
def Open(self, urn, aff4_type=None, mode="r"):
"""Opens the named object.
DeletionPool will only open the object if it's not in the pool already.
Otherwise it will just return the cached version. Objects are cached
based on their urn and mode. I.e. same object opened with mode="r" and
mode="rw" will be actually opened two times and cached separately.
DeletionPool's Open() also doesn't follow symlinks.
Args:
urn: The urn to open.
aff4_type: If this parameter is set, we raise an IOError if
the object is not an instance of this type.
mode: The mode to open the file with.
Returns:
An AFF4Object instance.
Raises:
IOError: If the object is not of the required type.
"""
key = self._ObjectKey(urn, mode)
try:
obj = self._objects_cache[key]
except KeyError:
obj = FACTORY.Open(
urn, mode=mode, follow_symlinks=False, token=self._token)
self._objects_cache[key] = obj
if aff4_type is not None and not isinstance(obj, aff4_type):
raise InstantiationError(
"Object %s is of type %s, but required_type is %s" %
(urn, obj.__class__.__name__, aff4_type.__name__))
return obj
def MultiOpen(self, urns, aff4_type=None, mode="r"):
"""Opens many urns efficiently, returning cached objects when possible."""
not_opened_urns = []
aff4_type = _ValidateAFF4Type(aff4_type)
for urn in urns:
key = self._ObjectKey(urn, mode)
try:
result = self._objects_cache[key]
if aff4_type is not None and not isinstance(result, aff4_type):
continue
yield result
except KeyError:
not_opened_urns.append(urn)
if not_opened_urns:
for obj in FACTORY.MultiOpen(
not_opened_urns, follow_symlinks=False, mode=mode, token=self._token):
key = self._ObjectKey(obj.urn, mode)
self._objects_cache[key] = obj
if aff4_type is not None and not isinstance(obj, aff4_type):
continue
yield obj
def ListChildren(self, urn):
"""Lists children of a given urn. Resulting list is cached."""
result = self.MultiListChildren([urn])
try:
return result[urn]
except KeyError:
return []
def MultiListChildren(self, urns):
"""Lists children of a bunch of given urns. Results are cached."""
result = {}
not_listed_urns = []
for urn in urns:
try:
result[urn] = self._children_lists_cache[urn]
except KeyError:
not_listed_urns.append(urn)
if not_listed_urns:
for urn, children in FACTORY.MultiListChildren(
not_listed_urns, token=self._token):
result[urn] = self._children_lists_cache[urn] = children
for urn in not_listed_urns:
self._children_lists_cache.setdefault(urn, [])
result.setdefault(urn, [])
return result
def RecursiveMultiListChildren(self, urns):
"""Recursively lists given urns. Results are cached."""
result = {}
checked_urns = set()
not_cached_urns = []
urns_to_check = urns
while True:
found_children = []
for urn in urns_to_check:
try:
children = result[urn] = self._children_lists_cache[urn]
found_children.extend(children)
except KeyError:
not_cached_urns.append(urn)
checked_urns.update(urns_to_check)
urns_to_check = set(found_children) - checked_urns
if not urns_to_check:
break
for urn, children in FACTORY.RecursiveMultiListChildren(
not_cached_urns, token=self._token):
result[urn] = self._children_lists_cache[urn] = children
return result
def MarkForDeletion(self, urn):
"""Marks object and all of its children for deletion."""
self.MultiMarkForDeletion([urn])
def MultiMarkForDeletion(self, urns):
"""Marks multiple urns (and their children) for deletion."""
all_children_urns = self.RecursiveMultiListChildren(urns)
urns += list(itertools.chain.from_iterable(all_children_urns.values()))
self._urns_for_deletion.update(urns)
for obj in self.MultiOpen(urns):
obj.OnDelete(deletion_pool=self)
@property
def root_urns_for_deletion(self):
"""Roots of the graph of urns marked for deletion."""
roots = set()
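    # A urn becomes a root unless an already-chosen root is a prefix of it; roots that turn
    # out to be children of the new urn are dropped again as "fake roots".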
for urn in self._urns_for_deletion:
new_root = True
str_urn = utils.SmartUnicode(urn)
fake_roots = []
for root in roots:
str_root = utils.SmartUnicode(root)
if str_urn.startswith(str_root):
new_root = False
break
elif str_root.startswith(str_urn):
fake_roots.append(root)
if new_root:
roots -= set(fake_roots)
roots.add(urn)
return roots
@property
def urns_for_deletion(self):
"""Urns marked for deletion."""
return self._urns_for_deletion
def _ValidateAFF4Type(aff4_type):
"""Validates and normalizes aff4_type to class object."""
if aff4_type is None:
return None
# Check that we have the right type.
if not isinstance(aff4_type, type):
raise TypeError("aff4_type=%s must be a type" % aff4_type)
if not issubclass(aff4_type, AFF4Object):
raise TypeError("aff4_type=%s must be a subclass of AFF4Object." %
aff4_type)
return aff4_type
class Factory(object):
"""A central factory for AFF4 objects."""
def __init__(self):
self.intermediate_cache = utils.AgeBasedCache(
max_size=config_lib.CONFIG["AFF4.intermediate_cache_max_size"],
max_age=config_lib.CONFIG["AFF4.intermediate_cache_age"])
# Create a token for system level actions. This token is used by other
# classes such as HashFileStore and NSRLFilestore to create entries under
# aff4:/files, as well as to create top level paths like aff4:/foreman
self.root_token = access_control.ACLToken(
username="GRRSystem", reason="Maintenance").SetUID()
self.notification_rules = []
self.notification_rules_timestamp = 0
@classmethod
def ParseAgeSpecification(cls, age):
"""Parses an aff4 age and returns a datastore age specification."""
try:
return (0, int(age))
except (ValueError, TypeError):
pass
if age == NEWEST_TIME:
return data_store.DB.NEWEST_TIMESTAMP
elif age == ALL_TIMES:
return data_store.DB.ALL_TIMESTAMPS
elif len(age) == 2:
start, end = age
return (int(start), int(end))
raise RuntimeError("Unknown age specification: %s" % age)
def GetAttributes(self, urns, token=None, age=NEWEST_TIME):
"""Retrieves all the attributes for all the urns."""
urns = set([utils.SmartUnicode(u) for u in urns])
to_read = {urn: self._MakeCacheInvariant(urn, token, age) for urn in urns}
# Urns not present in the cache we need to get from the database.
if to_read:
for subject, values in data_store.DB.MultiResolvePrefix(
to_read,
AFF4_PREFIXES,
timestamp=self.ParseAgeSpecification(age),
token=token,
limit=None):
# Ensure the values are sorted.
values.sort(key=lambda x: x[-1], reverse=True)
yield utils.SmartUnicode(subject), values
def SetAttributes(self,
urn,
attributes,
to_delete,
add_child_index=True,
mutation_pool=None,
sync=False,
token=None):
"""Sets the attributes in the data store."""
attributes[AFF4Object.SchemaCls.LAST] = [
rdfvalue.RDFDatetime.Now().SerializeToDataStore()
]
to_delete.add(AFF4Object.SchemaCls.LAST)
if mutation_pool:
mutation_pool.MultiSet(
urn, attributes, replace=False, to_delete=to_delete)
else:
data_store.DB.MultiSet(
urn,
attributes,
token=token,
replace=False,
sync=sync,
to_delete=to_delete)
if add_child_index:
self._UpdateChildIndex(urn, token, mutation_pool=mutation_pool)
def _UpdateChildIndex(self, urn, token, mutation_pool=None):
"""Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
    In order to query for all direct children of an AFF4 object, we then simply
    get the attributes matching the regex index:dir/.+, which are the
    direct children.
Args:
urn: The AFF4 object for which we update the index.
token: The token to use.
mutation_pool: An optional MutationPool object to write to. If not given,
the data_store is used directly.
"""
try:
# Create navigation aids by touching intermediate subject names.
while urn.Path() != "/":
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.Get(urn)
return
except KeyError:
attributes = {
# This updates the directory index.
"index:dir/%s" % utils.SmartStr(basename): [EMPTY_DATA],
}
# This is a performance optimization. On the root there is no point
# setting the last access time since it gets accessed all the time.
# TODO(user): Can we get rid of the index in the root node entirely?
# It's too big to query anyways...
if dirname != u"/":
attributes[AFF4Object.SchemaCls.LAST] = [
rdfvalue.RDFDatetime.Now().SerializeToDataStore()
]
if mutation_pool:
mutation_pool.MultiSet(dirname, attributes, replace=True)
else:
data_store.DB.MultiSet(
dirname, attributes, token=token, replace=True, sync=False)
self.intermediate_cache.Put(urn, 1)
urn = dirname
except access_control.UnauthorizedAccess:
pass
def _DeleteChildFromIndex(self, urn, token, mutation_pool=None):
if mutation_pool:
pool = mutation_pool
else:
pool = data_store.DB.GetMutationPool(token=token)
try:
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.ExpireObject(urn.Path())
except KeyError:
pass
pool.DeleteAttributes(dirname,
["index:dir/%s" % utils.SmartStr(basename)])
to_set = {
AFF4Object.SchemaCls.LAST:
[rdfvalue.RDFDatetime.Now().SerializeToDataStore()]
}
pool.MultiSet(dirname, to_set, replace=True)
if mutation_pool is None:
pool.Flush()
except access_control.UnauthorizedAccess:
pass
def _MakeCacheInvariant(self, urn, token, age):
"""Returns an invariant key for an AFF4 object.
The object will be cached based on this key. This function is specifically
extracted to ensure that we encapsulate all security critical aspects of the
AFF4 object so that objects do not leak across security boundaries.
Args:
urn: The urn of the object.
token: The access token used to receive the object.
age: The age policy used to build this object. Should be one
of ALL_TIMES, NEWEST_TIME or a range.
Returns:
A key into the cache.
"""
return "%s:%s:%s" % (utils.SmartStr(urn), utils.SmartStr(token),
self.ParseAgeSpecification(age))
def CreateWithLock(self,
urn,
aff4_type,
token=None,
age=NEWEST_TIME,
force_new_version=True,
blocking=True,
blocking_lock_timeout=10,
blocking_sleep_interval=1,
lease_time=100):
"""Creates a new object and locks it.
Similar to OpenWithLock below, this creates a locked object. The difference
is that when you call CreateWithLock, the object does not yet have to exist
in the data store.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
force_new_version: Forces the creation of a new object in the data_store.
blocking: When True, wait and repeatedly try to grab the lock.
      blocking_lock_timeout: Maximum wait time when blocking is True.
blocking_sleep_interval: Sleep time between lock grabbing attempts. Used
when blocking is True.
lease_time: Maximum time the object stays locked. Lock will be considered
released when this time expires.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid.
"""
transaction = self._AcquireLock(
urn,
token=token,
blocking=blocking,
blocking_lock_timeout=blocking_lock_timeout,
blocking_sleep_interval=blocking_sleep_interval,
lease_time=lease_time)
# Since we now own the data store subject, we can simply create the aff4
# object in the usual way.
return self.Create(
urn,
aff4_type,
mode="rw",
token=token,
age=age,
force_new_version=force_new_version,
transaction=transaction)
def OpenWithLock(self,
urn,
aff4_type=None,
token=None,
age=NEWEST_TIME,
blocking=True,
blocking_lock_timeout=10,
blocking_sleep_interval=1,
lease_time=100):
"""Open given urn and locks it.
Opens an object and locks it for 'lease_time' seconds. OpenWithLock can
only be used in 'with ...' statement. The lock is released when code
execution leaves 'with ...' block.
The urn is always opened in "rw" mode. Symlinks are not followed in
OpenWithLock() due to possible race conditions.
Args:
urn: The urn to open.
aff4_type: If this optional parameter is set, we raise an
InstantiationError if the object exists and is not an instance of this
type. This check is important when a different object can be stored in
this location.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Should be one of
NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end) in
microseconds since Jan 1st, 1970.
blocking: When True, wait and repeatedly try to grab the lock.
      blocking_lock_timeout: Maximum wait time when blocking is True.
blocking_sleep_interval: Sleep time between lock grabbing attempts. Used
when blocking is True.
lease_time: Maximum time the object stays locked. Lock will be considered
released when this time expires.
Raises:
ValueError: The URN passed in is None.
Returns:
Context manager to be used in 'with ...' statement.
"""
transaction = self._AcquireLock(
urn,
token=token,
blocking=blocking,
blocking_lock_timeout=blocking_lock_timeout,
blocking_sleep_interval=blocking_sleep_interval,
lease_time=lease_time)
# Since we now own the data store subject, we can simply read the aff4
# object in the usual way.
return self.Open(
urn,
aff4_type=aff4_type,
mode="rw",
token=token,
age=age,
follow_symlinks=False,
transaction=transaction)
def _AcquireLock(self,
urn,
token=None,
blocking=None,
blocking_lock_timeout=None,
lease_time=None,
blocking_sleep_interval=None):
"""This actually acquires the lock for a given URN."""
if token is None:
token = data_store.default_token
if urn is None:
raise ValueError("URN cannot be None")
urn = rdfvalue.RDFURN(urn)
try:
return data_store.DB.LockRetryWrapper(
urn,
retrywrap_timeout=blocking_sleep_interval,
retrywrap_max_timeout=blocking_lock_timeout,
blocking=blocking,
lease_time=lease_time,
token=token)
except data_store.DBSubjectLockError as e:
raise LockError(e)
def Copy(self,
old_urn,
new_urn,
age=NEWEST_TIME,
token=None,
limit=None,
update_timestamps=False,
sync=False):
"""Make a copy of one AFF4 object to a different URN."""
if token is None:
token = data_store.default_token
new_urn = rdfvalue.RDFURN(new_urn)
if update_timestamps and age != NEWEST_TIME:
raise ValueError(
"Can't update timestamps unless reading the latest version.")
values = {}
for predicate, value, ts in data_store.DB.ResolvePrefix(
old_urn,
AFF4_PREFIXES,
timestamp=self.ParseAgeSpecification(age),
token=token,
limit=limit):
if update_timestamps:
values.setdefault(predicate, []).append((value, None))
else:
values.setdefault(predicate, []).append((value, ts))
if values:
data_store.DB.MultiSet(
new_urn, values, token=token, replace=False, sync=sync)
self._UpdateChildIndex(new_urn, token)
def Open(self,
urn,
aff4_type=None,
mode="r",
token=None,
local_cache=None,
age=NEWEST_TIME,
follow_symlinks=True,
transaction=None):
"""Opens the named object.
This instantiates the object from the AFF4 data store.
Note that the root aff4:/ object is a container for all other
    objects. Opening it for reading will instantiate an AFF4Volume instance, even
    if the row does not exist.
    The mode parameter specifies how the object should be opened. A read only
    mode will raise when calling Set() on it, while a write only object will
    never read from the data store. Note that it's impossible to open an object
    with pure write support (since we have no idea what type it should be
    without reading the database) - use Create() instead for purely write mode.
Args:
urn: The urn to open.
aff4_type: If this parameter is set, we raise an IOError if
the object is not an instance of this type. This check is important
when a different object can be stored in this location. If mode is
"w", this parameter will determine the type of the object and is
mandatory.
mode: The mode to open the file with.
token: The Security Token to use for opening this item.
local_cache: A dict containing a cache as returned by GetAttributes. If
set, this bypasses the factory cache.
age: The age policy used to build this object. Should be one of
NEWEST_TIME, ALL_TIMES or a time range given as a tuple (start, end) in
microseconds since Jan 1st, 1970.
follow_symlinks: If object opened is a symlink, follow it.
transaction: A lock in case this object is opened under lock.
Returns:
An AFF4Object instance.
Raises:
IOError: If the object is not of the required type.
AttributeError: If the requested mode is incorrect.
"""
aff4_type = _ValidateAFF4Type(aff4_type)
if mode not in ["w", "r", "rw"]:
raise AttributeError("Invalid mode %s" % mode)
if mode == "w":
if aff4_type is None:
raise AttributeError("Need a type to open in write only mode.")
return self.Create(
urn,
aff4_type,
mode=mode,
token=token,
age=age,
force_new_version=False,
transaction=transaction)
urn = rdfvalue.RDFURN(urn)
if token is None:
token = data_store.default_token
if "r" in mode and (local_cache is None or urn not in local_cache):
local_cache = dict(self.GetAttributes([urn], age=age, token=token))
# Read the row from the table. We know the object already exists if there is
# some data in the local_cache already for this object.
result = AFF4Object(
urn,
mode=mode,
token=token,
local_cache=local_cache,
age=age,
follow_symlinks=follow_symlinks,
object_exists=bool(local_cache.get(urn)),
transaction=transaction)
result.aff4_type = aff4_type
# Now we have a AFF4Object, turn it into the type it is currently supposed
# to be as specified by Schema.TYPE.
existing_type = result.Get(result.Schema.TYPE, default="AFF4Volume")
if existing_type:
try:
result = result.Upgrade(AFF4Object.classes[existing_type])
except KeyError:
raise InstantiationError("Unable to open %s, type %s unknown." %
(urn, existing_type))
if aff4_type is not None and not isinstance(result, aff4_type):
raise InstantiationError(
"Object %s is of type %s, but required_type is %s" %
(urn, result.__class__.__name__, aff4_type.__name__))
return result
def MultiOpen(self,
urns,
mode="rw",
token=None,
aff4_type=None,
age=NEWEST_TIME,
follow_symlinks=True):
"""Opens a bunch of urns efficiently."""
if token is None:
token = data_store.default_token
if mode not in ["w", "r", "rw"]:
raise RuntimeError("Invalid mode %s" % mode)
symlinks = {}
aff4_type = _ValidateAFF4Type(aff4_type)
for urn, values in self.GetAttributes(urns, token=token, age=age):
try:
obj = self.Open(
urn,
mode=mode,
token=token,
local_cache={urn: values},
age=age,
follow_symlinks=False)
# We can't pass aff4_type to Open since it will raise on AFF4Symlinks.
# Setting it here, if needed, so that BadGetAttributeError checking
# works.
if aff4_type:
obj.aff4_type = aff4_type
if follow_symlinks and isinstance(obj, AFF4Symlink):
target = obj.Get(obj.Schema.SYMLINK_TARGET)
if target is not None:
symlinks.setdefault(target, []).append(obj.urn)
elif aff4_type:
if isinstance(obj, aff4_type):
yield obj
else:
yield obj
except IOError:
pass
if symlinks:
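      # Several symlinks may point at the same target; yield one clone per extra symlink so
      # each returned object carries the symlink_urn it was reached through.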
for obj in self.MultiOpen(
symlinks, mode=mode, token=token, aff4_type=aff4_type, age=age):
to_link = symlinks[obj.urn]
for additional_symlink in to_link[1:]:
clone = obj.__class__(obj.urn, clone=obj)
clone.symlink_urn = additional_symlink
yield clone
obj.symlink_urn = symlinks[obj.urn][0]
yield obj
def OpenDiscreteVersions(self,
urn,
mode="r",
token=None,
local_cache=None,
age=ALL_TIMES,
follow_symlinks=True):
"""Returns all the versions of the object as AFF4 objects.
Args:
urn: The urn to open.
mode: The mode to open the file with.
token: The Security Token to use for opening this item.
local_cache: A dict containing a cache as returned by GetAttributes. If
set, this bypasses the factory cache.
age: The age policy used to build this object. Should be one of
ALL_TIMES or a time range
follow_symlinks: If object opened is a symlink, follow it.
Yields:
An AFF4Object for each version.
Raises:
IOError: On bad open or wrong time range specified.
This iterates through versions of an object, returning the newest version
first, then each older version until the beginning of time.
Note that versions are defined by changes to the TYPE attribute, and this
takes the version between two TYPE attributes.
In many cases as a user you don't want this, as you want to be returned an
object with as many attributes as possible, instead of the subset of them
that were Set between these two times.
"""
if age == NEWEST_TIME or len(age) == 1:
raise IOError("Bad age policy NEWEST_TIME for OpenDiscreteVersions.")
if len(age) == 2:
oldest_age = age[1]
else:
oldest_age = 0
aff4object = FACTORY.Open(
urn,
mode=mode,
token=token,
local_cache=local_cache,
age=age,
follow_symlinks=follow_symlinks)
# TYPE is always written last so we trust it to bound the version.
# Iterate from newest to oldest.
type_iter = aff4object.GetValuesForAttribute(aff4object.Schema.TYPE)
version_list = [(t.age, str(t)) for t in type_iter]
version_list.append((oldest_age, None))
for i in range(0, len(version_list) - 1):
age_range = (version_list[i + 1][0], version_list[i][0])
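      # Each version covers the half-open age interval (previous TYPE write, this TYPE write].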
# Create a subset of attributes for use in the new object that represents
# this version.
clone_attrs = {}
for k, values in aff4object.synced_attributes.iteritems():
reduced_v = []
for v in values:
if v.age > age_range[0] and v.age <= age_range[1]:
reduced_v.append(v)
clone_attrs.setdefault(k, []).extend(reduced_v)
obj_cls = AFF4Object.classes[version_list[i][1]]
new_obj = obj_cls(
urn,
mode=mode,
parent=aff4object.parent,
clone=clone_attrs,
token=token,
age=age_range,
local_cache=local_cache,
follow_symlinks=follow_symlinks)
new_obj.Initialize() # This is required to set local attributes.
yield new_obj
def Stat(self, urns, token=None):
"""Returns metadata about all urns.
Currently the metadata include type, and last update time.
Args:
urns: The urns of the objects to open.
token: The token to use.
Yields:
A dict of metadata.
Raises:
RuntimeError: A string was passed instead of an iterable.
"""
if token is None:
token = data_store.default_token
if isinstance(urns, basestring):
raise RuntimeError("Expected an iterable, not string.")
for subject, values in data_store.DB.MultiResolvePrefix(
urns, ["aff4:type", "metadata:last"], token=token):
res = dict(urn=rdfvalue.RDFURN(subject))
for v in values:
if v[0] == "aff4:type":
res["type"] = v
elif v[0] == "metadata:last":
res["last"] = rdfvalue.RDFDatetime(v[1])
yield res
def Create(self,
urn,
aff4_type,
mode="w",
token=None,
age=NEWEST_TIME,
force_new_version=True,
object_exists=False,
mutation_pool=None,
transaction=None):
"""Creates the urn if it does not already exist, otherwise opens it.
If the urn exists and is of a different type, this will also promote it to
the specified type.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
mode: The desired mode for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
force_new_version: Forces the creation of a new object in the data_store.
object_exists: If we know the object already exists we can skip index
creation.
mutation_pool: An optional MutationPool object to write to. If not given,
the data_store is used directly.
transaction: For locked objects, a lock is passed to the object.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid.
"""
if mode not in ["w", "r", "rw"]:
raise AttributeError("Invalid mode %s" % mode)
if token is None:
token = data_store.default_token
if urn is not None:
urn = rdfvalue.RDFURN(urn)
aff4_type = _ValidateAFF4Type(aff4_type)
if "r" in mode:
# Check to see if an object already exists.
try:
existing = self.Open(
urn, mode=mode, token=token, age=age, transaction=transaction)
result = existing.Upgrade(aff4_type)
# We can't pass aff4_type into the Open call since it will raise with a
# type mismatch. We set it like this so BadGetAttributeError checking
# works.
if aff4_type:
result.aff4_type = aff4_type.__name__
if force_new_version and existing.Get(
result.Schema.TYPE) != aff4_type.__name__:
result.ForceNewVersion()
return result
except IOError:
pass
result = aff4_type(
urn,
mode=mode,
token=token,
age=age,
aff4_type=aff4_type.__name__,
object_exists=object_exists,
mutation_pool=mutation_pool,
transaction=transaction)
result.Initialize()
if force_new_version:
result.ForceNewVersion()
return result
def MultiDelete(self, urns, token=None):
"""Drop all the information about given objects.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urns: Urns of objects to remove.
token: The Security Token to use for opening this item.
Raises:
RuntimeError: If one of the urns is too short. This is a safety check to
ensure the root is not removed.
"""
urns = [rdfvalue.RDFURN(urn) for urn in urns]
if token is None:
token = data_store.default_token
for urn in urns:
if urn.Path() == "/":
raise RuntimeError("Can't delete root URN. Please enter a valid URN")
deletion_pool = DeletionPool(token=token)
deletion_pool.MultiMarkForDeletion(urns)
marked_root_urns = deletion_pool.root_urns_for_deletion
marked_urns = deletion_pool.urns_for_deletion
logging.debug(u"Found %d objects to remove when removing %s",
len(marked_urns), utils.SmartUnicode(urns))
logging.debug(u"Removing %d root objects when removing %s: %s",
len(marked_root_urns),
utils.SmartUnicode(urns),
utils.SmartUnicode(marked_root_urns))
pool = data_store.DB.GetMutationPool(token=token)
for root in marked_root_urns:
# Only the index of the parent object should be updated. Everything
# below the target object (along with indexes) is going to be
# deleted.
self._DeleteChildFromIndex(root, token, mutation_pool=pool)
for urn_to_delete in marked_urns:
try:
self.intermediate_cache.ExpireObject(urn_to_delete.Path())
except KeyError:
pass
pool.DeleteSubjects(marked_urns)
pool.Flush()
# Ensure this is removed from the cache as well.
self.Flush()
logging.debug("Removed %d objects", len(marked_urns))
def Delete(self, urn, token=None):
"""Drop all the information about this object.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urn: The object to remove.
token: The Security Token to use for opening this item.
Raises:
RuntimeError: If the urn is too short. This is a safety check to ensure
the root is not removed.
"""
self.MultiDelete([urn], token=token)
def MultiListChildren(self, urns, token=None, limit=None, age=NEWEST_TIME):
"""Lists bunch of directories efficiently.
Args:
urns: List of urns to list children.
token: Security token.
limit: Max number of children to list (NOTE: this is per urn).
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
Tuples of Subjects and a list of children urns of a given subject.
"""
checked_subjects = set()
index_prefix = "index:dir/"
for subject, values in data_store.DB.MultiResolvePrefix(
urns,
index_prefix,
token=token,
timestamp=Factory.ParseAgeSpecification(age),
limit=limit):
checked_subjects.add(subject)
subject_result = []
for predicate, _, timestamp in values:
urn = rdfvalue.RDFURN(subject).Add(predicate[len(index_prefix):])
urn.age = rdfvalue.RDFDatetime(timestamp)
subject_result.append(urn)
yield subject, subject_result
for subject in set(urns) - checked_subjects:
yield subject, []
def ListChildren(self, urn, token=None, limit=None, age=NEWEST_TIME):
"""Lists bunch of directories efficiently.
Args:
urn: Urn to list children.
token: Security token.
limit: Max number of children to list.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Returns:
RDFURNs instances of each child.
"""
_, children_urns = list(
self.MultiListChildren([urn], token=token, limit=limit, age=age))[0]
return children_urns
def RecursiveMultiListChildren(self,
urns,
token=None,
limit=None,
age=NEWEST_TIME):
"""Recursively lists bunch of directories.
Args:
urns: List of urns to list children.
token: Security token.
limit: Max number of children to list (NOTE: this is per urn).
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
(subject<->children urns) tuples. RecursiveMultiListChildren will fetch
children lists for initial set of urns and then will fetch children's
children, etc.
For example, for the following objects structure:
a->
b -> c
-> d
RecursiveMultiListChildren(['a']) will return:
[('a', ['b']), ('b', ['c', 'd'])]
"""
checked_urns = set()
urns_to_check = urns
while True:
found_children = []
for subject, values in self.MultiListChildren(
urns_to_check, token=token, limit=limit, age=age):
found_children.extend(values)
yield subject, values
checked_urns.update(urns_to_check)
urns_to_check = set(found_children) - checked_urns
if not urns_to_check:
break
def Flush(self):
data_store.DB.Flush()
self.intermediate_cache.Flush()
class Attribute(object):
"""AFF4 schema attributes are instances of this class."""
description = ""
# A global registry of attributes by name. This ensures we do not accidentally
# define the same attribute with conflicting types.
PREDICATES = {}
# A human readable name to be used in filter queries.
NAMES = {}
def __init__(self,
predicate,
attribute_type=rdfvalue.RDFString,
description="",
name=None,
_copy=False,
default=None,
index=None,
versioned=True,
lock_protected=False,
creates_new_object_version=True):
"""Constructor.
Args:
predicate: The name of this attribute - must look like a URL
(e.g. aff4:contains). Will be used to store the attribute.
      attribute_type: The RDFValue type of this attribute.
description: A one line description of what this attribute represents.
name: A human readable name for the attribute to be used in filters.
_copy: Used internally to create a copy of this object without
registering.
default: A default value will be returned if the attribute is not set on
an object. This can be a constant or a callback which receives the fd
itself as an arg.
index: The name of the index to use for this attribute. If None, the
attribute will not be indexed.
versioned: Should this attribute be versioned? Non-versioned attributes
always overwrite other versions of the same attribute.
lock_protected: If True, this attribute may only be set if the object was
opened via OpenWithLock().
creates_new_object_version: If this is set, a write to this attribute
will also write a new version of the parent attribute. This should be
False for attributes where lots of entries are collected like logs.
"""
self.name = name
self.predicate = predicate
self.attribute_type = attribute_type
self.description = description
self.default = default
self.index = index
self.versioned = versioned
self.lock_protected = lock_protected
self.creates_new_object_version = creates_new_object_version
# Field names can refer to a specific component of an attribute
self.field_names = []
if not _copy:
# Check the attribute registry for conflicts
try:
old_attribute = Attribute.PREDICATES[predicate]
if old_attribute.attribute_type != attribute_type:
msg = "Attribute %s defined with conflicting types (%s, %s)" % (
predicate, old_attribute.attribute_type.__class__.__name__,
attribute_type.__class__.__name__)
logging.error(msg)
raise RuntimeError(msg)
except KeyError:
pass
# Register
self.PREDICATES[predicate] = self
if name:
self.NAMES[name] = self
def Copy(self):
"""Return a copy without registering in the attribute registry."""
return Attribute(
self.predicate,
self.attribute_type,
self.description,
self.name,
_copy=True)
def __call__(self, semantic_value=None, **kwargs):
"""A shortcut allowing us to instantiate a new type from an attribute."""
result = semantic_value
if semantic_value is None:
result = self.attribute_type(**kwargs)
# Coerce the value into the required type if needed.
elif not isinstance(semantic_value, self.attribute_type):
result = self.attribute_type(semantic_value, **kwargs)
# We try to reuse the provided value and tag it as belonging to this
# attribute. However, if the value is reused, we must make a copy.
if getattr(result, "attribute_instance", None):
result = result.Copy()
result.attribute_instance = self
return result
def __str__(self):
return self.predicate
def __repr__(self):
return "<Attribute(%s, %s)>" % (self.name, self.predicate)
def __hash__(self):
return hash(self.predicate)
def __eq__(self, other):
return str(self.predicate) == str(other)
def __ne__(self, other):
return str(self.predicate) != str(other)
def __getitem__(self, item):
result = self.Copy()
result.field_names = item.split(".")
return result
def __len__(self):
return len(self.field_names)
def Fields(self):
return self.attribute_type.Fields()
@classmethod
def GetAttributeByName(cls, name):
# Support attribute names with a . in them:
try:
if "." in name:
name, field = name.split(".", 1)
return cls.NAMES[name][field]
return cls.NAMES[name]
except KeyError:
raise AttributeError("Invalid attribute %s" % name)
def GetRDFValueType(self):
"""Returns this attribute's RDFValue class."""
result = self.attribute_type
for field_name in self.field_names:
# Support the new semantic protobufs.
try:
result = result.type_infos.get(field_name).type
except AttributeError:
raise AttributeError("Invalid attribute %s" % field_name)
return result
def _GetSubField(self, value, field_names):
for field_name in field_names:
if value.HasField(field_name):
value = getattr(value, field_name, None)
else:
value = None
break
if value is not None:
yield value
def GetSubFields(self, fd, field_names):
"""Gets all the subfields indicated by field_names.
This resolves specifications like "Users.special_folders.app_data" where for
each entry in the Users protobuf the corresponding app_data folder entry
should be returned.
Args:
fd: The base RDFValue or Array.
field_names: A list of strings indicating which subfields to get.
Yields:
All the subfields matching the field_names specification.
"""
if isinstance(fd, rdf_protodict.RDFValueArray):
for value in fd:
for res in self._GetSubField(value, field_names):
yield res
else:
for res in self._GetSubField(fd, field_names):
yield res
def GetValues(self, fd):
"""Return the values for this attribute as stored in an AFF4Object."""
result = None
for result in fd.new_attributes.get(self, []):
# We need to interpolate sub fields in this rdfvalue.
if self.field_names:
for x in self.GetSubFields(result, self.field_names):
yield x
else:
yield result
for result in fd.synced_attributes.get(self, []):
result = result.ToRDFValue()
# We need to interpolate sub fields in this rdfvalue.
if result is not None:
if self.field_names:
for x in self.GetSubFields(result, self.field_names):
yield x
else:
yield result
if result is None:
default = self.GetDefault(fd)
if default is not None:
yield default
def GetDefault(self, fd=None, default=None):
"""Returns a default attribute if it is not set."""
if callable(self.default):
return self.default(fd)
if self.default is not None:
# We can't return mutable objects here or the default might change for all
# objects of this class.
if isinstance(self.default, rdfvalue.RDFValue):
default = self.default.Copy()
default.attribute_instance = self
return self(default)
else:
return self(self.default)
if isinstance(default, rdfvalue.RDFValue):
default = default.Copy()
default.attribute_instance = self
return default
class SubjectAttribute(Attribute):
"""An attribute which virtualises the subject."""
def __init__(self):
Attribute.__init__(self, "aff4:subject", rdfvalue.Subject,
"A subject pseudo attribute", "subject")
def GetValues(self, fd):
return [rdfvalue.Subject(fd.urn)]
class AFF4Attribute(rdfvalue.RDFString):
"""An AFF4 attribute name."""
def Validate(self):
try:
Attribute.GetAttributeByName(self._value)
except (AttributeError, KeyError):
raise type_info.TypeValueError("Value %s is not an AFF4 attribute name" %
self._value)
class ClassProperty(property):
"""A property which comes from the class object."""
def __get__(self, _, owner):
return self.fget.__get__(None, owner)()
class ClassInstantiator(property):
"""A property which instantiates the class on getting."""
def __get__(self, _, owner):
return self.fget()
class LazyDecoder(object):
"""An object which delays serialize and unserialize as late as possible.
The current implementation requires the proxied object to be immutable.
"""
def __init__(self, rdfvalue_cls=None, serialized=None, age=None,
decoded=None):
self.rdfvalue_cls = rdfvalue_cls
self.serialized = serialized
self.age = age
self.decoded = decoded
def ToRDFValue(self):
if self.decoded is None:
try:
self.decoded = self.rdfvalue_cls.FromSerializedString(
self.serialized, age=self.age)
except rdfvalue.DecodeError:
return None
return self.decoded
def FromRDFValue(self):
return self.serialized
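# Editor's illustrative sketch (not part of the original file): LazyDecoder
# postpones deserialization until the value is first needed. The serialized
# bytes and timestamp below are placeholders.
#
#   lazy = LazyDecoder(rdfvalue.RDFDatetime, serialized_bytes, age=ts)
#   value = lazy.ToRDFValue()   # decoded here; returns None on DecodeError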
class AFF4Object(object):
"""Base class for all objects."""
# We are a registered class.
__metaclass__ = registry.MetaclassRegistry
# This property is used in GUIs to define behaviours. These can take arbitrary
# values as needed. Behaviours are read only and set in the class definition.
_behaviours = frozenset()
# Should this object be synced back to the data store.
_dirty = False
# The data store transaction this object uses while it is being locked.
transaction = None
@property
def locked(self):
"""Is this object currently locked?"""
return self.transaction is not None
@ClassProperty
@classmethod
def behaviours(cls): # pylint: disable=g-bad-name
return cls._behaviours
# URN of the index for labels for generic AFF4Objects.
labels_index_urn = rdfvalue.RDFURN("aff4:/index/labels/generic")
# We define the parts of the schema for each AFF4 Object as an internal
# class. As new objects extend this, they can add more attributes to their
# schema by extending their parents. Note that the class must be named
# SchemaCls.
class SchemaCls(object):
"""The standard AFF4 schema."""
# We use child indexes to navigate the direct children of an object.
# If the additional storage requirements for the indexes are not worth it
# then ADD_CHILD_INDEX should be False. Note however that it will no longer
# be possible to find all the children of the parent object.
ADD_CHILD_INDEX = True
TYPE = Attribute("aff4:type", rdfvalue.RDFString,
"The name of the AFF4Object derived class.", "type")
SUBJECT = SubjectAttribute()
STORED = Attribute("aff4:stored", rdfvalue.RDFURN,
"The AFF4 container inwhich this object is stored.")
LAST = Attribute(
"metadata:last",
rdfvalue.RDFDatetime,
"The last time any attribute of this object was written.",
creates_new_object_version=False)
# Note labels should not be Set directly but should be manipulated via
# the AddLabels method.
LABELS = Attribute(
"aff4:labels_list",
aff4_rdfvalues.AFF4ObjectLabelsList,
"Any object can have labels applied to it.",
"Labels",
creates_new_object_version=False,
versioned=False)
LEASED_UNTIL = Attribute(
"aff4:lease",
rdfvalue.RDFDatetime,
"The time until which the object is leased by a "
"particular caller.",
versioned=False,
creates_new_object_version=False)
LAST_OWNER = Attribute(
"aff4:lease_owner",
rdfvalue.RDFString,
"The owner of the lease.",
versioned=False,
creates_new_object_version=False)
def __init__(self, aff4_type=None):
"""Init.
Args:
aff4_type: aff4 type string e.g. 'VFSGRRClient' if specified by the user
when the aff4 object was created. Or None.
"""
self.aff4_type = aff4_type
@classmethod
def ListAttributes(cls):
for attr in dir(cls):
attr = getattr(cls, attr)
if isinstance(attr, Attribute):
yield attr
@classmethod
def GetAttribute(cls, name):
for i in cls.ListAttributes():
# Attributes are accessible by predicate or name
if i.name == name or i.predicate == name:
return i
def __getattr__(self, attr):
"""Handle unknown attributes.
Often the actual object returned is not the object that is expected. In
those cases attempting to retrieve a specific named attribute would
normally raise, e.g.:
fd = aff4.FACTORY.Open(urn)
fd.Get(fd.Schema.DOESNTEXIST, default_value)
In this case we return None to ensure that the default is chosen.
However, if the caller specifies a specific aff4_type, they expect the
attributes of that object. If they are referencing a non-existent
attribute this is an error and we should raise, e.g.:
fd = aff4.FACTORY.Open(urn, aff4_type=module.SomeClass)
fd.Get(fd.Schema.DOESNTEXIST, default_value)
Args:
attr: Some ignored attribute.
Raises:
BadGetAttributeError: if the object was opened with a specific type
"""
if self.aff4_type:
raise BadGetAttributeError(
"Attribute %s does not exist on object opened with aff4_type %s" %
(utils.SmartStr(attr), self.aff4_type))
return None
# Make sure that when someone references the schema, they receive an instance
# of the class.
@property
def Schema(self): # pylint: disable=g-bad-name
return self.SchemaCls(self.aff4_type)
def __init__(self,
urn,
mode="r",
parent=None,
clone=None,
token=None,
local_cache=None,
age=NEWEST_TIME,
follow_symlinks=True,
aff4_type=None,
object_exists=False,
mutation_pool=None,
transaction=None):
if urn is not None:
urn = rdfvalue.RDFURN(urn)
self.urn = urn
self.mode = mode
self.parent = parent
self.token = token
self.age_policy = age
self.follow_symlinks = follow_symlinks
self.lock = threading.RLock()
self.mutation_pool = mutation_pool
self.transaction = transaction
if transaction and mutation_pool:
raise ValueError("Cannot use a locked object with a mutation pool!")
# If object was opened through a symlink, "symlink_urn" attribute will
# contain a symlink urn.
self.symlink_urn = None
# The object already exists in the data store - we do not need to update
# indexes.
self.object_exists = object_exists
# This flag will be set whenever an attribute is changed that has the
# creates_new_object_version flag set.
self._new_version = False
# Mark out attributes to delete when Flushing()
self._to_delete = set()
# If an explicit aff4 type is requested we store it here so we know to
# verify aff4 attributes exist in the schema at Get() time.
self.aff4_type = aff4_type
# We maintain two attribute caches - self.synced_attributes reflects the
# attributes which are synced with the data_store, while self.new_attributes
# are new attributes which still need to be flushed to the data_store. When
# this object is instantiated we populate self.synced_attributes with the
# data_store, while the finish method flushes new changes.
if clone is not None:
if isinstance(clone, dict):
# Just use these as the attributes, do not go to the data store. This is
# a quick way of creating an object with data which was already fetched.
self.new_attributes = {}
self.synced_attributes = clone
elif isinstance(clone, AFF4Object):
# We were given another object to clone - we do not need to access the
# data_store now.
self.new_attributes = clone.new_attributes.copy()
self.synced_attributes = clone.synced_attributes.copy()
else:
raise RuntimeError("Cannot clone from %s." % clone)
else:
self.new_attributes = {}
self.synced_attributes = {}
if "r" in mode:
if local_cache:
try:
for attribute, value, ts in local_cache[utils.SmartUnicode(urn)]:
self.DecodeValueFromAttribute(attribute, value, ts)
except KeyError:
pass
else:
# Populate the caches from the data store.
for urn, values in FACTORY.GetAttributes(
[urn], age=age, token=self.token):
for attribute_name, value, ts in values:
self.DecodeValueFromAttribute(attribute_name, value, ts)
if clone is None:
self.Initialize()
def Initialize(self):
"""The method is called after construction to initialize the object.
This will be called after construction, and each time the object is
unserialized from the datastore.
An AFF4 object contains attributes which can be populated from the
database. This method is called to obtain a fully fledged object from
a collection of attributes.
"""
def DecodeValueFromAttribute(self, attribute_name, value, ts):
"""Given a serialized value, decode the attribute.
Only attributes which have been previously defined are permitted.
Args:
attribute_name: The string name of the attribute.
value: The serialized attribute value.
ts: The timestamp of this attribute.
"""
try:
# Get the Attribute object from our schema.
attribute = Attribute.PREDICATES[attribute_name]
cls = attribute.attribute_type
self._AddAttributeToCache(attribute,
LazyDecoder(cls, value, ts),
self.synced_attributes)
except KeyError:
pass
except (ValueError, rdfvalue.DecodeError):
logging.debug("%s: %s invalid encoding. Skipping.", self.urn,
attribute_name)
def _AddAttributeToCache(self, attribute_name, value, cache):
"""Helper to add a new attribute to a cache."""
# If there's another value in cache with the same timestamp, the last added
# one takes precedence. This helps a lot in tests that use FakeTime.
attribute_list = cache.setdefault(attribute_name, [])
if attribute_list and attribute_list[-1].age == value.age:
attribute_list.pop()
attribute_list.append(value)
def CheckLease(self):
"""Check if our lease has expired, return seconds left.
Returns:
int: seconds left in the lease, 0 if not locked or lease is expired
"""
if self.transaction:
return self.transaction.CheckLease()
return 0
def UpdateLease(self, duration):
"""Updates the lease and flushes the object.
The lease is set to expire after the "duration" time from the present
moment.
This method is supposed to be used when an operation that requires locking
may run for a time that exceeds the lease time specified in OpenWithLock().
See flows/hunts locking for an example.
Args:
duration: Integer number of seconds. Lease expiry time will be set
to "time.time() + duration".
Raises:
LockError: if the object is not currently locked or the lease has
expired.
"""
if not self.locked:
raise LockError("Object must be locked to update the lease: %s." %
self.urn)
if self.CheckLease() == 0:
raise LockError("Can not update lease that has already expired.")
self.transaction.UpdateLease(duration)
def Flush(self, sync=True):
"""Syncs this object with the data store, maintaining object validity."""
if self.locked and self.CheckLease() == 0:
raise LockError("Can not update lease that has already expired.")
self._WriteAttributes(sync=sync)
self._SyncAttributes()
if self.parent:
self.parent.Flush(sync=sync)
def Close(self, sync=True):
"""Close and destroy the object.
This is similar to Flush, but does not maintain object validity. Hence the
object should not be interacted with after Close().
Args:
sync: Write the attributes synchronously to the data store.
Raises:
LockError: The lease for this object has expired.
"""
if self.locked and self.CheckLease() == 0:
raise LockError("Can not update lease that has already expired.")
# Always sync when in a lock.
if self.locked:
sync = True
self._WriteAttributes(sync=sync)
# Releasing this lock allows another thread to own it.
if self.locked:
self.transaction.Release()
if self.parent:
self.parent.Close(sync=sync)
# Interacting with a closed object is a bug. We need to catch this ASAP so
# we remove all mode permissions from this object.
self.mode = ""
def OnDelete(self, deletion_pool=None):
"""Called when the object is about to be deleted.
NOTE: If the implementation of this method has to list children or delete
other dependent objects, make sure to use DeletionPool's API instead of a
generic aff4.FACTORY one. DeletionPool is optimized for deleting large
amounts of objects - it minimizes number of expensive data store calls,
trying to group as many of them as possible into a single batch, and caches
results of these calls.
Args:
deletion_pool: DeletionPool object used for this deletion operation.
Raises:
ValueError: if deletion pool is None.
"""
if deletion_pool is None:
raise ValueError("deletion_pool can't be None")
@utils.Synchronized
def _WriteAttributes(self, sync=True):
"""Write the dirty attributes to the data store."""
# If the object is not opened for writing we do not need to flush it to the
# data_store.
if "w" not in self.mode:
return
if self.urn is None:
raise RuntimeError("Storing of anonymous AFF4 objects not supported.")
to_set = {}
for attribute_name, value_array in self.new_attributes.iteritems():
to_set_list = to_set.setdefault(attribute_name, [])
for value in value_array:
to_set_list.append((value.SerializeToDataStore(), value.age))
if self._dirty:
# We determine this object has a new version only if any of the versioned
# attributes have changed. Non-versioned attributes do not represent a new
# object version. The type of an object is versioned and represents a
# version point in the life of the object.
if self._new_version:
to_set[self.Schema.TYPE] = [(
utils.SmartUnicode(self.__class__.__name__),
rdfvalue.RDFDatetime.Now())]
# We only update indexes if the schema does not forbid it and we are not
# sure that the object already exists.
add_child_index = self.Schema.ADD_CHILD_INDEX
if self.object_exists:
add_child_index = False
# Write the attributes to the Factory cache.
FACTORY.SetAttributes(
self.urn,
to_set,
self._to_delete,
add_child_index=add_child_index,
mutation_pool=self.mutation_pool,
sync=sync,
token=self.token)
@utils.Synchronized
def _SyncAttributes(self):
"""Sync the new attributes to the synced attribute cache.
This maintains object validity.
"""
# This effectively moves all the values from the new_attributes to the
# synced_attributes caches.
for attribute, value_array in self.new_attributes.iteritems():
if not attribute.versioned or self.age_policy == NEWEST_TIME:
# Store the latest version if there are multiple unsynced versions.
value = value_array[-1]
self.synced_attributes[attribute] = [
LazyDecoder(decoded=value, age=value.age)
]
else:
synced_value_array = self.synced_attributes.setdefault(attribute, [])
for value in value_array:
synced_value_array.append(LazyDecoder(decoded=value, age=value.age))
synced_value_array.sort(key=lambda x: x.age, reverse=True)
self.new_attributes = {}
self._to_delete.clear()
self._dirty = False
self._new_version = False
def _CheckAttribute(self, attribute, value):
"""Check that the value is of the expected type.
Args:
attribute: An instance of Attribute().
value: An instance of RDFValue.
Raises:
ValueError: when the value is not of the expected type.
AttributeError: When the attribute is not of type Attribute().
"""
if not isinstance(attribute, Attribute):
raise AttributeError("Attribute %s must be of type aff4.Attribute()",
attribute)
if not isinstance(value, attribute.attribute_type):
raise ValueError("Value for attribute %s must be of type %s()" %
(attribute, attribute.attribute_type.__name__))
def Copy(self, to_attribute, from_fd, from_attribute):
values = from_fd.GetValuesForAttribute(from_attribute)
for v in values:
self.AddAttribute(to_attribute, v, age=v.age)
def Set(self, attribute, value=None):
"""Set an attribute on this object.
Set() is now a synonym for AddAttribute() since attributes are never
deleted.
Args:
attribute: The attribute to set.
value: The new value for this attribute.
"""
# Specifically ignore None here. This allows us to safely copy attributes
# from one object to another: fd.Set(fd2.Get(..))
if attribute is None:
return
self.AddAttribute(attribute, value)
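# Editor's illustrative sketch (not part of the original file): typical Set()
# calls on an object opened for writing. Because None is ignored, attributes
# can be copied between objects without extra checks.
#
#   fd.Set(fd.Schema.SIZE(1024))                   # pre-initialized value
#   fd.Set(fd.Schema.SIZE, rdfvalue.RDFInteger(1024))
#   fd.Set(other_fd.Get(other_fd.Schema.SIZE))     # copy between objects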
def AddAttribute(self, attribute, value=None, age=None):
"""Add an additional attribute to this object.
If value is None, attribute is expected to be already initialized with a
value. For example:
fd.AddAttribute(fd.Schema.CONTAINS("some data"))
Args:
attribute: The attribute name or an RDFValue derived from the attribute.
value: The value the attribute will be set to.
age: Age (timestamp) of the attribute. If None, current time is used.
Raises:
IOError: If this object is read only.
"""
if "w" not in self.mode:
raise IOError("Writing attribute %s to read only object." % attribute)
if value is None:
value = attribute
attribute = value.attribute_instance
# Check if this object should be locked in order to add the attribute.
# NOTE: We don't care about locking when doing blind writes.
if self.mode != "w" and attribute.lock_protected and not self.transaction:
raise IOError("Object must be locked to write attribute %s." % attribute)
self._CheckAttribute(attribute, value)
# Does this represent a new version?
if attribute.versioned:
if attribute.creates_new_object_version:
self._new_version = True
# Update the time of this new attribute.
if age:
value.age = age
else:
value.age = rdfvalue.RDFDatetime.Now()
# Non-versioned attributes always replace previous versions and get written
# at the earliest timestamp (so they appear in all objects).
else:
self._to_delete.add(attribute)
self.synced_attributes.pop(attribute, None)
self.new_attributes.pop(attribute, None)
value.age = 0
self._AddAttributeToCache(attribute, value, self.new_attributes)
self._dirty = True
@utils.Synchronized
def DeleteAttribute(self, attribute):
"""Clears the attribute from this object."""
if "w" not in self.mode:
raise IOError("Deleting attribute %s from read only object." % attribute)
# Check if this object should be locked in order to delete the attribute.
# NOTE: We don't care about locking when doing blind writes.
if self.mode != "w" and attribute.lock_protected and not self.transaction:
raise IOError("Object must be locked to delete attribute %s." % attribute)
if attribute in self.synced_attributes:
self._to_delete.add(attribute)
del self.synced_attributes[attribute]
if attribute in self.new_attributes:
del self.new_attributes[attribute]
# Does this represent a new version?
if attribute.versioned and attribute.creates_new_object_version:
self._new_version = True
self._dirty = True
def IsAttributeSet(self, attribute):
"""Determine if the attribute is set.
Args:
attribute: The attribute to check.
Returns:
True if set, otherwise False.
Checking Get against None doesn't work as Get will return a default
attribute value. This determines if the attribute has been manually set.
"""
return (attribute in self.synced_attributes or
attribute in self.new_attributes)
def Get(self, attribute, default=None):
"""Gets the attribute from this object."""
if attribute is None:
return default
# Allow the user to specify the attribute by name.
elif isinstance(attribute, str):
attribute = Attribute.GetAttributeByName(attribute)
# We can't read attributes from the data_store unless read mode was
# specified. It is ok to read new attributes though.
if "r" not in self.mode and (attribute not in self.new_attributes and
attribute not in self.synced_attributes):
raise IOError("Fetching %s from object not opened for reading." %
attribute)
for result in self.GetValuesForAttribute(attribute, only_one=True):
try:
# The attribute may be a naked string or int - i.e. not an RDFValue at
# all.
result.attribute_instance = attribute
except AttributeError:
pass
return result
return attribute.GetDefault(self, default)
def GetValuesForAttribute(self, attribute, only_one=False):
"""Returns a list of values from this attribute."""
if not only_one and self.age_policy == NEWEST_TIME:
raise RuntimeError("Attempting to read all attribute versions for an "
"object opened for NEWEST_TIME. This is probably "
"not what you want.")
if attribute is None:
return []
elif isinstance(attribute, basestring):
attribute = Attribute.GetAttributeByName(attribute)
return attribute.GetValues(self)
def Update(self, attribute=None, user=None, priority=None):
"""Requests the object refresh an attribute from the Schema."""
def Upgrade(self, aff4_class):
"""Upgrades this object to the type specified.
AFF4 objects can be upgraded on the fly to another type - as long as the new
type is derived from the current type. This feature allows creation of
placeholder objects which can later be upgraded to the fully featured
object.
Note: It is not allowed to downgrade an object if that would result in a
loss of information (since the new object has a smaller schema). This method
tries to store the new object with its new attributes and will fail if any
attributes can not be mapped.
Args:
aff4_class: A string representing the new class.
Returns:
an instance of the new class with all the same attributes as this current
object.
Raises:
RuntimeError: When the object to upgrade is locked.
AttributeError: When the new object can not accept some of the old
attributes.
InstantiationError: When we cannot instantiate the object type class.
"""
aff4_class = _ValidateAFF4Type(aff4_class)
# We are already of the required type
if self.__class__ == aff4_class:
return self
# Check that we have the right type.
if not isinstance(aff4_class, type):
raise InstantiationError("aff4_class=%s must be a type" % aff4_class)
if not issubclass(aff4_class, AFF4Object):
raise InstantiationError(
"aff4_class=%s must be a subclass of AFF4Object." % aff4_class)
# It's not allowed to downgrade the object
if isinstance(self, aff4_class):
# TODO(user): check what we should do here:
# 1) Nothing
# 2) raise
# 3) return self
# Option 3) seems ok, but we need to be sure that we don't use
# Create(mode='r') anywhere where code actually expects the object to be
# downgraded.
return self
# NOTE: It is possible for attributes to become inaccessible here if the old
# object has an attribute which the new object does not have in its
# schema. The values of these attributes will not be available any longer in
# the new object - usually because old attributes do not make sense in the
# context of the new object.
# Instantiate the class
result = aff4_class(
self.urn,
mode=self.mode,
clone=self,
parent=self.parent,
token=self.token,
age=self.age_policy,
object_exists=self.object_exists,
follow_symlinks=self.follow_symlinks,
aff4_type=self.aff4_type,
mutation_pool=self.mutation_pool,
transaction=self.transaction)
result.symlink_urn = self.urn
result.Initialize()
return result
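# Editor's illustrative sketch (not part of the original file): upgrading a
# placeholder object in place. The URN, token and target class are assumed.
#
#   fd = FACTORY.Create(urn, AFF4Volume, mode="rw", token=token)
#   fd = fd.Upgrade(target_class)   # instance of target_class, same attributes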
def ForceNewVersion(self):
self._dirty = True
self._new_version = True
def __repr__(self):
return "<%s@%X = %s>" % (self.__class__.__name__, hash(self), self.urn)
# The following are used to ensure a bunch of AFF4Objects can be sorted on
# their URNs.
def __gt__(self, other):
return self.urn > other
def __lt__(self, other):
return self.urn < other
def __nonzero__(self):
"""We override this because we don't want to fall back to __len__.
We want to avoid the case where a nonzero check causes iteration over all
items. Subclasses may override as long as their implementation is efficient.
Returns:
True always
"""
return True
# Support the with protocol.
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
try:
self.Close()
except Exception: # pylint: disable=broad-except
# If anything bad happens here, we must abort the lock or the
# object will stay locked.
if self.transaction:
self.transaction.Release()
raise
def AddLabels(self, *labels_names, **kwargs):
"""Add labels to the AFF4Object."""
if not self.token and "owner" not in kwargs:
raise RuntimeError("Can't set label: No owner specified and "
"no access token available.")
owner = kwargs.get("owner") or self.token.username
current_labels = self.Get(self.Schema.LABELS, self.Schema.LABELS())
for label_name in labels_names:
label = aff4_rdfvalues.AFF4ObjectLabel(
name=label_name, owner=owner, timestamp=rdfvalue.RDFDatetime.Now())
current_labels.AddLabel(label)
self.Set(current_labels)
def RemoveLabels(self, *labels_names, **kwargs):
"""Remove specified labels from the AFF4Object."""
if not self.token and "owner" not in kwargs:
raise RuntimeError("Can't remove label: No owner specified and "
"no access token available.")
owner = kwargs.get("owner") or self.token.username
current_labels = self.Get(self.Schema.LABELS)
for label_name in labels_names:
label = aff4_rdfvalues.AFF4ObjectLabel(name=label_name, owner=owner)
current_labels.RemoveLabel(label)
self.Set(self.Schema.LABELS, current_labels)
def SetLabels(self, *labels_names, **kwargs):
self.ClearLabels()
self.AddLabels(*labels_names, **kwargs)
def ClearLabels(self):
self.Set(self.Schema.LABELS, aff4_rdfvalues.AFF4ObjectLabelsList())
def GetLabels(self):
return self.Get(self.Schema.LABELS,
aff4_rdfvalues.AFF4ObjectLabelsList()).labels
def GetLabelsNames(self, owner=None):
labels = self.Get(self.Schema.LABELS, aff4_rdfvalues.AFF4ObjectLabelsList())
return labels.GetLabelNames(owner=owner)
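# Editor's illustrative sketch (not part of the original file): label calls
# need either an access token on the object or an explicit owner keyword.
#
#   fd.AddLabels("suspicious", "triage", owner="analyst")
#   fd.GetLabelsNames()                    # -> ["suspicious", "triage"]
#   fd.RemoveLabels("triage", owner="analyst")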
class AttributeExpression(lexer.Expression):
"""An expression which is used to filter attributes."""
def SetAttribute(self, attribute):
"""Checks that attribute is a valid Attribute() instance."""
# Grab the attribute registered for this name
self.attribute = attribute
self.attribute_obj = Attribute.GetAttributeByName(attribute)
if self.attribute_obj is None:
raise lexer.ParseError("Attribute %s not defined" % attribute)
def SetOperator(self, operator):
"""Sets the operator for this expression."""
self.operator = operator
# Find the appropriate list of operators for this attribute
attribute_type = self.attribute_obj.GetRDFValueType()
operators = attribute_type.operators
# Do we have such an operator?
self.number_of_args, self.operator_method = operators.get(operator,
(0, None))
if self.operator_method is None:
raise lexer.ParseError("Operator %s not defined on attribute '%s'" %
(operator, self.attribute))
self.operator_method = getattr(attribute_type, self.operator_method)
def Compile(self, filter_implementation):
"""Returns the data_store filter implementation from the attribute."""
return self.operator_method(self.attribute_obj, filter_implementation,
*self.args)
class AFF4Volume(AFF4Object):
"""Volumes contain other objects.
The AFF4 concept of a volume abstracts away how objects are stored. We simply
define an AFF4 volume as a container of other AFF4 objects. The volume may
implement any storage mechanism it likes, including virtualizing the objects
contained within it.
"""
_behaviours = frozenset(["Container"])
class SchemaCls(AFF4Object.SchemaCls):
CONTAINS = Attribute("aff4:contains", rdfvalue.RDFURN,
"An AFF4 object contained in this container.")
def ListChildren(self, limit=1000000, age=NEWEST_TIME):
"""Yields RDFURNs of all the children of this object.
Args:
limit: Total number of items we will attempt to retrieve.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range in microseconds.
Yields:
RDFURNs instances of each child.
"""
# Just grab all the children from the index.
index_prefix = "index:dir/"
for predicate, _, timestamp in data_store.DB.ResolvePrefix(
self.urn,
index_prefix,
token=self.token,
timestamp=Factory.ParseAgeSpecification(age),
limit=limit):
urn = self.urn.Add(predicate[len(index_prefix):])
urn.age = rdfvalue.RDFDatetime(timestamp)
yield urn
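# Editor's illustrative sketch (not part of the original file): enumerating
# the direct children recorded under the "index:dir/" index.
#
#   for child_urn in volume.ListChildren(limit=100):
#       print child_urn, child_urn.age      # Python 2 print, as in this file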
def OpenChildren(self,
children=None,
mode="r",
limit=1000000,
chunk_limit=100000,
age=NEWEST_TIME):
"""Yields AFF4 Objects of all our direct children.
This method efficiently returns all attributes for our children directly, in
a few data store round trips. We use the directory indexes to query the data
store.
Args:
children: A list of children RDFURNs to open. If None open all our
children.
mode: The mode the files should be opened with.
limit: Total number of items we will attempt to retrieve.
chunk_limit: Maximum number of items to retrieve at a time.
age: The age of the items to retrieve. Should be one of ALL_TIMES,
NEWEST_TIME or a range.
Yields:
Instances for each direct child.
"""
if children is None:
subjects = list(self.ListChildren(limit=limit, age=age))
else:
subjects = list(children)
subjects.sort()
# Read at most limit children at a time.
while subjects:
to_read = subjects[:chunk_limit]
subjects = subjects[chunk_limit:]
for child in FACTORY.MultiOpen(
to_read, mode=mode, token=self.token, age=age):
yield child
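# Editor's illustrative sketch (not part of the original file): children are
# opened in batches of chunk_limit through FACTORY.MultiOpen, so iteration
# stays cheap even for large directories.
#
#   for child in volume.OpenChildren(mode="r", age=NEWEST_TIME):
#       print child.urn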
@property
def real_pathspec(self):
"""Returns a pathspec for an aff4 object even if there is none stored."""
pathspec = self.Get(self.Schema.PATHSPEC)
stripped_components = []
parent = self
# TODO(user): this code is potentially slow due to multiple separate
# aff4.FACTORY.Open() calls. OTOH the loop below is executed very rarely -
# only when we deal with deep files that got fetched alone and then
# one of the directories in their path gets updated.
while not pathspec and len(parent.urn.Split()) > 1:
# We try to recurse up the tree to get a real pathspec.
# These directories are created automatically without pathspecs when a
# deep directory is listed without listing the parents.
# Note /fs/os or /fs/tsk won't be updateable so we will raise IOError
# if we try.
stripped_components.append(parent.urn.Basename())
pathspec = parent.Get(parent.Schema.PATHSPEC)
parent = FACTORY.Open(parent.urn.Dirname(), token=self.token)
if pathspec:
if stripped_components:
# We stripped pieces of the URL, time to add them back.
new_path = utils.JoinPath(*reversed(stripped_components[:-1]))
pathspec.Append(
rdf_paths.PathSpec(path=new_path, pathtype=pathspec.last.pathtype))
else:
raise IOError("Item has no pathspec.")
return pathspec
class AFF4Root(AFF4Volume):
"""The root of the VFS."""
class AFF4Symlink(AFF4Object):
"""This is a symlink to another AFF4 object.
This means that opening this object will return the linked-to object. To
create a symlink, one must open the symlink for writing and set the
Schema.SYMLINK_TARGET attribute.
Opening the object for reading will return the linked-to object.
"""
class SchemaCls(AFF4Object.SchemaCls):
SYMLINK_TARGET = Attribute("aff4:symlink_target", rdfvalue.RDFURN,
"The target of this link.")
def __new__(cls,
unused_urn,
mode="r",
clone=None,
token=None,
age=NEWEST_TIME,
follow_symlinks=True,
**_):
# When first created, the symlink object is exposed.
if mode == "w" or not follow_symlinks:
return super(AFF4Symlink, cls).__new__(cls)
elif clone is not None:
# Get the real object (note, clone shouldn't be None during normal
# object creation process):
target_urn = clone.Get(cls.SchemaCls.SYMLINK_TARGET)
result = FACTORY.Open(target_urn, mode=mode, age=age, token=token)
result.symlink_urn = clone.urn
return result
else:
raise RuntimeError("Unable to open symlink.")
class AFF4Stream(AFF4Object):
"""An abstract stream for reading data."""
__metaclass__ = abc.ABCMeta
# The read pointer offset.
offset = 0
size = 0
class SchemaCls(AFF4Object.SchemaCls):
# Note that a file on the remote system might have stat.st_size > 0 even
# though none of its data is available to read; in that case size = 0.
SIZE = Attribute(
"aff4:size",
rdfvalue.RDFInteger,
"The total size of available data for this stream.",
"size",
default=0)
HASH = Attribute("aff4:hashobject", rdf_crypto.Hash,
"Hash object containing all known hash digests for"
" the object.")
MULTI_STREAM_CHUNK_SIZE = 1024 * 1024 * 8
@classmethod
def _MultiStream(cls, fds):
"""Method overriden by subclasses to optimize the MultiStream behavior."""
for fd in fds:
fd.Seek(0)
while True:
chunk = fd.Read(cls.MULTI_STREAM_CHUNK_SIZE)
if not chunk:
break
yield fd, chunk, None
@classmethod
def MultiStream(cls, fds):
"""Effectively streams data from multiple opened AFF4Stream objects.
Args:
fds: A list of opened AFF4Stream (or AFF4Stream descendants) objects.
Yields:
Tuples (fd, chunk, exception) where fd is an object from the fds argument,
chunk is a binary blob of data, and exception is either None or a
MissingChunksError for chunks that could not be read. Chunks within one
file are not shuffled: every file's chunks are yielded in order.
The files themselves are grouped by their type and the order of the
groups is non-deterministic. The order of the files within a single
type group is the same as in the fds argument.
Raises:
ValueError: If one of items in the fds list is not an AFF4Stream.
MissingChunksError: if one or more chunks are missing. This exception
is only raised after all the files are read and their respective chunks
are yielded. MultiStream does its best to skip the file entirely if
one of its chunks is missing, but in case of very large files it's still
possible to yield a truncated file.
"""
for fd in fds:
if not isinstance(fd, AFF4Stream):
raise ValueError("All object to be streamed have to inherit from "
"AFF4Stream (found one inheriting from %s)." %
(fd.__class__.__name__))
classes_map = {}
for fd in fds:
classes_map.setdefault(fd.__class__, []).append(fd)
for fd_class, fds in classes_map.items():
# pylint: disable=protected-access
for fd, chunk, exception in fd_class._MultiStream(fds):
yield fd, chunk, exception
# pylint: enable=protected-access
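# Editor's illustrative sketch (not part of the original file): consuming
# MultiStream output; "fds" is assumed to be a list of opened streams and
# "output_files" a dict of writable file objects keyed by URN.
#
#   for fd, chunk, exception in AFF4Stream.MultiStream(fds):
#       if exception is None:
#           output_files[fd.urn].write(chunk)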
def __len__(self):
return self.size
def Initialize(self):
super(AFF4Stream, self).Initialize()
# This is the configurable default length for allowing Read to be called
# without a specific length.
self.max_unbound_read = config_lib.CONFIG["Server.max_unbound_read_size"]
@abc.abstractmethod
def Read(self, length):
pass
@abc.abstractmethod
def Write(self, data):
pass
@abc.abstractmethod
def Tell(self):
pass
@abc.abstractmethod
def Seek(self, offset, whence=0):
pass
# These are file object conformant namings for library functions that
# grr uses, and that expect to interact with 'real' file objects.
def read(self, length=None): # pylint: disable=invalid-name
if length is None:
length = self.size - self.offset
if length > self.max_unbound_read:
raise OversizedRead("Attempted to read file of size %s when "
"Server.max_unbound_read_size is %s" %
(self.size, self.max_unbound_read))
return self.Read(length)
def GetContentAge(self):
return self.Get(self.Schema.TYPE).age
seek = utils.Proxy("Seek")
tell = utils.Proxy("Tell")
close = utils.Proxy("Close")
write = utils.Proxy("Write")
flush = utils.Proxy("Flush")
class AFF4MemoryStreamBase(AFF4Stream):
"""A stream which keeps all data in memory.
This is an abstract class, subclasses must define the CONTENT attribute
in the Schema to be versioned or unversioned.
"""
def Initialize(self):
"""Try to load the data from the store."""
super(AFF4MemoryStreamBase, self).Initialize()
contents = ""
if "r" in self.mode:
contents = self.Get(self.Schema.CONTENT)
try:
if contents is not None:
contents = zlib.decompress(utils.SmartStr(contents))
except zlib.error:
pass
self.fd = StringIO.StringIO(contents)
self.size = len(contents)
self.offset = 0
def Truncate(self, offset=None):
if offset is None:
offset = self.offset
self.fd = StringIO.StringIO(self.fd.getvalue()[:offset])
self.size = offset
def Read(self, length):
return self.fd.read(int(length))
def Write(self, data):
if isinstance(data, unicode):
raise IOError("Cannot write unencoded string.")
self._dirty = True
self.fd.write(data)
self.size = max(self.size, self.fd.tell())
def Tell(self):
return self.fd.tell()
def Seek(self, offset, whence=0):
self.fd.seek(offset, whence)
def Flush(self, sync=True):
if self._dirty:
compressed_content = zlib.compress(self.fd.getvalue())
self.Set(self.Schema.CONTENT(compressed_content))
self.Set(self.Schema.SIZE(self.size))
super(AFF4MemoryStreamBase, self).Flush(sync=sync)
def Close(self, sync=True):
if self._dirty:
compressed_content = zlib.compress(self.fd.getvalue())
self.Set(self.Schema.CONTENT(compressed_content))
self.Set(self.Schema.SIZE(self.size))
super(AFF4MemoryStreamBase, self).Close(sync=sync)
def OverwriteAndClose(self, compressed_data, size, sync=True):
"""Directly overwrite the current contents.
Replaces the data currently in the stream with compressed_data,
and closes the object. Makes it possible to avoid recompressing
the data.
Args:
compressed_data: The data to write, must be zlib compressed.
size: The uncompressed size of the data.
sync: Whether the close should be synchronous.
"""
self.Set(self.Schema.CONTENT(compressed_data))
self.Set(self.Schema.SIZE(size))
super(AFF4MemoryStreamBase, self).Close(sync=sync)
def GetContentAge(self):
return self.Get(self.Schema.CONTENT).age
class AFF4MemoryStream(AFF4MemoryStreamBase):
"""A versioned stream which keeps all data in memory."""
class SchemaCls(AFF4MemoryStreamBase.SchemaCls):
CONTENT = Attribute(
"aff4:content",
rdfvalue.RDFBytes,
"Total content of this file.",
default="")
class AFF4UnversionedMemoryStream(AFF4MemoryStreamBase):
"""An unversioned stream which keeps all data in memory."""
class SchemaCls(AFF4MemoryStreamBase.SchemaCls):
CONTENT = Attribute(
"aff4:content",
rdfvalue.RDFBytes,
"Total content of this file.",
default="",
versioned=False)
class ChunkCache(utils.FastStore):
"""A cache which closes its objects when they expire."""
def __init__(self, kill_cb=None, *args, **kw):
self.kill_cb = kill_cb
super(ChunkCache, self).__init__(*args, **kw)
def KillObject(self, obj):
if self.kill_cb:
self.kill_cb(obj)
def __getstate__(self):
if self.kill_cb:
raise NotImplementedError("Can't pickle callback.")
return self.__dict__
class AFF4ImageBase(AFF4Stream):
"""An AFF4 Image is stored in segments.
We are both an Image here and a volume (since we store the segments inside
us). This is an abstract class, subclasses choose the type to use for chunks.
"""
NUM_RETRIES = 10
CHUNK_ID_TEMPLATE = "%010X"
# This is the chunk size of each chunk. The chunksize can not be changed once
# the object is created.
chunksize = 64 * 1024
# Subclasses should set the name of the type of stream to use for chunks.
STREAM_TYPE = None
# How many chunks should be cached.
LOOK_AHEAD = 10
class SchemaCls(AFF4Stream.SchemaCls):
"""The schema for AFF4ImageBase."""
_CHUNKSIZE = Attribute(
"aff4:chunksize",
rdfvalue.RDFInteger,
"Total size of each chunk.",
default=64 * 1024)
# Note that we can't use CONTENT.age in place of this, since some types
# (specifically, AFF4Image) do not have a CONTENT attribute, since they're
# stored in chunks. Rather than maximising the last updated time over all
# chunks, we store it and update it as an attribute here.
CONTENT_LAST = Attribute(
"metadata:content_last",
rdfvalue.RDFDatetime,
"The last time any content was written.",
creates_new_object_version=False)
@classmethod
def _GenerateChunkPaths(cls, fds):
for fd in fds:
num_chunks = fd.size / fd.chunksize + 1
for chunk in xrange(num_chunks):
yield fd.urn.Add(fd.CHUNK_ID_TEMPLATE % chunk), fd
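# Editor's illustrative sketch (not part of the original file): chunk segments
# are stored directly under the image URN using the zero-padded hex template,
# e.g. chunk 3 of an image lives at
#
#   fd.urn.Add(AFF4ImageBase.CHUNK_ID_TEMPLATE % 3)   # <image urn>/0000000003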
MULTI_STREAM_CHUNKS_READ_AHEAD = 1000
@classmethod
def _MultiStream(cls, fds):
"""Effectively streams data from multiple opened AFF4ImageBase objects.
Args:
fds: A list of opened AFF4Stream (or AFF4Stream descendants) objects.
Yields:
Tuples (fd, chunk, exception) where chunk is a binary blob of data and fd
is an object from the fds argument.
If one or more chunks are missing, exception will be a MissingChunksError
while chunk will be None. _MultiStream does its best to skip the file
entirely if one of its chunks is missing, but in case of very large files
it's still possible to yield a truncated file.
"""
missing_chunks_by_fd = {}
for chunk_fd_pairs in utils.Grouper(
cls._GenerateChunkPaths(fds), cls.MULTI_STREAM_CHUNKS_READ_AHEAD):
chunks_map = dict(chunk_fd_pairs)
contents_map = {}
for chunk_fd in FACTORY.MultiOpen(
chunks_map, mode="r", token=fds[0].token):
if isinstance(chunk_fd, AFF4Stream):
fd = chunks_map[chunk_fd.urn]
contents_map[chunk_fd.urn] = chunk_fd.read()
for chunk_urn, fd in chunk_fd_pairs:
if chunk_urn not in contents_map or not contents_map[chunk_urn]:
missing_chunks_by_fd.setdefault(fd, []).append(chunk_urn)
for chunk_urn, fd in chunk_fd_pairs:
if fd in missing_chunks_by_fd:
continue
yield fd, contents_map[chunk_urn], None
for fd, missing_chunks in missing_chunks_by_fd.iteritems():
e = MissingChunksError(
"%d missing chunks (multi-stream)." % len(missing_chunks),
missing_chunks=missing_chunks)
yield fd, None, e
def Initialize(self):
"""Build a cache for our chunks."""
super(AFF4ImageBase, self).Initialize()
self.offset = 0
# A cache for segments.
self.chunk_cache = ChunkCache(self._WriteChunk, 100)
if "r" in self.mode:
self.size = int(self.Get(self.Schema.SIZE))
# pylint: disable=protected-access
self.chunksize = int(self.Get(self.Schema._CHUNKSIZE))
# pylint: enable=protected-access
self.content_last = self.Get(self.Schema.CONTENT_LAST)
else:
self.size = 0
self.content_last = None
def SetChunksize(self, chunksize):
# pylint: disable=protected-access
self.Set(self.Schema._CHUNKSIZE(chunksize))
# pylint: enable=protected-access
self.chunksize = int(chunksize)
self.Truncate(0)
def Seek(self, offset, whence=0):
# This stream does not support random writing in "w" mode. When the stream
# is opened in "w" mode we can not read from the data store and therefore we
# can not merge writes with existing data. It only makes sense to append to
# existing streams.
if self.mode == "w":
# Seeking to the end of the stream is ok.
if not (whence == 2 and offset == 0):
raise IOError("Can not seek with an AFF4Image opened for write only.")
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = long(self.size) + offset
def Tell(self):
return self.offset
def Truncate(self, offset=0):
self._dirty = True
self.size = offset
self.offset = offset
self.chunk_cache.Flush()
def _ReadChunk(self, chunk):
self._ReadChunks([chunk])
return self.chunk_cache.Get(chunk)
def _ReadChunks(self, chunks):
chunk_names = {
self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk): chunk
for chunk in chunks
}
for child in FACTORY.MultiOpen(
chunk_names, mode="rw", token=self.token, age=self.age_policy):
if isinstance(child, AFF4Stream):
fd = StringIO.StringIO(child.read())
fd.dirty = False
fd.chunk = chunk_names[child.urn]
self.chunk_cache.Put(fd.chunk, fd)
def _WriteChunk(self, chunk):
if chunk.dirty:
chunk_name = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk.chunk)
with FACTORY.Create(
chunk_name, self.STREAM_TYPE, mode="rw", token=self.token) as fd:
fd.write(chunk.getvalue())
def _GetChunkForWriting(self, chunk):
"""Opens a chunk for writing, creating a new one if it doesn't exist yet."""
try:
chunk = self.chunk_cache.Get(chunk)
chunk.dirty = True
return chunk
except KeyError:
pass
try:
chunk = self._ReadChunk(chunk)
chunk.dirty = True
return chunk
except KeyError:
pass
fd = StringIO.StringIO()
fd.chunk = chunk
fd.dirty = True
self.chunk_cache.Put(chunk, fd)
return fd
def _GetChunkForReading(self, chunk):
"""Returns the relevant chunk from the datastore and reads ahead."""
try:
return self.chunk_cache.Get(chunk)
except KeyError:
pass
# We don't have this chunk already cached. The most common read
# access pattern is contiguous reading, so since we have to go to
# the data store already, we read ahead to reduce round trips.
missing_chunks = []
for chunk_number in range(chunk, chunk + self.LOOK_AHEAD):
if chunk_number not in self.chunk_cache:
missing_chunks.append(chunk_number)
self._ReadChunks(missing_chunks)
# This should work now - otherwise we just give up.
try:
return self.chunk_cache.Get(chunk)
except KeyError:
raise ChunkNotFoundError("Cannot open chunk %s" % chunk)
def _ReadPartial(self, length):
"""Read as much as possible, but not more than length."""
chunk = self.offset / self.chunksize
chunk_offset = self.offset % self.chunksize
available_to_read = min(length, self.chunksize - chunk_offset)
retries = 0
while retries < self.NUM_RETRIES:
fd = self._GetChunkForReading(chunk)
if fd:
break
# Arriving here means we know about blobs that cannot be found in the db.
# The most likely reason is that they have not been synced yet so we
# retry a couple of times just in case they come in eventually.
logging.warning("Chunk not found.")
time.sleep(1)
retries += 1
if retries >= self.NUM_RETRIES:
raise IOError("Chunk not found for reading.")
fd.seek(chunk_offset)
result = fd.read(available_to_read)
self.offset += len(result)
return result
def Read(self, length):
"""Read a block of data from the file."""
result = ""
# The total available size in the file
length = int(length)
length = min(length, self.size - self.offset)
while length > 0:
data = self._ReadPartial(length)
if not data:
break
length -= len(data)
result += data
return result
def _WritePartial(self, data):
"""Writes at most one chunk of data."""
chunk = self.offset / self.chunksize
chunk_offset = self.offset % self.chunksize
data = utils.SmartStr(data)
available_to_write = min(len(data), self.chunksize - chunk_offset)
fd = self._GetChunkForWriting(chunk)
fd.seek(chunk_offset)
fd.write(data[:available_to_write])
self.offset += available_to_write
return data[available_to_write:]
def Write(self, data):
self._dirty = True
if isinstance(data, unicode):
raise IOError("Cannot write unencoded string.")
while data:
data = self._WritePartial(data)
self.size = max(self.size, self.offset)
self.content_last = rdfvalue.RDFDatetime.Now()
def Flush(self, sync=True):
"""Sync the chunk cache to storage."""
if self._dirty:
self.Set(self.Schema.SIZE(self.size))
if self.content_last is not None:
self.Set(self.Schema.CONTENT_LAST, self.content_last)
# Flushing the cache will write all chunks to the blob store.
self.chunk_cache.Flush()
super(AFF4ImageBase, self).Flush(sync=sync)
def Close(self, sync=True):
"""This method is called to sync our data into storage.
Args:
sync: Should flushing be synchronous.
"""
self.Flush(sync=sync)
def GetContentAge(self):
# TODO(user): make CONTENT_LAST reliable. For some reason, sometimes
# CONTENT_LAST gets set even though file's data is not downloaded from the
# client.
return self.content_last
def __getstate__(self):
# We can't pickle the callback.
if "chunk_cache" in self.__dict__:
self.chunk_cache.Flush()
res = self.__dict__.copy()
del res["chunk_cache"]
return res
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
self.chunk_cache = ChunkCache(self._WriteChunk, 100)
class AFF4Image(AFF4ImageBase):
"""An AFF4 Image containing a versioned stream."""
STREAM_TYPE = AFF4MemoryStream
class AFF4UnversionedImage(AFF4ImageBase):
"""An AFF4 Image containing an unversioned stream."""
STREAM_TYPE = AFF4UnversionedMemoryStream
# Utility functions
class AFF4InitHook(registry.InitHook):
pre = ["ACLInit", "DataStoreInit"]
def Run(self):
"""Delayed loading of aff4 plugins to break import cycles."""
# pylint: disable=unused-variable,global-statement,g-import-not-at-top
from grr.lib import aff4_objects
global FACTORY
FACTORY = Factory() # pylint: disable=g-bad-name
# pylint: enable=unused-variable,global-statement,g-import-not-at-top
stats.STATS.RegisterCounterMetric("aff4_cache_hits")
stats.STATS.RegisterCounterMetric("aff4_cache_misses")
class AFF4Filter(object):
"""A simple filtering system to be used with Query()."""
__metaclass__ = registry.MetaclassRegistry
def __init__(self, *args):
self.args = args
@abc.abstractmethod
def FilterOne(self, fd):
"""Filter a single aff4 object."""
def Filter(self, subjects):
"""A generator which filters the subjects.
Args:
subjects: An iterator of aff4 objects.
Yields:
The Objects which pass the filter.
"""
for subject in subjects:
if self.FilterOne(subject):
yield subject
@classmethod
def GetFilter(cls, filter_name):
return cls.classes[filter_name]
# A global registry of all AFF4 classes
FACTORY = None
ROOT_URN = rdfvalue.RDFURN("aff4:/")
def issubclass(obj, cls): # pylint: disable=redefined-builtin,g-bad-name
"""A sane implementation of issubclass.
See http://bugs.python.org/issue10569
The bare Python issubclass must be protected by an isinstance test first, since
it only works on types and raises when given something that is not a type.
Args:
obj: Any object or class.
cls: The class to check against.
Returns:
True if obj is a subclass of cls and False otherwise.
"""
return isinstance(obj, type) and __builtin__.issubclass(obj, cls)
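# Editor's illustrative sketch (not part of the original file):
#
#   issubclass(AFF4Image, AFF4Stream)       # True
#   issubclass("not a type", AFF4Stream)    # False here; the builtin raises TypeError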
def AuditLogBase():
return ROOT_URN.Add("audit").Add("logs")
def CurrentAuditLog():
"""Get the rdfurn of the current audit log."""
now_sec = rdfvalue.RDFDatetime.Now().AsSecondsFromEpoch()
rollover = config_lib.CONFIG["Logging.aff4_audit_log_rollover"]
# This gives us a filename that only changes every
# Logging.aff4_audit_log_rollover seconds, but is still a valid timestamp.
current_log = (now_sec // rollover) * rollover
return AuditLogBase().Add(str(current_log))
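# Editor's illustrative worked example (not part of the original file),
# assuming a rollover of 1209600 seconds (two weeks):
#
#   now_sec = 1467331200
#   (now_sec // 1209600) * 1209600          # -> 1467244800
#
# so every timestamp inside the same window maps to the same log URN,
# e.g. aff4:/audit/logs/1467244800.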
| apache-2.0 | -3,943,746,568,183,437,300 | 31.335811 | 80 | 0.645377 | false |
eirmag/weboob | modules/sachsen/browser.py | 1 | 1640 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2012 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser
from .pages import ListPage, HistoryPage
__all__ = ['SachsenBrowser']
class SachsenBrowser(BaseBrowser):
DOMAIN = u'www.umwelt.sachsen.de'
ENCODING = None
PAGES = {'.*inhalt_re.html.*': ListPage,
'.*hwz/MP/.*': HistoryPage
}
def __init__(self, *args, **kwargs):
BaseBrowser.__init__(self, *args, **kwargs)
def home(self):
self.location('/de/wu/umwelt/lfug/lfug-internet/hwz/inhalt_re.html')
def get_rivers_list(self):
if not self.is_on_page(ListPage):
self.location('/de/wu/umwelt/lfug/lfug-internet/hwz/inhalt_re.html')
return self.page.get_rivers_list()
def iter_history(self, sensor):
self.location('/de/wu/umwelt/lfug/lfug-internet/hwz/MP/%d/index.html' % int(sensor.gaugeid))
return self.page.iter_history(sensor)
| agpl-3.0 | 2,830,914,907,962,726,400 | 31.8 | 100 | 0.686585 | false |
CranleighAD/isams-tools | settings_example.py | 1 | 2947 | # enable or disable the whole program
ENABLED = True
# if we're in testing mode, output more debug and allow testers to add their own email
DEBUG = True
# used with above, you can check the output of emails that would have been sent
SEND_EMAILS = True
# iSAMS Batch API key
API_KEY = "11D497FF-A7D9-4646-A6B8-D9D1B8718FAC"
# iSAMS URL
URL = 'https://isams.school.com'
# Choose which connection method from: JSON, XML, MSSQL
CONNECTION_METHOD = 'JSON'
# Database settings
DATABASE = ''
DATABASE_SERVER = ''
DATABASE_USER = ''
DATABASE_PASSWORD = ''
# specify your own dates to use when testing, e.g. a date that has already had the register taken for
DEBUG_START_DATE = '2016-09-18'
DEBUG_END_DATE = '2016-09-19'
# allows you to specify a file with XML or JSON content to test with rather tha using live data
DEBUG_DATA = 'test_data.xml'
# outgoing SMTP details
EMAIL = {
'server': 'smtp.example.com',
'port': 465,
'username': '[email protected]',
'password': 'p455w0rd',
'subject': 'Register not completed',
'from': '[email protected]',
'to': '[email protected]',
'cc': '[email protected]',
'bcc': '[email protected]'
}
# whether to log into the SMTP server
EMAIL_LOGIN = True
# whether to create an SSL connection or not
EMAIL_SSL = True
# Default: Monday - Friday, 0 = Mon, 6 = Sun
WORKING_DAYS = (0, 1, 2, 3, 4)
# weekdays which are not school days
# for help generating these:
# import pandas
# pandas.bdate_range('2016-12-15', '2017-01-07')
HOLIDAYS = (
# Winter break
'2016-12-15', '2016-12-16', '2016-12-19', '2016-12-20',
'2016-12-21', '2016-12-22', '2016-12-23', '2016-12-26',
'2016-12-27', '2016-12-28', '2016-12-29', '2016-12-30',
'2017-01-02', '2017-01-03', '2017-01-04', '2017-01-05',
'2017-01-06',
)
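# A sketch of generating the tuple above with the pandas hint given earlier
# (editor's illustration, not part of the original settings file):
#
# import pandas
# HOLIDAYS = tuple(day.strftime('%Y-%m-%d')
#                  for day in pandas.bdate_range('2016-12-15', '2017-01-07'))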
# email templates
FIRST_EMAIL = """
Dear Teacher,
This is a friendly reminder to complete your register. One or more of your students has not yet been registered.
If you are having problems completing it, please email XXX
If this message is in error, please forward to the helpdesk.
Regards,
iSAMS Bot
"""
SECOND_EMAIL = """
Dear Teacher,
One or more of your students have still not been registered.
If you are having problems completing it, please email XXX
If this message is in error, please forward to the helpdesk.
Regards,
iSAMS Bot
"""
# You can use %list_of_missing_registers% for a list in the template
FINAL_EMAIL = """
Here is a list of forms that are still outstanding:
%list_of_missing_registers%
Regards,
iSAMS Bot
"""
# separate with commas if you want more than one recipient
FINAL_EMAIL_TO = "[email protected]"
#######################
# Data Check Settings #
#######################
DATA_CHECK_ENABED = True
# who to email when it fails
DATA_CHECK_FAIL_EMAIL = "[email protected]"
# list of subjects to ignore from checks, as quoted strings
DATA_CHECK_IGNORE_SUBJECTS = ["Games", "Physical Education"] | gpl-3.0 | 5,770,833,458,472,531,000 | 23.773109 | 112 | 0.687479 | false |
deiv/plasmate-pkg | plasmate/templates/mainRunner.py | 1 | 3638 | # -*- coding: iso-8859-1 -*-
#
# Author: $AUTHOR <$EMAIL>
# Date: $DATE
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation; either version 2, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU Library General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Import essential modules
from PyQt4.QtCore import *
from PyQt4 import uic
from PyKDE4.kdeui import *
from PyKDE4.kdecore import *
from PyKDE4.plasma import *
from PyKDE4 import plasmascript
class $RUNNER_NAME(plasmascript.Runner):
# Constructor, forward initialization to its superclass
# Note: try to NOT modify this constructor; all the setup code
# should be placed in the init method.
def __init__(self,parent,args=None):
plasmascript.Runner.__init__(self,parent)
self.actions = []
# init method
# Put here all the code needed to initialize your runner
def init(self):
# Simple runner example
#print '### init'
#self.reloadConfiguration()
#ign = Plasma.RunnerContext.Types(Plasma.RunnerContext.Directory |
#Plasma.RunnerContext.File | \
#Plasma.RunnerContext.NetworkLocation)
#self.setIgnoredTypes(ign)
#description = i18n("Python Test Runner. Responds Hello World! to hello (= :q:)");
#self.addSyntax(Plasma.RunnerSyntax(":q:", description))
#self.setSpeed(Plasma.AbstractRunner.NormalSpeed)
#self.setPriority(Plasma.AbstractRunner.LowestPriority)
#self.setHasRunOptions(True)
pass  # no-op body; the example calls above are left commented out
# match method
# Assign an action for a defined match
def match(self, search):
#print '### match:',
#term = search.query()
#print term
#if term.toLower() == 'hello':
# match = Plasma.QueryMatch(self.runner)
# match.setType(Plasma.QueryMatch.ExactMatch)
# match.setIcon(KIcon('text-x-python'))
# match.setText(i18n('Hello World!'))
# match.setData(self.url)
# search.addMatch(term, match)
pass
# run method
# Runs the runner
def run(self, search, action):
#print '### run:',
#data = action.data().toString()
#print data
#KToolInvocation.invokeBrowser(data)
pass
# prepare method
# Sets up the runner
def prepare(self):
#print '### prepare',
#lock = QMutexLocker(self.bigLock())
#print 'in BIG lock'
# teardown method
#
def teardown(self):
print '### teardown'
# createRunOption method
# Loads an UI to configure the runner options
    def createRunOptions(self, parent):
        pass  # placeholder body; uncomment and adapt the example below
#print '### createRunOptions', parent
#uic.loadUi(self.package().filePath('ui', 'config.ui'), parent)
# reloadConfiguration method
#
    def reloadConfiguration(self):
        pass  # placeholder body; uncomment and adapt the example below
#print '### reloadConfiguration: ',
#self.url = unicode(self.config().readEntry('url', 'http://www.kde.org').toString())
#print self.url
# CreateRunner method
# Note: do NOT modify it, needed by Plasma
def CreateRunner(parent):
return $RUNNER_NAME(parent)
| gpl-2.0 | 202,224,936,770,810,270 | 33 | 92 | 0.646509 | false |
cliixtech/bigorna | tests/tasks/test_sched.py | 1 | 1892 | from unittest import TestCase
from unittest.mock import create_autospec
from nose.tools import istest
from bigorna.commons import Event
from bigorna.tasks import TaskScheduler, TaskDefinition, task_status_changed_evt
from bigorna.tasks.executor import Executor
from bigorna.commons import Config
class TaskSchedulerTest(TestCase):
def setUp(self):
self.config_mock = create_autospec(Config)
self.config_mock.concurrent_tasks = 1
self.executor_mock = create_autospec(Executor)
self.task_def = TaskDefinition("ls -la", '.')
self.scheduler = TaskScheduler(self.config_mock, self.executor_mock)
@istest
def submit_calls_executor_and_submit_if_no_tasks(self):
self.executor_mock.running_tasks_counter = 0
self.scheduler.submit(self.task_def)
self.executor_mock.submit.assert_called_once_with(self.task_def)
@istest
def submit_dont_submit_to_executor_if_too_many_tasks(self):
self.executor_mock.running_tasks_counter = 1
self.scheduler.submit(self.task_def)
self.executor_mock.submit.assert_not_called()
@istest
def handle_event_and_submit_to_executor(self):
self.executor_mock.running_tasks_counter = 1
self.scheduler.submit(self.task_def)
other_task = TaskDefinition("ls -la", None)
self.scheduler.submit(other_task)
self.executor_mock.running_tasks_counter = 0
self.scheduler._event_handler(Event(task_status_changed_evt, None))
self.executor_mock.submit.assert_called_once_with(self.task_def)
self.assertEqual(self.scheduler.pending_tasks_counter, 1)
@istest
def handle_event_and_not_pending_tasks(self):
self.executor_mock.running_tasks_counter = 0
self.scheduler._event_handler(Event(task_status_changed_evt, None))
self.executor_mock.submit.assert_not_called()
| gpl-3.0 | 3,351,970,099,618,253,000 | 32.192982 | 80 | 0.705603 | false |
Runscope/pysaml2 | example/idp2/idp.py | 1 | 35623 | #!/usr/bin/env python
import argparse
import base64
import importlib
import logging
import os
import re
import socket
import time
from Cookie import SimpleCookie
from hashlib import sha1
from urlparse import parse_qs
from saml2 import BINDING_HTTP_ARTIFACT
from saml2 import BINDING_URI
from saml2 import BINDING_PAOS
from saml2 import BINDING_SOAP
from saml2 import BINDING_HTTP_REDIRECT
from saml2 import BINDING_HTTP_POST
from saml2 import server
from saml2 import time_util
from saml2.authn_context import AuthnBroker
from saml2.authn_context import PASSWORD
from saml2.authn_context import UNSPECIFIED
from saml2.authn_context import authn_context_class_ref
from saml2.httputil import Response
from saml2.httputil import NotFound
from saml2.httputil import geturl
from saml2.httputil import get_post
from saml2.httputil import Redirect
from saml2.httputil import Unauthorized
from saml2.httputil import BadRequest
from saml2.httputil import ServiceError
from saml2.ident import Unknown
from saml2.metadata import create_metadata_string
from saml2.profile import ecp
from saml2.s_utils import rndstr
from saml2.s_utils import exception_trace
from saml2.s_utils import UnknownPrincipal
from saml2.s_utils import UnsupportedBinding
from saml2.s_utils import PolicyError
from saml2.sigver import verify_redirect_signature
from saml2.sigver import encrypt_cert_from_item
from idp_user import USERS
from idp_user import EXTRA
from mako.lookup import TemplateLookup
logger = logging.getLogger("saml2.idp")
logger.setLevel(logging.WARNING)
class Cache(object):
def __init__(self):
self.user2uid = {}
self.uid2user = {}
def _expiration(timeout, tformat="%a, %d-%b-%Y %H:%M:%S GMT"):
"""
:param timeout:
:param tformat:
:return:
"""
if timeout == "now":
return time_util.instant(tformat)
elif timeout == "dawn":
return time.strftime(tformat, time.gmtime(0))
else:
# validity time should match lifetime of assertions
return time_util.in_a_while(minutes=timeout, format=tformat)
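# A minimal sketch of what _expiration returns (values are illustrative):
#   _expiration("now")  -> the current instant, e.g. "Mon, 01-Jan-2024 12:00:00 GMT"
#   _expiration("dawn") -> the epoch, "Thu, 01-Jan-1970 00:00:00 GMT"
#   _expiration(5)      -> a timestamp five minutes from now, in the same cookie date format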
# -----------------------------------------------------------------------------
def dict2list_of_tuples(d):
return [(k, v) for k, v in d.items()]
# -----------------------------------------------------------------------------
class Service(object):
def __init__(self, environ, start_response, user=None):
self.environ = environ
logger.debug("ENVIRON: %s" % environ)
self.start_response = start_response
self.user = user
def unpack_redirect(self):
if "QUERY_STRING" in self.environ:
_qs = self.environ["QUERY_STRING"]
return dict([(k, v[0]) for k, v in parse_qs(_qs).items()])
else:
return None
def unpack_post(self):
_dict = parse_qs(get_post(self.environ))
logger.debug("unpack_post:: %s" % _dict)
try:
return dict([(k, v[0]) for k, v in _dict.items()])
except Exception:
return None
def unpack_soap(self):
try:
query = get_post(self.environ)
return {"SAMLRequest": query, "RelayState": ""}
except Exception:
return None
def unpack_either(self):
if self.environ["REQUEST_METHOD"] == "GET":
_dict = self.unpack_redirect()
elif self.environ["REQUEST_METHOD"] == "POST":
_dict = self.unpack_post()
else:
_dict = None
logger.debug("_dict: %s" % _dict)
return _dict
def operation(self, saml_msg, binding):
logger.debug("_operation: %s" % saml_msg)
        if not saml_msg or "SAMLRequest" not in saml_msg:
resp = BadRequest('Error parsing request or no request')
return resp(self.environ, self.start_response)
else:
try:
_encrypt_cert = encrypt_cert_from_item(
saml_msg["req_info"].message)
return self.do(saml_msg["SAMLRequest"], binding,
saml_msg["RelayState"],
encrypt_cert=_encrypt_cert)
except KeyError:
# Can live with no relay state
return self.do(saml_msg["SAMLRequest"], binding)
def artifact_operation(self, saml_msg):
if not saml_msg:
resp = BadRequest("Missing query")
return resp(self.environ, self.start_response)
else:
# exchange artifact for request
request = IDP.artifact2message(saml_msg["SAMLart"], "spsso")
try:
return self.do(request, BINDING_HTTP_ARTIFACT,
saml_msg["RelayState"])
except KeyError:
return self.do(request, BINDING_HTTP_ARTIFACT)
def response(self, binding, http_args):
resp = None
if binding == BINDING_HTTP_ARTIFACT:
resp = Redirect()
elif http_args["data"]:
resp = Response(http_args["data"], headers=http_args["headers"])
else:
for header in http_args["headers"]:
if header[0] == "Location":
resp = Redirect(header[1])
if not resp:
resp = ServiceError("Don't know how to return response")
return resp(self.environ, self.start_response)
def do(self, query, binding, relay_state="", encrypt_cert=None):
pass
def redirect(self):
""" Expects a HTTP-redirect request """
_dict = self.unpack_redirect()
return self.operation(_dict, BINDING_HTTP_REDIRECT)
def post(self):
""" Expects a HTTP-POST request """
_dict = self.unpack_post()
return self.operation(_dict, BINDING_HTTP_POST)
def artifact(self):
# Can be either by HTTP_Redirect or HTTP_POST
_dict = self.unpack_either()
return self.artifact_operation(_dict)
def soap(self):
"""
        Handles a request received over the SOAP binding
"""
logger.debug("- SOAP -")
_dict = self.unpack_soap()
logger.debug("_dict: %s" % _dict)
return self.operation(_dict, BINDING_SOAP)
def uri(self):
_dict = self.unpack_either()
return self.operation(_dict, BINDING_SOAP)
def not_authn(self, key, requested_authn_context):
ruri = geturl(self.environ, query=False)
return do_authentication(self.environ, self.start_response,
authn_context=requested_authn_context,
key=key, redirect_uri=ruri)
# -----------------------------------------------------------------------------
REPOZE_ID_EQUIVALENT = "uid"
FORM_SPEC = """<form name="myform" method="post" action="%s">
<input type="hidden" name="SAMLResponse" value="%s" />
<input type="hidden" name="RelayState" value="%s" />
</form>"""
# -----------------------------------------------------------------------------
# === Single log in ====
# -----------------------------------------------------------------------------
class AuthenticationNeeded(Exception):
def __init__(self, authn_context=None, *args, **kwargs):
Exception.__init__(*args, **kwargs)
self.authn_context = authn_context
class SSO(Service):
def __init__(self, environ, start_response, user=None):
Service.__init__(self, environ, start_response, user)
self.binding = ""
self.response_bindings = None
self.resp_args = {}
self.binding_out = None
self.destination = None
self.req_info = None
self.op_type = ""
def verify_request(self, query, binding):
"""
:param query: The SAML query, transport encoded
:param binding: Which binding the query came in over
"""
resp_args = {}
if not query:
logger.info("Missing QUERY")
resp = Unauthorized('Unknown user')
return resp_args, resp(self.environ, self.start_response)
if not self.req_info:
self.req_info = IDP.parse_authn_request(query, binding)
logger.info("parsed OK")
_authn_req = self.req_info.message
logger.debug("%s" % _authn_req)
try:
self.binding_out, self.destination = IDP.pick_binding(
"assertion_consumer_service",
bindings=self.response_bindings,
entity_id=_authn_req.issuer.text, request=_authn_req)
except Exception as err:
logger.error("Couldn't find receiver endpoint: %s" % err)
raise
logger.debug("Binding: %s, destination: %s" % (self.binding_out,
self.destination))
resp_args = {}
try:
resp_args = IDP.response_args(_authn_req)
_resp = None
except UnknownPrincipal as excp:
_resp = IDP.create_error_response(_authn_req.id,
self.destination, excp)
except UnsupportedBinding as excp:
_resp = IDP.create_error_response(_authn_req.id,
self.destination, excp)
return resp_args, _resp
def do(self, query, binding_in, relay_state="", encrypt_cert=None):
"""
:param query: The request
:param binding_in: Which binding was used when receiving the query
:param relay_state: The relay state provided by the SP
:param encrypt_cert: Cert to use for encryption
:return: A response
"""
try:
resp_args, _resp = self.verify_request(query, binding_in)
except UnknownPrincipal as excp:
logger.error("UnknownPrincipal: %s" % (excp,))
resp = ServiceError("UnknownPrincipal: %s" % (excp,))
return resp(self.environ, self.start_response)
except UnsupportedBinding as excp:
logger.error("UnsupportedBinding: %s" % (excp,))
resp = ServiceError("UnsupportedBinding: %s" % (excp,))
return resp(self.environ, self.start_response)
if not _resp:
identity = USERS[self.user].copy()
#identity["eduPersonTargetedID"] = get_eptid(IDP, query, session)
logger.info("Identity: %s" % (identity,))
if REPOZE_ID_EQUIVALENT:
identity[REPOZE_ID_EQUIVALENT] = self.user
try:
try:
metod = self.environ["idp.authn"]
except KeyError:
pass
else:
resp_args["authn"] = metod
_resp = IDP.create_authn_response(
identity, userid=self.user,
encrypt_cert=encrypt_cert,
**resp_args)
except Exception as excp:
logging.error(exception_trace(excp))
resp = ServiceError("Exception: %s" % (excp,))
return resp(self.environ, self.start_response)
logger.info("AuthNResponse: %s" % _resp)
if self.op_type == "ecp":
kwargs = {"soap_headers": [
ecp.Response(
assertion_consumer_service_url=self.destination)]}
else:
kwargs = {}
http_args = IDP.apply_binding(self.binding_out,
"%s" % _resp, self.destination,
relay_state, response=True, **kwargs)
logger.debug("HTTPargs: %s" % http_args)
return self.response(self.binding_out, http_args)
def _store_request(self, saml_msg):
logger.debug("_store_request: %s" % saml_msg)
key = sha1(saml_msg["SAMLRequest"]).hexdigest()
# store the AuthnRequest
IDP.ticket[key] = saml_msg
return key
def redirect(self):
""" This is the HTTP-redirect endpoint """
logger.info("--- In SSO Redirect ---")
saml_msg = self.unpack_redirect()
try:
_key = saml_msg["key"]
saml_msg = IDP.ticket[_key]
self.req_info = saml_msg["req_info"]
del IDP.ticket[_key]
except KeyError:
try:
self.req_info = IDP.parse_authn_request(saml_msg["SAMLRequest"],
BINDING_HTTP_REDIRECT)
except KeyError:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
_req = self.req_info.message
if "SigAlg" in saml_msg and "Signature" in saml_msg: # Signed request
issuer = _req.issuer.text
_certs = IDP.metadata.certs(issuer, "any", "signing")
verified_ok = False
for cert in _certs:
if verify_redirect_signature(saml_msg, cert):
verified_ok = True
break
if not verified_ok:
resp = BadRequest("Message signature verification failure")
return resp(self.environ, self.start_response)
if self.user:
if _req.force_authn:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
else:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_REDIRECT)
def post(self):
"""
The HTTP-Post endpoint
"""
logger.info("--- In SSO POST ---")
saml_msg = self.unpack_either()
self.req_info = IDP.parse_authn_request(
saml_msg["SAMLRequest"], BINDING_HTTP_POST)
_req = self.req_info.message
if self.user:
if _req.force_authn:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
else:
return self.operation(saml_msg, BINDING_HTTP_POST)
else:
saml_msg["req_info"] = self.req_info
key = self._store_request(saml_msg)
return self.not_authn(key, _req.requested_authn_context)
# def artifact(self):
# # Can be either by HTTP_Redirect or HTTP_POST
# _req = self._store_request(self.unpack_either())
# if isinstance(_req, basestring):
# return self.not_authn(_req)
# return self.artifact_operation(_req)
def ecp(self):
# The ECP interface
logger.info("--- ECP SSO ---")
resp = None
try:
authz_info = self.environ["HTTP_AUTHORIZATION"]
if authz_info.startswith("Basic "):
_info = base64.b64decode(authz_info[6:])
logger.debug("Authz_info: %s" % _info)
try:
(user, passwd) = _info.split(":")
if PASSWD[user] != passwd:
resp = Unauthorized()
self.user = user
self.environ[
"idp.authn"] = AUTHN_BROKER.get_authn_by_accr(
PASSWORD)
except ValueError:
resp = Unauthorized()
else:
resp = Unauthorized()
except KeyError:
resp = Unauthorized()
if resp:
return resp(self.environ, self.start_response)
_dict = self.unpack_soap()
self.response_bindings = [BINDING_PAOS]
# Basic auth ?!
self.op_type = "ecp"
return self.operation(_dict, BINDING_SOAP)
# -----------------------------------------------------------------------------
# === Authentication ====
# -----------------------------------------------------------------------------
def do_authentication(environ, start_response, authn_context, key,
redirect_uri):
"""
Display the login form
"""
logger.debug("Do authentication")
auth_info = AUTHN_BROKER.pick(authn_context)
if len(auth_info):
method, reference = auth_info[0]
logger.debug("Authn chosen: %s (ref=%s)" % (method, reference))
return method(environ, start_response, reference, key, redirect_uri)
else:
resp = Unauthorized("No usable authentication method")
return resp(environ, start_response)
# -----------------------------------------------------------------------------
PASSWD = {
"daev0001": "qwerty",
"haho0032": "qwerty",
"roland": "dianakra",
"babs": "howes",
"upper": "crust"}
def username_password_authn(environ, start_response, reference, key,
redirect_uri):
"""
Display the login form
"""
logger.info("The login page")
headers = []
resp = Response(mako_template="login.mako", template_lookup=LOOKUP,
headers=headers)
argv = {
"action": "/verify",
"login": "",
"password": "",
"key": key,
"authn_reference": reference,
"redirect_uri": redirect_uri
}
logger.info("do_authentication argv: %s" % argv)
return resp(environ, start_response, **argv)
def verify_username_and_password(dic):
global PASSWD
# verify username and password
if PASSWD[dic["login"][0]] == dic["password"][0]:
return True, dic["login"][0]
else:
return False, ""
def do_verify(environ, start_response, _):
query = parse_qs(get_post(environ))
logger.debug("do_verify: %s" % query)
try:
_ok, user = verify_username_and_password(query)
except KeyError:
_ok = False
user = None
if not _ok:
resp = Unauthorized("Unknown user or wrong password")
else:
uid = rndstr(24)
IDP.cache.uid2user[uid] = user
IDP.cache.user2uid[user] = uid
logger.debug("Register %s under '%s'" % (user, uid))
kaka = set_cookie("idpauthn", "/", uid, query["authn_reference"][0])
lox = "%s?id=%s&key=%s" % (query["redirect_uri"][0], uid,
query["key"][0])
logger.debug("Redirect => %s" % lox)
resp = Redirect(lox, headers=[kaka], content="text/html")
return resp(environ, start_response)
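# Flow sketch (describing the code above, not additional behaviour): the login form
# posts to /verify, do_verify() stores the user in IDP.cache, sets the "idpauthn"
# cookie and redirects back to the original SSO endpoint as
# "<redirect_uri>?id=<uid>&key=<key>"; SSO.redirect() then recovers the stored
# AuthnRequest from IDP.ticket[key].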
def not_found(environ, start_response):
"""Called if no URL matches."""
resp = NotFound()
return resp(environ, start_response)
# -----------------------------------------------------------------------------
# === Single log out ===
# -----------------------------------------------------------------------------
#def _subject_sp_info(req_info):
# # look for the subject
# subject = req_info.subject_id()
# subject = subject.text.strip()
# sp_entity_id = req_info.message.issuer.text.strip()
# return subject, sp_entity_id
class SLO(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
logger.info("--- Single Log Out Service ---")
try:
_, body = request.split("\n")
logger.debug("req: '%s'" % body)
req_info = IDP.parse_logout_request(body, binding)
except Exception as exc:
logger.error("Bad request: %s" % exc)
resp = BadRequest("%s" % exc)
return resp(self.environ, self.start_response)
msg = req_info.message
if msg.name_id:
lid = IDP.ident.find_local_id(msg.name_id)
logger.info("local identifier: %s" % lid)
if lid in IDP.cache.user2uid:
uid = IDP.cache.user2uid[lid]
if uid in IDP.cache.uid2user:
del IDP.cache.uid2user[uid]
del IDP.cache.user2uid[lid]
# remove the authentication
try:
IDP.session_db.remove_authn_statements(msg.name_id)
except KeyError as exc:
logger.error("ServiceError: %s" % exc)
resp = ServiceError("%s" % exc)
return resp(self.environ, self.start_response)
resp = IDP.create_logout_response(msg, [binding])
try:
hinfo = IDP.apply_binding(binding, "%s" % resp, "", relay_state)
except Exception as exc:
logger.error("ServiceError: %s" % exc)
resp = ServiceError("%s" % exc)
return resp(self.environ, self.start_response)
#_tlh = dict2list_of_tuples(hinfo["headers"])
delco = delete_cookie(self.environ, "idpauthn")
if delco:
hinfo["headers"].append(delco)
logger.info("Header: %s" % (hinfo["headers"],))
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Manage Name ID service
# ----------------------------------------------------------------------------
class NMI(Service):
def do(self, query, binding, relay_state="", encrypt_cert=None):
logger.info("--- Manage Name ID Service ---")
req = IDP.parse_manage_name_id_request(query, binding)
request = req.message
# Do the necessary stuff
name_id = IDP.ident.handle_manage_name_id_request(
request.name_id, request.new_id, request.new_encrypted_id,
request.terminate)
logger.debug("New NameID: %s" % name_id)
_resp = IDP.create_manage_name_id_response(request)
# It's using SOAP binding
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "",
relay_state, response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Assertion ID request ===
# ----------------------------------------------------------------------------
# Only URI binding
class AIDR(Service):
def do(self, aid, binding, relay_state="", encrypt_cert=None):
logger.info("--- Assertion ID Service ---")
try:
assertion = IDP.create_assertion_id_request_response(aid)
except Unknown:
resp = NotFound(aid)
return resp(self.environ, self.start_response)
hinfo = IDP.apply_binding(BINDING_URI, "%s" % assertion, response=True)
logger.debug("HINFO: %s" % hinfo)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
def operation(self, _dict, binding, **kwargs):
logger.debug("_operation: %s" % _dict)
if not _dict or "ID" not in _dict:
resp = BadRequest('Error parsing request or no request')
return resp(self.environ, self.start_response)
return self.do(_dict["ID"], binding, **kwargs)
# ----------------------------------------------------------------------------
# === Artifact resolve service ===
# ----------------------------------------------------------------------------
class ARS(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
_req = IDP.parse_artifact_resolve(request, binding)
msg = IDP.create_artifact_response(_req, _req.artifact.text)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Authn query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class AQS(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
logger.info("--- Authn Query Service ---")
_req = IDP.parse_authn_query(request, binding)
_query = _req.message
msg = IDP.create_authn_query_response(_query.subject,
_query.requested_authn_context,
_query.session_index)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# === Attribute query service ===
# ----------------------------------------------------------------------------
# Only SOAP binding
class ATTR(Service):
def do(self, request, binding, relay_state="", encrypt_cert=None):
logger.info("--- Attribute Query Service ---")
_req = IDP.parse_attribute_query(request, binding)
_query = _req.message
name_id = _query.subject.name_id
uid = name_id.text
logger.debug("Local uid: %s" % uid)
identity = EXTRA[uid]
# Comes in over SOAP so only need to construct the response
args = IDP.response_args(_query, [BINDING_SOAP])
msg = IDP.create_attribute_response(identity,
name_id=name_id, **args)
logger.debug("response: %s" % msg)
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % msg, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Name ID Mapping service
# When an entity that shares an identifier for a principal with an identity
# provider wishes to obtain a name identifier for the same principal in a
# particular format or federation namespace, it can send a request to
# the identity provider using this protocol.
# ----------------------------------------------------------------------------
class NIM(Service):
def do(self, query, binding, relay_state="", encrypt_cert=None):
req = IDP.parse_name_id_mapping_request(query, binding)
request = req.message
# Do the necessary stuff
try:
name_id = IDP.ident.handle_name_id_mapping_request(
request.name_id, request.name_id_policy)
except Unknown:
resp = BadRequest("Unknown entity")
return resp(self.environ, self.start_response)
except PolicyError:
resp = BadRequest("Unknown entity")
return resp(self.environ, self.start_response)
info = IDP.response_args(request)
_resp = IDP.create_name_id_mapping_response(name_id, **info)
# Only SOAP
hinfo = IDP.apply_binding(BINDING_SOAP, "%s" % _resp, "", "",
response=True)
resp = Response(hinfo["data"], headers=hinfo["headers"])
return resp(self.environ, self.start_response)
# ----------------------------------------------------------------------------
# Cookie handling
# ----------------------------------------------------------------------------
def info_from_cookie(kaka):
logger.debug("KAKA: %s" % kaka)
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get("idpauthn", None)
if morsel:
try:
key, ref = base64.b64decode(morsel.value).split(":")
return IDP.cache.uid2user[key], ref
except KeyError:
return None, None
else:
logger.debug("No idpauthn cookie")
return None, None
def delete_cookie(environ, name):
kaka = environ.get("HTTP_COOKIE", '')
logger.debug("delete KAKA: %s" % kaka)
if kaka:
cookie_obj = SimpleCookie(kaka)
morsel = cookie_obj.get(name, None)
cookie = SimpleCookie()
cookie[name] = ""
cookie[name]['path'] = "/"
logger.debug("Expire: %s" % morsel)
cookie[name]["expires"] = _expiration("dawn")
return tuple(cookie.output().split(": ", 1))
return None
def set_cookie(name, _, *args):
cookie = SimpleCookie()
cookie[name] = base64.b64encode(":".join(args))
cookie[name]['path'] = "/"
cookie[name]["expires"] = _expiration(5) # 5 minutes from now
logger.debug("Cookie expires: %s" % cookie[name]["expires"])
return tuple(cookie.output().split(": ", 1))
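# For reference (illustrative): set_cookie("idpauthn", "/", uid, ref) yields a header
# tuple of the form ("Set-Cookie", "idpauthn=<b64 of 'uid:ref'>; expires=...; Path=/")
# that can be appended directly to a WSGI response's header list.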
# ----------------------------------------------------------------------------
# map urls to functions
AUTHN_URLS = [
# sso
(r'sso/post$', (SSO, "post")),
(r'sso/post/(.*)$', (SSO, "post")),
(r'sso/redirect$', (SSO, "redirect")),
(r'sso/redirect/(.*)$', (SSO, "redirect")),
(r'sso/art$', (SSO, "artifact")),
(r'sso/art/(.*)$', (SSO, "artifact")),
# slo
(r'slo/redirect$', (SLO, "redirect")),
(r'slo/redirect/(.*)$', (SLO, "redirect")),
(r'slo/post$', (SLO, "post")),
(r'slo/post/(.*)$', (SLO, "post")),
(r'slo/soap$', (SLO, "soap")),
(r'slo/soap/(.*)$', (SLO, "soap")),
#
(r'airs$', (AIDR, "uri")),
(r'ars$', (ARS, "soap")),
# mni
(r'mni/post$', (NMI, "post")),
(r'mni/post/(.*)$', (NMI, "post")),
(r'mni/redirect$', (NMI, "redirect")),
(r'mni/redirect/(.*)$', (NMI, "redirect")),
(r'mni/art$', (NMI, "artifact")),
(r'mni/art/(.*)$', (NMI, "artifact")),
(r'mni/soap$', (NMI, "soap")),
(r'mni/soap/(.*)$', (NMI, "soap")),
# nim
(r'nim$', (NIM, "soap")),
(r'nim/(.*)$', (NIM, "soap")),
#
(r'aqs$', (AQS, "soap")),
(r'attr$', (ATTR, "soap"))
]
NON_AUTHN_URLS = [
#(r'login?(.*)$', do_authentication),
(r'verify?(.*)$', do_verify),
(r'sso/ecp$', (SSO, "ecp")),
]
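# Dispatch example (illustrative): a GET to ".../sso/redirect" matches the
# (SSO, "redirect") entry above, so application() below instantiates
# SSO(environ, start_response, user) and invokes its redirect() method; the
# "(.*)" variants capture a trailing path segment into environ['myapp.url_args'].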
# ----------------------------------------------------------------------------
def metadata(environ, start_response):
try:
path = args.path
if path is None or len(path) == 0:
            path = os.path.dirname(os.path.abspath(__file__))
if path[-1] != "/":
path += "/"
metadata = create_metadata_string(path+args.config, IDP.config,
args.valid, args.cert, args.keyfile,
args.id, args.name, args.sign)
start_response('200 OK', [('Content-Type', "text/xml")])
return metadata
except Exception as ex:
logger.error("An error occured while creating metadata:" + ex.message)
return not_found(environ, start_response)
def staticfile(environ, start_response):
try:
path = args.path
if path is None or len(path) == 0:
path = os.path.dirname(os.path.abspath(__file__))
if path[-1] != "/":
path += "/"
path += environ.get('PATH_INFO', '').lstrip('/')
start_response('200 OK', [('Content-Type', "text/xml")])
return open(path, 'r').read()
except Exception as ex:
logger.error("An error occured while creating metadata:" + ex.message)
return not_found(environ, start_response)
def application(environ, start_response):
"""
The main WSGI application. Dispatch the current request to
the functions from above and store the regular expression
captures in the WSGI environment as `myapp.url_args` so that
the functions from above can access the url placeholders.
If nothing matches, call the `not_found` function.
:param environ: The HTTP application environment
:param start_response: The application to run when the handling of the
request is done
:return: The response as a list of lines
"""
path = environ.get('PATH_INFO', '').lstrip('/')
if path == "metadata":
return metadata(environ, start_response)
kaka = environ.get("HTTP_COOKIE", None)
logger.info("<application> PATH: %s" % path)
if kaka:
logger.info("= KAKA =")
user, authn_ref = info_from_cookie(kaka)
if authn_ref:
environ["idp.authn"] = AUTHN_BROKER[authn_ref]
else:
try:
query = parse_qs(environ["QUERY_STRING"])
logger.debug("QUERY: %s" % query)
user = IDP.cache.uid2user[query["id"][0]]
except KeyError:
user = None
url_patterns = AUTHN_URLS
if not user:
logger.info("-- No USER --")
# insert NON_AUTHN_URLS first in case there is no user
url_patterns = NON_AUTHN_URLS + url_patterns
for regex, callback in url_patterns:
match = re.search(regex, path)
if match is not None:
try:
environ['myapp.url_args'] = match.groups()[0]
except IndexError:
environ['myapp.url_args'] = path
logger.debug("Callback: %s" % (callback,))
if isinstance(callback, tuple):
cls = callback[0](environ, start_response, user)
func = getattr(cls, callback[1])
return func()
return callback(environ, start_response, user)
if re.search(r'static/.*', path) is not None:
return staticfile(environ, start_response)
return not_found(environ, start_response)
# ----------------------------------------------------------------------------
if __name__ == '__main__':
from wsgiref.simple_server import make_server
parser = argparse.ArgumentParser()
parser.add_argument('-p', dest='path', help='Path to configuration file.')
parser.add_argument('-v', dest='valid',
help="How long, in days, the metadata is valid from the time of creation")
parser.add_argument('-c', dest='cert', help='certificate')
parser.add_argument('-i', dest='id',
help="The ID of the entities descriptor")
parser.add_argument('-k', dest='keyfile',
help="A file with a key to sign the metadata with")
parser.add_argument('-n', dest='name')
parser.add_argument('-s', dest='sign', action='store_true',
help="sign the metadata")
parser.add_argument('-m', dest='mako_root', default="./")
parser.add_argument(dest="config")
args = parser.parse_args()
AUTHN_BROKER = AuthnBroker()
AUTHN_BROKER.add(authn_context_class_ref(PASSWORD),
username_password_authn, 10,
"http://%s" % socket.gethostname())
AUTHN_BROKER.add(authn_context_class_ref(UNSPECIFIED),
"", 0, "http://%s" % socket.gethostname())
CONFIG = importlib.import_module(args.config)
IDP = server.Server(args.config, cache=Cache())
IDP.ticket = {}
_rot = args.mako_root
LOOKUP = TemplateLookup(directories=[_rot + 'templates', _rot + 'htdocs'],
module_directory=_rot + 'modules',
input_encoding='utf-8', output_encoding='utf-8')
HOST = CONFIG.HOST
PORT = CONFIG.PORT
SRV = make_server(HOST, PORT, application)
print "IdP listening on %s:%s" % (HOST, PORT)
SRV.serve_forever()
| bsd-2-clause | 900,716,577,187,012,400 | 34.551896 | 98 | 0.528703 | false |
docusign/docusign-python-client | docusign_esign/apis/workspaces_api.py | 1 | 66547 | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..client.configuration import Configuration
from ..client.api_client import ApiClient
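# Illustrative usage of this generated client (a sketch, assuming the public package
# exports shown below; model attributes may differ):
#   from docusign_esign import ApiClient, WorkspacesApi, Workspace
#   api_client = ApiClient()              # configure host and Authorization header first
#   workspaces_api = WorkspacesApi(api_client)
#   new_ws = workspaces_api.create_workspace(account_id, workspace=Workspace())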
class WorkspacesApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def create_workspace(self, account_id, **kwargs):
"""
Create a Workspace
Creates a new workspace.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_workspace(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param Workspace workspace:
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_workspace_with_http_info(account_id, **kwargs)
else:
(data) = self.create_workspace_with_http_info(account_id, **kwargs)
return data
def create_workspace_with_http_info(self, account_id, **kwargs):
"""
Create a Workspace
Creates a new workspace.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_workspace_with_http_info(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param Workspace workspace:
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'workspace']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_workspace`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'workspace' in params:
body_params = params['workspace']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Workspace',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_workspace_file(self, account_id, folder_id, workspace_id, **kwargs):
"""
Creates a workspace file.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_workspace_file(account_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: WorkspaceItem
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.create_workspace_file_with_http_info(account_id, folder_id, workspace_id, **kwargs)
else:
(data) = self.create_workspace_file_with_http_info(account_id, folder_id, workspace_id, **kwargs)
return data
def create_workspace_file_with_http_info(self, account_id, folder_id, workspace_id, **kwargs):
"""
Creates a workspace file.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.create_workspace_file_with_http_info(account_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: WorkspaceItem
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'folder_id', 'workspace_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_workspace_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `create_workspace_file`")
# verify the required parameter 'folder_id' is set
if ('folder_id' not in params) or (params['folder_id'] is None):
raise ValueError("Missing the required parameter `folder_id` when calling `create_workspace_file`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `create_workspace_file`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'folder_id' in params:
path_params['folderId'] = params['folder_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceItem',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_workspace(self, account_id, workspace_id, **kwargs):
"""
Delete Workspace
Deletes an existing workspace (logically).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_workspace(account_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_workspace_with_http_info(account_id, workspace_id, **kwargs)
else:
(data) = self.delete_workspace_with_http_info(account_id, workspace_id, **kwargs)
return data
def delete_workspace_with_http_info(self, account_id, workspace_id, **kwargs):
"""
Delete Workspace
Deletes an existing workspace (logically).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_workspace_with_http_info(account_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'workspace_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `delete_workspace`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `delete_workspace`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Workspace',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_workspace_folder_items(self, account_id, folder_id, workspace_id, **kwargs):
"""
        Deletes one or more specific files or folders from the given folder or the workspace root.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_workspace_folder_items(account_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param WorkspaceItemList workspace_item_list:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs)
else:
(data) = self.delete_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs)
return data
def delete_workspace_folder_items_with_http_info(self, account_id, folder_id, workspace_id, **kwargs):
"""
        Deletes one or more specific files or folders from the given folder or the workspace root.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param WorkspaceItemList workspace_item_list:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'folder_id', 'workspace_id', 'workspace_item_list']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_workspace_folder_items" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `delete_workspace_folder_items`")
# verify the required parameter 'folder_id' is set
if ('folder_id' not in params) or (params['folder_id'] is None):
raise ValueError("Missing the required parameter `folder_id` when calling `delete_workspace_folder_items`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `delete_workspace_folder_items`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'folder_id' in params:
path_params['folderId'] = params['folder_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'workspace_item_list' in params:
body_params = params['workspace_item_list']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_workspace(self, account_id, workspace_id, **kwargs):
"""
Get Workspace
        Retrieves properties about a workspace given a unique workspaceId.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_workspace(account_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_workspace_with_http_info(account_id, workspace_id, **kwargs)
else:
(data) = self.get_workspace_with_http_info(account_id, workspace_id, **kwargs)
return data
def get_workspace_with_http_info(self, account_id, workspace_id, **kwargs):
"""
Get Workspace
        Retrieves properties about a workspace given a unique workspaceId.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_workspace_with_http_info(account_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'workspace_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_workspace`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_workspace`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Workspace',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_workspace_file(self, account_id, file_id, folder_id, workspace_id, **kwargs):
"""
Get Workspace File
Retrieves a workspace file (the binary).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_workspace_file(account_id, file_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str file_id: Specifies the room file ID GUID. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param str is_download: When set to **true**, the Content-Disposition header is set in the response. The value of the header provides the filename of the file. Default is **false**.
:param str pdf_version: When set to **true** the file returned as a PDF.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs)
else:
(data) = self.get_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs)
return data
def get_workspace_file_with_http_info(self, account_id, file_id, folder_id, workspace_id, **kwargs):
"""
Get Workspace File
Retrieves a workspace file (the binary).
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str file_id: Specifies the room file ID GUID. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param str is_download: When set to **true**, the Content-Disposition header is set in the response. The value of the header provides the filename of the file. Default is **false**.
:param str pdf_version: When set to **true** the file returned as a PDF.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'file_id', 'folder_id', 'workspace_id', 'is_download', 'pdf_version']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_workspace_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `get_workspace_file`")
# verify the required parameter 'file_id' is set
if ('file_id' not in params) or (params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `get_workspace_file`")
# verify the required parameter 'folder_id' is set
if ('folder_id' not in params) or (params['folder_id'] is None):
raise ValueError("Missing the required parameter `folder_id` when calling `get_workspace_file`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `get_workspace_file`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files/{fileId}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'file_id' in params:
path_params['fileId'] = params['file_id']
if 'folder_id' in params:
path_params['folderId'] = params['folder_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'is_download' in params:
query_params['is_download'] = params['is_download']
if 'pdf_version' in params:
query_params['pdf_version'] = params['pdf_version']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_workspace_file_pages(self, account_id, file_id, folder_id, workspace_id, **kwargs):
"""
List File Pages
Retrieves a workspace file as rasterized pages.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_workspace_file_pages(account_id, file_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str file_id: Specifies the room file ID GUID. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param str count: The maximum number of results to be returned by this request.
:param str dpi: Number of dots per inch for the resulting image. The default if not used is 94. The range is 1-310.
:param str max_height: Sets the maximum height (in pixels) of the returned image.
:param str max_width: Sets the maximum width (in pixels) of the returned image.
:param str start_position: The position within the total result set from which to start returning values. The value **thumbnail** may be used to return the page image.
:return: PageImages
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_workspace_file_pages_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs)
else:
(data) = self.list_workspace_file_pages_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs)
return data
def list_workspace_file_pages_with_http_info(self, account_id, file_id, folder_id, workspace_id, **kwargs):
"""
List File Pages
Retrieves a workspace file as rasterized pages.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_workspace_file_pages_with_http_info(account_id, file_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str file_id: Specifies the room file ID GUID. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param str count: The maximum number of results to be returned by this request.
:param str dpi: Number of dots per inch for the resulting image. The default if not used is 94. The range is 1-310.
:param str max_height: Sets the maximum height (in pixels) of the returned image.
:param str max_width: Sets the maximum width (in pixels) of the returned image.
:param str start_position: The position within the total result set from which to start returning values. The value **thumbnail** may be used to return the page image.
:return: PageImages
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'file_id', 'folder_id', 'workspace_id', 'count', 'dpi', 'max_height', 'max_width', 'start_position']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_workspace_file_pages" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `list_workspace_file_pages`")
# verify the required parameter 'file_id' is set
if ('file_id' not in params) or (params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `list_workspace_file_pages`")
# verify the required parameter 'folder_id' is set
if ('folder_id' not in params) or (params['folder_id'] is None):
raise ValueError("Missing the required parameter `folder_id` when calling `list_workspace_file_pages`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `list_workspace_file_pages`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files/{fileId}/pages'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'file_id' in params:
path_params['fileId'] = params['file_id']
if 'folder_id' in params:
path_params['folderId'] = params['folder_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'count' in params:
query_params['count'] = params['count']
if 'dpi' in params:
query_params['dpi'] = params['dpi']
if 'max_height' in params:
query_params['max_height'] = params['max_height']
if 'max_width' in params:
query_params['max_width'] = params['max_width']
if 'start_position' in params:
query_params['start_position'] = params['start_position']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageImages',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_workspace_folder_items(self, account_id, folder_id, workspace_id, **kwargs):
"""
List Workspace Folder Contents
Retrieves workspace folder contents, which can include sub folders and files.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_workspace_folder_items(account_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param str count: The maximum number of results to be returned by this request.
:param str include_files: When set to **true**, file information is returned in the response along with folder information. The default is **false**.
:param str include_sub_folders: When set to **true**, information about the sub-folders of the current folder is returned. The default is **false**.
:param str include_thumbnails: When set to **true**, thumbnails are returned as part of the response. The default is **false**.
:param str include_user_detail: Set to **true** to return extended details about the user. The default is **false**.
:param str start_position: The position within the total result set from which to start returning values.
:param str workspace_user_id: If set, then the results are filtered to those associated with the specified userId.
:return: WorkspaceFolderContents
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs)
else:
(data) = self.list_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, **kwargs)
return data
def list_workspace_folder_items_with_http_info(self, account_id, folder_id, workspace_id, **kwargs):
"""
List Workspace Folder Contents
Retrieves workspace folder contents, which can include sub folders and files.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_workspace_folder_items_with_http_info(account_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param str count: The maximum number of results to be returned by this request.
:param str include_files: When set to **true**, file information is returned in the response along with folder information. The default is **false**.
:param str include_sub_folders: When set to **true**, information about the sub-folders of the current folder is returned. The default is **false**.
:param str include_thumbnails: When set to **true**, thumbnails are returned as part of the response. The default is **false**.
:param str include_user_detail: Set to **true** to return extended details about the user. The default is **false**.
:param str start_position: The position within the total result set from which to start returning values.
:param str workspace_user_id: If set, then the results are filtered to those associated with the specified userId.
:return: WorkspaceFolderContents
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'folder_id', 'workspace_id', 'count', 'include_files', 'include_sub_folders', 'include_thumbnails', 'include_user_detail', 'start_position', 'workspace_user_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_workspace_folder_items" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `list_workspace_folder_items`")
# verify the required parameter 'folder_id' is set
if ('folder_id' not in params) or (params['folder_id'] is None):
raise ValueError("Missing the required parameter `folder_id` when calling `list_workspace_folder_items`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `list_workspace_folder_items`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'folder_id' in params:
path_params['folderId'] = params['folder_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
if 'count' in params:
query_params['count'] = params['count']
if 'include_files' in params:
query_params['include_files'] = params['include_files']
if 'include_sub_folders' in params:
query_params['include_sub_folders'] = params['include_sub_folders']
if 'include_thumbnails' in params:
query_params['include_thumbnails'] = params['include_thumbnails']
if 'include_user_detail' in params:
query_params['include_user_detail'] = params['include_user_detail']
if 'start_position' in params:
query_params['start_position'] = params['start_position']
if 'workspace_user_id' in params:
query_params['workspace_user_id'] = params['workspace_user_id']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceFolderContents',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_workspaces(self, account_id, **kwargs):
"""
List Workspaces
Gets information about the Workspaces that have been created.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_workspaces(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:return: WorkspaceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_workspaces_with_http_info(account_id, **kwargs)
else:
(data) = self.list_workspaces_with_http_info(account_id, **kwargs)
return data
def list_workspaces_with_http_info(self, account_id, **kwargs):
"""
List Workspaces
Gets information about the Workspaces that have been created.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_workspaces_with_http_info(account_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:return: WorkspaceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_workspaces" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `list_workspaces`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_workspace(self, account_id, workspace_id, **kwargs):
"""
Update Workspace
Updates information about a specific workspace.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_workspace(account_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param Workspace workspace:
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_workspace_with_http_info(account_id, workspace_id, **kwargs)
else:
(data) = self.update_workspace_with_http_info(account_id, workspace_id, **kwargs)
return data
def update_workspace_with_http_info(self, account_id, workspace_id, **kwargs):
"""
Update Workspace
Updates information about a specific workspace.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_workspace_with_http_info(account_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:param Workspace workspace:
:return: Workspace
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'workspace_id', 'workspace']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_workspace" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `update_workspace`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `update_workspace`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'workspace' in params:
body_params = params['workspace']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Workspace',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_workspace_file(self, account_id, file_id, folder_id, workspace_id, **kwargs):
"""
Update Workspace File Metadata
Updates workspace item metadata for one or more specific files/folders.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_workspace_file(account_id, file_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str file_id: Specifies the room file ID GUID. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: WorkspaceItem
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs)
else:
(data) = self.update_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, **kwargs)
return data
def update_workspace_file_with_http_info(self, account_id, file_id, folder_id, workspace_id, **kwargs):
"""
Update Workspace File Metadata
Updates workspace item metadata for one or more specific files/folders.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update_workspace_file_with_http_info(account_id, file_id, folder_id, workspace_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: The external account number (int) or account ID Guid. (required)
:param str file_id: Specifies the room file ID GUID. (required)
:param str folder_id: The ID of the folder being accessed. (required)
:param str workspace_id: Specifies the workspace ID GUID. (required)
:return: WorkspaceItem
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'file_id', 'folder_id', 'workspace_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_workspace_file" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if ('account_id' not in params) or (params['account_id'] is None):
raise ValueError("Missing the required parameter `account_id` when calling `update_workspace_file`")
# verify the required parameter 'file_id' is set
if ('file_id' not in params) or (params['file_id'] is None):
raise ValueError("Missing the required parameter `file_id` when calling `update_workspace_file`")
# verify the required parameter 'folder_id' is set
if ('folder_id' not in params) or (params['folder_id'] is None):
raise ValueError("Missing the required parameter `folder_id` when calling `update_workspace_file`")
# verify the required parameter 'workspace_id' is set
if ('workspace_id' not in params) or (params['workspace_id'] is None):
raise ValueError("Missing the required parameter `workspace_id` when calling `update_workspace_file`")
collection_formats = {}
resource_path = '/v2.1/accounts/{accountId}/workspaces/{workspaceId}/folders/{folderId}/files/{fileId}'.replace('{format}', 'json')
path_params = {}
if 'account_id' in params:
path_params['accountId'] = params['account_id']
if 'file_id' in params:
path_params['fileId'] = params['file_id']
if 'folder_id' in params:
path_params['folderId'] = params['folder_id']
if 'workspace_id' in params:
path_params['workspaceId'] = params['workspace_id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='WorkspaceItem',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
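# Illustrative usage sketch (not part of the generated client). It assumes the
# enclosing class is WorkspacesApi and that an authenticated ApiClient is
# available, as in the DocuSign eSign Python SDK; `api_client`, `account_id`,
# `folder_id` and `workspace_id` below are placeholders.
#
#   >>> workspaces_api = WorkspacesApi(api_client)
#   >>> workspace_list = workspaces_api.list_workspaces(account_id)
#   >>> contents = workspaces_api.list_workspace_folder_items(
#   ...     account_id, folder_id, workspace_id, include_files='true')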
| mit | -6,547,864,770,429,699,000 | 47.788123 | 197 | 0.578058 | false |
xuweiliang/Codelibrary | openstack_dashboard/dashboards/admin/access_and_security/keypairs/urls.py | 1 | 1463 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.access_and_security.keypairs \
import views
urlpatterns = [
url(r'^create/$', views.CreateView.as_view(), name='create'),
url(r'^import/$', views.ImportView.as_view(), name='import'),
url(r'^(?P<keypair_name>[^/]+)/download/$', views.DownloadView.as_view(),
name='download'),
url(r'^(?P<keypair_name>[^/]+)/generate/$', views.GenerateView.as_view(),
name='generate'),
url(r'^(?P<keypair_name>[^/]+)/(?P<optional>[^/]+)/generate/$',
views.GenerateView.as_view(), name='generate'),
url(r'^(?P<keypair_name>[^/]+)/$', views.DetailView.as_view(),
name='detail'),
]
| apache-2.0 | -5,623,635,435,713,008,000 | 39.638889 | 78 | 0.678742 | false |
FlintHill/SUAS-Competition | UpdatedImageProcessing/UpdatedImageProcessing/ShapeDetection/utils/bounding_box.py | 1 | 3445 | import cv2
import numpy
from alpha_fill import alpha_fill
class BoundingBox(object):
def __init__(self, pil_img, show_plot=False):
self.mask_img = alpha_fill(pil_img)
self.set_bounding_box()
self.set_side_lengths()
self.set_areas()
if show_plot:
self.show_plot()
def set_bounding_box(self):
_,contours,_ = cv2.findContours(self.mask_img,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
self.contour = contours[0]
rect = cv2.minAreaRect(self.contour)
self.box = cv2.boxPoints(rect)
#self.box = numpy.int0(box)
def set_side_lengths(self):
point1 = self.box[0]
point2 = self.box[1]
point3 = self.box[2]
diffx = numpy.abs(point1[0]-point2[0])
diffy = numpy.abs(point1[1]-point2[1])
side1 = numpy.hypot(diffx,diffy)
diffx = numpy.abs(point2[0]-point3[0])
diffy = numpy.abs(point2[1]-point3[1])
side2 = numpy.hypot(diffx,diffy)
self.bounding_box_side_lengths = (side1,side2)
"""
def set_areas(self):
self.hull = cv2.convexHull(self.contour)
self.unpacked_hull = []
for i in range(len(self.hull)):
self.unpacked_hull.append((self.hull[i][0][0],self.hull[i][0][1]))
self.hull_area = self.polygon_area(self.unpacked_hull)
self.bounding_box_area = self.bounding_box_side_lengths[0]*self.bounding_box_side_lengths[1]
"""
def set_areas(self):
epsilon = 0.001*cv2.arcLength(self.contour,True)
self.contour_approx = cv2.approxPolyDP(self.contour,epsilon,True)
self.unpacked_contour_approx = []
for i in range(len(self.contour_approx)):
self.unpacked_contour_approx.append((self.contour_approx[i][0][0],self.contour_approx[i][0][1]))
self.contour_approx_area = self.polygon_area(self.unpacked_contour_approx)
self.bounding_box_area = self.bounding_box_side_lengths[0]*self.bounding_box_side_lengths[1]
def show_plot(self):
color_img = cv2.cvtColor(self.mask_img, cv2.COLOR_GRAY2RGB)
#cv2.drawContours(color_img,self.hull,0,(0,0,255),4)
cv2.drawContours(color_img,[self.contour_approx],0,(0,0,255),1)
cv2.drawContours(color_img,[numpy.int0(self.box)],0,(0,255,0),1)
cv2.imshow('image',color_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
def get_box(self):
return self.box
def get_side_lengths(self):
return self.bounding_box_side_lengths
def get_side_length_difference(self):
        return numpy.abs(self.bounding_box_side_lengths[0]-self.bounding_box_side_lengths[1])
def get_area_difference(self):
return numpy.abs(self.contour_approx_area-self.bounding_box_area)
def polygon_area(self, corners):
n = len(corners)
cx = float(sum(x for x, y in corners)) / n
cy = float(sum(y for x, y in corners)) / n
cornersWithAngles = []
for x, y in corners:
an = (numpy.arctan2(y - cy, x - cx) + 2.0 * numpy.pi) % (2.0 * numpy.pi)
cornersWithAngles.append((x, y, an))
cornersWithAngles.sort(key = lambda tup: tup[2])
        corners = [(x, y) for x, y, an in cornersWithAngles]  # drop the angle, keep sorted (x, y) pairs
area = 0.0
for i in range(n):
j = (i + 1) % n
area += corners[i][0] * corners[j][1]
area -= corners[j][0] * corners[i][1]
area = abs(area) / 2.0
return area
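# Minimal demo (illustrative sketch, not part of the original module). The image
# path is a placeholder; it assumes an RGBA image that alpha_fill() can turn
# into a binary mask.
if __name__ == '__main__':
    from PIL import Image

    demo_img = Image.open('example_target.png')  # hypothetical input file
    bb = BoundingBox(demo_img, show_plot=False)
    print('side lengths: {0}'.format(bb.get_side_lengths()))
    print('contour vs. box area difference: {0}'.format(bb.get_area_difference()))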
| mit | 5,891,784,764,528,450,000 | 33.45 | 108 | 0.598549 | false |
locuslab/dreaml | dreaml/dataframe/transform.py | 1 | 5833 | from abc import ABCMeta, abstractmethod
from threading import Thread
from time import sleep, time
class Transform(object):
__metaclass__ = ABCMeta
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
@abstractmethod
def apply(self,target_df=None):
""" Apply func(df, ...) and return the resulting dataframe
Some restrictions:
1. func must take in df as the first argument (this is to
allow for automated generation of entries when necessary)
2. func must return a dataframe
"""
pass
# return self.func(target_df,*self.args, **self.kwargs)
# def apply_init(self,target_df=None):
# if self.init_func is not None:
# self.init_func(target_df,*self.args,**self.kwargs)
def _apply_function_to(self, target, f):
val = f(target, *self.args, **self.kwargs)
if val == None:
return
i_j = target._row_query,target._col_query
# If the transform returns something, it should not be empty.
if val.shape[0] == 0 or val.shape[1] == 0:
raise ValueError
# TODO: Remove this code, and require the user to specify the reshaping
# in the init function?
# If the target is empty, nothing depends on it yet and we can treat it
# as a new partition
# TODO: If the target is a matrix or an integer, we should handle that
# as well.
# If the target is empty, then we can just set the value.
if target.empty():
target.set_dataframe(val)
# If the target is non-empty and shapes are wrong, then extend it
elif val.shape != target.shape:
for k_l in target._top_df._get_all_dependents(i_j):
target._top_df._propogate_stop(k_l)
# Extend the DF, set the value, and set to green.
target._extend(val._row_index.keys(),val._col_index.keys())
target.set_dataframe(val)
target._top_df._graph.node[i_j]["status"] = target.STATUS_GREEN
# Now restart all the rest
for k_l in target._top_df._get_all_dependents(i_j):
target._top_df._propogate_start(k_l,ignore=i_j)
# If the target is non-empty but the value matches, then set the data
else:
target.set_dataframe(val)
# def apply_continuous(self, target):
# """ Apply a function continuously in a thread, and return the thread.
# """
# # Run at least once
# print "running continuously"
# thread = Thread(target = self._continuous_wrapper, args=(target,))
# thread.start()
# return thread
class BatchTransform(Transform):
def apply(self, target_df):
self._apply_function_to(target_df,self.func)
@abstractmethod
def func(self, target_df, *args, **kwargs):
pass
class ContinuousTransform(Transform):
def __init__(self, *args, **kwargs):
self.threaded = kwargs.pop('threaded',True)
self.delay = kwargs.pop('delay',0)
self.max_iters = kwargs.pop('max_iters',None)
super(ContinuousTransform,self).__init__(*args,**kwargs)
def apply(self, target_df):
self.init_func(target_df, *self.args, **self.kwargs)
if self.threaded:
thread = Thread(target = self._continuous_wrapper, args=(target_df,))
thread.start()
return thread
else:
self._continuous_wrapper(target_df)
@abstractmethod
def init_func(self, target_df, *args, **kwargs):
pass
@abstractmethod
def continuous_func(self, target_df, *args, **kwargs):
pass
def _continuous_wrapper(self, target_df):
i_j = (target_df._row_query,target_df._col_query)
graph = target_df._top_df._graph
niters = 0
while(graph.node[i_j]["status"] is not target_df.STATUS_RED):
# Apply the function
self._apply_function_to(target_df,self.continuous_func)
# Increment iteration counter if max_iters is set, and break if
# threshold is exceeded.
if self.max_iters != None:
niters += 1
if niters >= self.max_iters:
graph.node[i_j]["status"] = target_df.STATUS_RED
# If delay requested, sleep for the desired amount of time.
if self.delay > 0:
sleep(self.delay)
else:
while(graph.node[i_j]["status"] is not target_df.STATUS_RED):
self._apply_function_to(target_df,self.continuous_func)
def _eval_init_func(self,target):
return self.init_func(target,*self.args,**self.kwargs)
def _eval_continuous_func(self,target):
return self.continuous_func(target,*self.args,**self.kwargs)
Transform.register(BatchTransform)
Transform.register(ContinuousTransform)
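# Illustrative example (not part of the original dreaml API): a minimal
# BatchTransform subclass. `func` receives the target DataFrame partition plus
# any constructor args and may mutate it or return a dataframe that
# _apply_function_to() writes into the target; returning None leaves the
# target untouched. The class name is hypothetical.
class ExampleNoOpTransform(BatchTransform):
    def func(self, target_df, *args, **kwargs):
        # A real transform would compute and return a dataframe here.
        return None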
from bokeh.client import push_session
from bokeh.io import curdoc
from bokeh.embed import autoload_server
class FigureTransform(ContinuousTransform):
def apply(self,target_df):
self.init_func(target_df, *self.args, **self.kwargs)
self.session = push_session(curdoc())
tag = autoload_server(self.p,session_id=self.session.id)
target_df._top_df._plots.append(tag)
thread = Thread(target = self._continuous_wrapper, args=(target_df,))
thread.start()
return thread
def init_func(self,target_df,*args,**kwargs):
self.p = self.create_figure(target_df,*args,**kwargs)
@abstractmethod
def create_figure(self, target_df, *args, **kwargs):
pass
def continuous_func(self,target_df,*args,**kwargs):
self.update(self.p)
@abstractmethod
def update(self,p):
pass | apache-2.0 | -404,568,103,778,613,500 | 33.116959 | 81 | 0.60312 | false |
MartinHeinz/training-and-food-tracker | src/models/model.py | 1 | 28995 | import textwrap
from datetime import date
from itertools import chain
import decimal
from sqlalchemy import Table, Column, Integer, ForeignKey, Date, Numeric, String, Text, Boolean, Time, and_, or_, cast, \
func, CheckConstraint
from sqlalchemy.orm import relationship, backref
from sqlalchemy.dialects import postgresql
from src.models.base import MixinGetByName, MixinSearch
from src import Base
from src.models.util import sort_to_match
recipe_tag_table = Table('recipe_tag', Base.metadata,
Column("recipe_id", Integer, ForeignKey('recipe.id')),
Column('tag_id', Integer, ForeignKey('tag.id')),
extend_existing=True
)
exercise_equipment_table = Table('exercise_equipment', Base.metadata,
Column('equipment_id', Integer, ForeignKey('equipment.id')),
Column('exercise_id', Integer, ForeignKey('exercise.id')),
extend_existing=True
)
exercise_tag_table = Table('exercise_tag', Base.metadata,
Column('tag_id', Integer, ForeignKey('tag.id')),
Column('exercise_id', Integer, ForeignKey('exercise.id')),
extend_existing=True
)
food_tag_table = Table('food_tag', Base.metadata,
Column("food_id", Integer, ForeignKey('food.id')),
Column('tag_id', Integer, ForeignKey('tag.id')),
extend_existing=True
)
class Day(Base):
__tablename__ = 'day'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
body_composition = relationship("BodyComposition", uselist=False, back_populates="day")
date = Column(Date, unique=True)
target_cal = Column(postgresql.INT4RANGE)
target_carbs = Column(postgresql.INT4RANGE)
target_protein = Column(postgresql.INT4RANGE)
target_fat = Column(postgresql.INT4RANGE)
target_fibre = Column(postgresql.INT4RANGE)
training_id = Column(Integer, ForeignKey('training.id'))
training = relationship("Training", uselist=False, back_populates="day")
meals = relationship("Meal", cascade="all, delete-orphan", back_populates="day")
@classmethod
def get_by_date(cls, session, date):
return session.query(cls).filter(cls.date == date).scalar()
@classmethod
def get_most_recent(cls, session):
return session.query(cls).order_by(cls.date.desc()).first()
@classmethod
def get_most_recent_passed(cls, session):
""" Returns day in interval <first - today> that is closest to today and is
contained in database."""
return session.query(cls).filter(cls.date <= date.today()).order_by(cls.date.desc()).first()
class BodyComposition(Base):
__tablename__ = 'body_composition'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
day_id = Column(Integer, ForeignKey('day.id'))
day = relationship("Day", back_populates="body_composition")
body_fat = Column(Numeric(precision=5, scale=2))
chest = Column(Numeric(precision=5, scale=2))
arm = Column(Numeric(precision=5, scale=2))
waist = Column(Numeric(precision=5, scale=2))
belly = Column(Numeric(precision=5, scale=2))
thigh = Column(Numeric(precision=5, scale=2))
calf = Column(Numeric(precision=5, scale=2))
forearm = Column(Numeric(precision=5, scale=2))
weight = Column(Numeric(precision=5, scale=2))
class Ingredient(Base):
__tablename__ = 'ingredient'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
amount = Column(Numeric(precision=5, scale=2))
measurement_id = Column(Integer, ForeignKey('measurement.id'))
measurement = relationship("Measurement", back_populates="ingredients")
food_id = Column(Integer, ForeignKey('food.id'))
food = relationship("Food", back_populates="ingredients")
recipe_id = Column(Integer, ForeignKey('recipe.id'))
recipe = relationship("Recipe", back_populates="ingredients")
def get_calories(self):
# return self.food.round(self.food.cal * (self.measurement/100), 2)
if not self.measurement:
return round(self.food.cal * (self.amount / 100))
return round(self.food.cal * ((self.amount * self.measurement.grams) / 100))
def get_attr_amount(self, attr_name):
if not self.measurement:
return getattr(self.food, attr_name, 0) * (self.amount / 100)
return getattr(self.food, attr_name, 0) * ((self.amount * self.measurement.grams) / 100)
def get_amount_by_cal(self, cal):
cal = decimal.Decimal(cal)
if not self.measurement:
return round((100 * cal) / self.food.cal, 2)
amount = (100 * cal) / self.food.cal
return round(amount / self.measurement.grams, 2)
class FoodUsage(Base):
__tablename__ = 'food_usage'
__table_args__ = {'extend_existing': True}
meal_id = Column(Integer, ForeignKey('meal.id'))
food_id = Column(Integer, ForeignKey('food.id'))
recipe_id = Column(Integer, ForeignKey('recipe.id'))
amount = Column(Numeric(precision=5, scale=2))
meal = relationship("Meal", back_populates="foods")
food = relationship("Food", back_populates="meals")
id = Column(Integer, primary_key=True)
measurement_id = Column(Integer, ForeignKey('measurement.id'))
measurement = relationship("Measurement", back_populates="food_usages")
def get_calories(self):
# return self.food.round(self.food.cal * (self.measurement/100), 2)
if not self.measurement:
return round(self.food.cal * (self.amount / 100))
return round(self.food.cal * ((self.amount * self.measurement.grams) / 100))
def get_attr_amount(self, attr_name):
if not self.measurement:
return getattr(self.food, attr_name, 0) * (self.amount / 100)
return getattr(self.food, attr_name, 0) * ((self.amount * self.measurement.grams) / 100)
def get_amount_by_cal(self, cal):
cal = decimal.Decimal(cal)
if not self.measurement:
return round((100 * cal) / self.food.cal, 2)
amount = (100 * cal) / self.food.cal
return round(amount / self.measurement.grams, 2)
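    # Worked example of the conversions above (comment only): for a food with
    # cal == 250 kcal per 100 g eaten with a Measurement of grams == 30 (one
    # scoop, say), get_amount_by_cal(150) yields (100 * 150) / 250 = 60 g,
    # i.e. 60 / 30 = 2.0 scoops.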
class Food(MixinGetByName, MixinSearch, Base):
__tablename__ = 'food'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
cal = Column(Numeric(precision=5, scale=2))
protein = Column(Numeric(precision=5, scale=2))
carbs = Column(Numeric(precision=5, scale=2))
fat = Column(Numeric(precision=5, scale=2))
fibre = Column(Numeric(precision=5, scale=2))
brand = Column(String)
description = Column(Text) # TODO test
measurements = relationship("Measurement", back_populates="food")
meals = relationship("FoodUsage", back_populates="food")
ingredients = relationship("Ingredient", back_populates="food")
tags = relationship(
"Tag",
secondary=food_tag_table,
back_populates="foods")
supplements = relationship("FoodSupplement", back_populates="food")
@classmethod # TODO check if it works when rows with type other than "food" are added
def search_by_tag(cls, session, search_string):
words = " & ".join(search_string.split())
return session.query(Food). \
join(Food.tags). \
filter(and_(or_(func.to_tsvector('english', Tag.name).match(words, postgresql_regconfig='english'),
func.to_tsvector('english', Tag.description).match(words, postgresql_regconfig='english')),
Tag.type == "food")).all()
def get_field_secondary_text(self):
text = "Brand: {brand: <10} {cal: >10} cal {protein: >10} protein {fat: >10} fat {carbs: >10} carbs {fibre: >10} fibre" \
.format(brand="Undefined" if self.brand is None else self.brand,
cal=self.cal,
protein=self.protein,
fat=self.fat,
carbs=self.carbs,
fibre=self.fibre)
return text
class FoodSupplement(Base):
__tablename__ = 'food_supplement'
food_id = Column(Integer, ForeignKey('food.id'), primary_key=True)
supplement_id = Column(Integer, ForeignKey('supplement.id'), primary_key=True)
amount = Column(Numeric(precision=5, scale=2))
food = relationship("Food", back_populates="supplements")
supplement = relationship("Supplement", back_populates="foods")
class Supplement(MixinGetByName, Base):
__tablename__ = 'supplement'
__table_args__ = (CheckConstraint("type ~* '^(vitamin|micronutrient|stimulant){1}$'", name="type_check"),
{'extend_existing': True},
)
id = Column(Integer, primary_key=True)
type = Column(String) # vitamin/micronutrient/stimulant
foods = relationship("FoodSupplement", back_populates="supplement")
class Measurement(MixinGetByName, Base):
__tablename__ = 'measurement'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
food_id = Column(Integer, ForeignKey('food.id'))
food = relationship("Food", back_populates="measurements")
grams = Column(Numeric(precision=5, scale=2))
food_usages = relationship("FoodUsage", back_populates="measurement")
ingredients = relationship("Ingredient", back_populates="measurement")
class Meal(MixinGetByName, Base):
__tablename__ = 'meal'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
foods = relationship("FoodUsage", cascade="all, delete-orphan", back_populates="meal")
# description = Column(Text) # TODO test
day_id = Column(Integer, ForeignKey('day.id'))
day = relationship("Day", back_populates="meals")
time = Column(Time)
recipes = relationship("Recipe", cascade="all, delete-orphan", back_populates="meal")
def add_food(self, food, amount, measurement=None):
food_usage = FoodUsage(amount=amount)
food_usage.food = food
food_usage.measurement = measurement
if self.foods is None:
self.foods = [food_usage]
else:
self.foods.append(food_usage)
def add_recipe(self, recipe):
if self.recipes is not None:
self.recipes.append(recipe)
else:
self.recipes = [recipe]
def get_calories(self):
return sum(i.get_calories() for i in chain(self.foods, self.recipes))
def get_attr_amount(self, attr_name):
return sum(f.get_attr_amount(attr_name) for f in chain(self.foods, self.recipes))
class Recipe(MixinGetByName, MixinSearch, Base):
__tablename__ = 'recipe'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
serving_size = Column(Numeric(precision=5, scale=2))
notes = Column(Text)
tags = relationship(
"Tag",
secondary=recipe_tag_table,
back_populates="recipes")
ingredients = relationship("Ingredient", cascade="all, delete-orphan", back_populates="recipe")
is_template = Column(Boolean, default=False)
recipe_executions = relationship("Recipe",
uselist=True,
foreign_keys='Recipe.template_id',
backref=backref("template", uselist=False, remote_side=[id]))
template_id = Column(Integer, ForeignKey('recipe.id'))
meal_id = Column(Integer, ForeignKey('meal.id'))
meal = relationship("Meal", back_populates="recipes")
@classmethod # TODO check if it works when rows with type other than "recipe" are added
def search_by_tag(cls, session, search_string):
words = " & ".join(search_string.split())
return session.query(Recipe). \
join(Recipe.tags). \
filter(and_(or_(func.to_tsvector('english', Tag.name).match(words, postgresql_regconfig='english'),
func.to_tsvector('english', Tag.description).match(words, postgresql_regconfig='english')),
Tag.type == "recipe")).all()
def add_food(self, food, amount, measurement=None):
ingredient = Ingredient(amount=amount)
ingredient.food = food
ingredient.measurement = measurement
if self.ingredients is None:
self.ingredients = [ingredient]
else:
self.ingredients.append(ingredient)
def get_calories(self):
return sum(i.get_calories() for i in self.ingredients)
def get_attr_amount(self, attr_name):
return sum(f.get_attr_amount(attr_name) for f in self.ingredients)
def get_field_secondary_text(self):
text = "Calories per serving: {cal: <4} Ingredients: {ing}"\
.format(cal=self.get_calories()/self.serving_size,
ing=textwrap.shorten(', '.join([getattr(n.food, "name") for n in self.ingredients]),
width=50, placeholder="..."))
return text
@classmethod
def search_by_attribute(cls, session, search_string, field, only_template=False):
if only_template:
return session.query(cls). \
filter(and_(
cls.is_template == True,
func.to_tsvector('english', getattr(cls, field)).match(search_string,
postgresql_regconfig='english'))).all()
return session.query(cls). \
filter(
func.to_tsvector('english', getattr(cls, field)).match(search_string, postgresql_regconfig='english')).all()
class Exercise(MixinGetByName, MixinSearch, Base):
__tablename__ = 'exercise'
__table_args__ = (CheckConstraint("tempo ~ '^((\d|X){4}|((\d{1,2}|X)-){3}(\d{1,2}|X))$'", name="tempo_check"),
{'extend_existing': True},
)
id = Column(Integer, primary_key=True)
weight_id = Column(Integer, ForeignKey('weight.id'))
weight = relationship("Weight", back_populates="exercise")
tempo = Column(String)
pause = Column(postgresql.INT4RANGE) # TODO test
set_range = Column(postgresql.INT4RANGE)
rep_range = Column(postgresql.INT4RANGE)
notes = Column(Text)
equipment = relationship(
"Equipment",
secondary=exercise_equipment_table,
back_populates="exercises")
tags = relationship(
"Tag",
secondary=exercise_tag_table,
back_populates="exercises")
training_exercises = relationship("TrainingExercise", back_populates="exercise")
def get_field_secondary_text(self):
if self.set_range.lower is None:
sets = "None"
elif self.set_range.upper is None:
sets = str(self.set_range.lower) + "+"
else:
sets = str(self.set_range.lower) + "-"
sets += str(self.set_range.upper) if self.set_range.upper_inc else str(self.set_range.upper-1)
if self.rep_range.lower is None:
reps = "None"
elif self.rep_range.upper is None:
reps = str(self.rep_range.lower) + "+"
else:
reps = str(self.rep_range.lower) + "-"
reps += str(self.rep_range.upper) if self.rep_range.upper_inc else str(self.rep_range.upper-1)
if getattr(self.pause, "lower", None) is None:
pause = "None"
elif self.pause.upper is None:
pause = str(self.pause.lower) + "+"
else:
            upper = self.pause.upper if self.pause.upper_inc else self.pause.upper - 1
            if self.pause.lower == upper:  # pause range collapses to a single value
                pause = str(self.pause.lower)
            else:
                pause = str(self.pause.lower) + "-" + str(upper)
if self.weight is not None:
if self.weight.kilogram is not None:
weight = str(self.weight.kilogram.lower) + "-" + str(self.weight.kilogram.upper)
elif self.weight.BW:
weight = "BW"
else:
weight = ""
if self.weight.RM is not None:
weight = str(self.weight.RM) + "RM = " + weight
elif self.weight.percentage_range is not None:
weight = str(self.weight.percentage_range.lower) + "-" + str(self.weight.percentage_range.upper) \
+ "% of " + str(self.weight.RM) + " RM = " + weight
if self.weight.band is not None:
weight += " + " + self.weight.band
else:
weight = "None"
weight = weight.strip("= ")
weight = weight if weight != "" else "None"
return "Sets: {sets: <8} Reps: {reps: <8} Tempo: {tempo: <8} Pause: {pause: <8} Weight: {weight} ".format(sets=sets, reps=reps, weight=weight, tempo=str(self.tempo), pause=pause)
@classmethod
def get_by_tag(cls, session, tags):
return session.query(cls).join(cls.tags).filter(Tag.name.in_(tags)).all()
@classmethod # TODO check if it works when rows with type other than "exercise" are added
def search_by_tag(cls, session, search_string):
words = " & ".join(search_string.split())
return session.query(Exercise). \
join(Exercise.tags). \
filter(and_(or_(func.to_tsvector('english', Tag.name).match(words, postgresql_regconfig='english'),
func.to_tsvector('english', Tag.description).match(words, postgresql_regconfig='english')),
Tag.type == "exercise")).all()
@classmethod
def search_by_equipment(cls, session, search_string):
return session.query(Exercise). \
join(Exercise.equipment). \
filter(or_(func.to_tsvector('english', Equipment.name).match(search_string, postgresql_regconfig='english'),
func.to_tsvector('english', Equipment.description).match(search_string, postgresql_regconfig='english'))).all()
@classmethod
def search_by_weight(cls, session, search_string, field):
return session.query(Exercise). \
join(Exercise.weight). \
filter(
func.to_tsvector('english', getattr(Weight, field)).match(search_string, postgresql_regconfig='english')).all()
class Weight(Base):
__tablename__ = 'weight'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
exercise = relationship("Exercise", back_populates="weight", uselist=False)
RM = Column(Integer)
percentage_range = Column(postgresql.NUMRANGE)
kilogram = Column(postgresql.NUMRANGE)
BW = Column(Boolean)
band = Column(String)
class Equipment(Base, MixinGetByName):
__tablename__ = 'equipment'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
name = Column(String)
description = Column(Text)
exercises = relationship(
"Exercise",
secondary=exercise_equipment_table,
back_populates="equipment")
class Tag(Base, MixinGetByName):
__tablename__ = 'tag'
__table_args__ = (CheckConstraint("type ~* '^(exercise|recipe|food){1}$'", name="type_check"),
{'extend_existing': True},
)
id = Column(Integer, primary_key=True)
name = Column(String)
type = Column(String) # exercise/recipe/food
description = Column(Text)
exercises = relationship(
"Exercise",
secondary=exercise_tag_table,
back_populates="tags")
recipes = relationship(
"Recipe",
secondary=recipe_tag_table,
back_populates="tags")
foods = relationship(
"Food",
secondary=food_tag_table,
back_populates="tags")
class Set(Base):
__tablename__ = 'set'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
reps = Column(Integer)
weight = Column(Numeric(precision=5, scale=2))
is_PR = Column(Boolean, default=False)
is_AMRAP = Column(Boolean, default=False)
training_exercise_id = Column(Integer, ForeignKey('training_exercise.id'))
class Training(Base, MixinGetByName):
__tablename__ = 'training'
__table_args__ = (CheckConstraint("start < \"end\"", name="time_check"),
{'extend_existing': True},
)
id = Column(Integer, primary_key=True)
name = Column(String)
start = Column(Time)
end = Column(Time)
day = relationship("Day", back_populates="training", uselist=False)
training_exercises = relationship("TrainingExercise", cascade="save-update, merge, delete", back_populates="training")
description = Column(Text)
next = relationship("Training",
uselist=False,
foreign_keys='Training.next_id',
remote_side=[id],
backref=backref("prev", uselist=False))
next_id = Column(Integer, ForeignKey('training.id'))
is_first = Column(Boolean, default=False)
is_template = Column(Boolean, default=False)
training_schedule_id = Column(Integer, ForeignKey('training_schedule.id'))
training_schedule = relationship("TrainingSchedule", back_populates="trainings")
template_executions = relationship("Training",
uselist=True,
foreign_keys='Training.template_id',
backref=backref("template", uselist=False, remote_side=[id]))
template_id = Column(Integer, ForeignKey('training.id'))
training_plan_history_id = Column(Integer, ForeignKey('training_plan_history.id'))
training_plan_history = relationship("TrainingPlanHistory", back_populates="trainings")
def get_exercises(self):
exercises = []
for exercise in self.training_exercises:
exercises.append(exercise.get_superset())
return exercises
@classmethod
    def create_training_sessions(cls, s, templates):  # the caller must commit at the end, after using this function
training_sessions = []
for i, template in enumerate(templates):
training_session = Training(template=template)
if i == 0:
training_session.is_first = True
else:
training_sessions[i-1].next = training_session
training_sessions.append(training_session)
training_exercises = []
s.add(training_session)
s.flush()
supersets = template.get_exercises()
for superset in supersets:
ex_ids = []
for ex in superset:
ex_ids.append(ex.exercise_id)
training_exercises.append(
TrainingExercise.create_superset(s, ex_ids, training_session.id))
return training_sessions
@classmethod
def get_schedules_by_template(cls, session, template):
schedules = session.query(Training).filter(and_(Training.template_id == template.id,
Training.is_first == True)).all()
return schedules
class TrainingExercise(Base):
__tablename__ = 'training_exercise'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
training_id = Column(Integer, ForeignKey('training.id'))
exercise_id = Column(Integer, ForeignKey('exercise.id'))
exercise = relationship("Exercise", back_populates="training_exercises")
training = relationship("Training", back_populates="training_exercises")
is_optional = Column(Boolean, default=False)
superset_with = relationship("TrainingExercise",
cascade="save-update, merge, delete",
uselist=False,
backref=backref("prev", uselist=False, remote_side=[id]))
prev_training_exercise_id = Column(Integer, ForeignKey('training_exercise.id'))
pause = Column(postgresql.INT4RANGE)
sets = relationship("Set", cascade="save-update, merge, delete",)
# TODO: TEST
@classmethod
def create_superset(cls, session, exercise_ids, training_id):
training = session.query(Training).filter(Training.id == training_id).first()
training_exercises = [TrainingExercise() for _ in range(len(exercise_ids))]
exercises = session.query(Exercise).filter(Exercise.id.in_(exercise_ids)).all()
exercises = sort_to_match(exercise_ids, exercises)
for i, ex in enumerate(exercises):
training_exercises[i].exercise = ex
if ex.set_range.upper is not None:
sets = [Set(reps=ex.rep_range.upper) for _ in range(ex.set_range.upper)]
else:
sets = [Set(reps=ex.rep_range.lower) for _ in range(ex.set_range.lower)]
training_exercises[i].sets = sets
if i == 0:
training.training_exercises.append(training_exercises[i])
if i != len(exercises)-1:
training_exercises[i].superset_with = training_exercises[i+1]
return training_exercises
def get_superset(self):
exercises = [self]
ex = self
while ex.superset_with is not None:
exercises.append(ex.superset_with)
ex = ex.superset_with
return exercises
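    # Illustrative usage (comment only; the ids below are hypothetical):
    #     pair = TrainingExercise.create_superset(session, [12, 15], training.id)
    #     session.commit()
    # builds linked TrainingExercise rows for a superset, with default Sets
    # taken from each exercise's set_range/rep_range.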
class TrainingSchedule(Base, MixinGetByName):
__tablename__ = 'training_schedule'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
name = Column(String)
phase_id = Column(Integer, ForeignKey('phase.id'))
phase = relationship("Phase", back_populates="training_schedules")
description = Column(Text)
trainings = relationship("Training", back_populates="training_schedule")
class Phase(Base, MixinGetByName):
__tablename__ = 'phase'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
training_schedules = relationship("TrainingSchedule", back_populates="phase")
training_plan_id = Column(Integer, ForeignKey('training_plan.id'))
training_plan = relationship("TrainingPlan", back_populates="phases")
name = Column(String)
length = Column(postgresql.INT4RANGE)
description = Column(Text)
class TrainingPlan(MixinGetByName, Base):
__tablename__ = 'training_plan'
__table_args__ = {'extend_existing': True}
id = Column(Integer, primary_key=True)
description = Column(Text)
phases = relationship("Phase", back_populates="training_plan")
training_plan_history = relationship("TrainingPlanHistory", back_populates="training_plan")
@classmethod
def get_current(cls, session):
return session.query(cls).join(cls.training_plan_history).filter(
TrainingPlanHistory.end == None
).scalar()
@classmethod # TODO test with more t_p, phases, t_t in tables
def get_schedules(cls, session, plan):
return session.query(TrainingSchedule).select_from(TrainingPlan).\
join(TrainingPlan.phases).\
join(TrainingSchedule).\
filter(plan.id == TrainingPlan.id).all()
class TrainingPlanHistory(Base):
__tablename__ = 'training_plan_history'
__table_args__ = (CheckConstraint("start < \"end\"", name="date_check"),
{'extend_existing': True},
)
id = Column(Integer, primary_key=True)
training_plan_id = Column(Integer, ForeignKey('training_plan.id'))
training_plan = relationship("TrainingPlan", back_populates="training_plan_history")
goals = relationship("Goal", back_populates="training_plan_history")
start = Column(Date)
end = Column(Date)
description = Column(Text) # TODO test
trainings = relationship("Training", back_populates="training_plan_history")
@classmethod
def get_all(cls, session): # TODO test
return session.query(TrainingPlanHistory)\
.join(TrainingPlanHistory.training_plan)\
.group_by(TrainingPlanHistory.id, TrainingPlan.name)\
.order_by(TrainingPlanHistory.start).all()
class Goal(MixinGetByName, Base):
__tablename__ = 'goal'
__table_args__ = (CheckConstraint("start_date < end_date", name="date_check"),
{'extend_existing': True},
)
id = Column(Integer, primary_key=True)
achieved = Column(Boolean, default=False)
notes = Column(Text)
start_date = Column(Date)
end_date = Column(Date)
training_plan_history_id = Column(Integer, ForeignKey('training_plan_history.id'))
training_plan_history = relationship("TrainingPlanHistory", back_populates="goals")
@classmethod
def create_goal(cls, session, size):
# TODO
return
| mit | 8,773,634,508,920,029,000 | 41.082729 | 186 | 0.614037 | false |
synclab/radclock | python-module/setup.py | 1 | 2076 |
# Copyright (C) 2006-2011 Julien Ridoux <[email protected]>
#
# This file is part of the radclock program.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from distutils.core import setup, Extension
import os, re
OS_LINUX = 'Linux'
OS_FREEBSD = 'FreeBSD'
os_uname = os.uname()
OS = os_uname[0]
module_radclock_linux = Extension('radclock',
include_dirs = ['../libradclock'],
libraries = ['radclock', 'nl'],
library_dirs = ['/usr/local/lib'],
sources = [ 'radclockmodule.c' ]
)
module_radclock_freebsd = Extension('radclock',
include_dirs = ['../libradclock'],
libraries = ['radclock'],
library_dirs = ['/usr/local/lib'],
sources = [ 'radclockmodule.c' ]
)
if OS == OS_LINUX:
module_radclock = module_radclock_linux
if OS == OS_FREEBSD:
module_radclock = module_radclock_freebsd
setup ( name = 'python-radclock',
version = '0.2.2',
description = 'This package provides python bindings to the libradclock C library.',
author = 'Julien Ridoux',
author_email = '[email protected]',
url = 'http://www.synclab.org/tscclock/',
long_description = '''
This package provides python bindings to the libradclock C library.
It provides ways of creating a radclock instance and get the time as
created by the radclock.
It provides all basic functions of the libradclock library: absolute
clock, difference clock, clock status and system data.
''',
ext_modules = [module_radclock]
)
| bsd-2-clause | 8,208,464,072,412,713,000 | 28.657143 | 86 | 0.71869 | false |
dragondjf/musicplayertest | player/__init__.py | 1 | 25345 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Deepin, Inc.
# 2011 Hou Shaohui
#
# Author: Hou Shaohui <[email protected]>
# Maintainer: Hou ShaoHui <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gobject
import threading
import copy
from time import time
from config import config
from library import MediaDB
from logger import Logger
from player.fadebin import PlayerBin, BAD_STREAM_SCHEMES
from utils import (fix_charset, ThreadRun, get_uris_from_asx,
get_uris_from_pls, get_uris_from_m3u,
get_uris_from_xspf, get_mime_type, get_scheme)
from helper import Dispatcher
DEBUG = False
class DeepinMusicPlayer(gobject.GObject, Logger):
__gsignals__ = {
"play-end" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"new-song" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"instant-new-song" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"paused" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"played" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"stopped" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"seeked" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"loaded" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"init-status" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"fetch-start" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
"fetch-end" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
def __init__(self):
gobject.GObject.__init__(self)
# Init.
self.song = None
self.fetch_song = None
self.__source = None
self.__need_load_prefs = True
self.__current_stream_seeked = False
self.__next_already_called = False
self.__emit_signal_new_song_id = None
self.skip_error_song_flag = False
self.stop_after_this_track = False
self.__current_song_reported = False
self.__current_duration = None
self.play_thread_id = 0
MediaDB.connect("simple-changed", self.__on_change_songs)
self.bin = PlayerBin()
self.bin.connect("eos", self.__on_eos)
self.bin.connect("error", self.__on_error)
self.bin.connect("tags-found", self.__on_tag)
self.bin.connect("tick", self.__on_tick)
self.bin.connect("playing-stream", self.__on_playing)
def __on_change_songs(self, db, songs):
if not self.song: return
if self.song.get_type() in ("cue", "cdda"):
return
if self.song in songs:
self.song = songs[songs.index(self.song)]
def __on_eos(self, bin, uri):
self.logdebug("received eos for %s", uri)
if uri == self.song.get("uri") and not self.__next_already_called:
if config.get("setting", "loop_mode") == "single_mode" and \
config.getboolean("player", "crossfade"):
pass
else:
self.logdebug("request new song: eos and play-end not emit")
self.emit("play-end")
self.next()
self.__next_already_called = False
def __on_error(self, bin, uri):
self.logdebug("gst error received for %s", uri)
if self.skip_error_song_flag:
self.skip_error_song_flag = False
return
self.bin.xfade_close()
config.set("player", "play", "false")
self.emit("paused")
if self.song:
if getattr(self.__source, 'add_invaild_song', None):
self.__source.add_invaild_song(self.song)
if not self.song:
self.emit("init-status")
self.song = None
return
        # check the cdda/webcast case first; otherwise the "not local" early
        # return below would make that branch unreachable
        if self.song.get_type() in ["cdda", "webcast"]:
            self.emit("init-status")
            self.song = None
            return
        if self.song.get_type() != "local":
            return
if uri == self.song.get("uri") and not self.__next_already_called:
self.logdebug("request new song: error and play-end not emit")
self.emit("play-end")
self.next(True)
self.__next_already_called = False
def __on_tag(self, bin, taglist):
''' The playbin found the tag information'''
if not self.song: return
if not self.song.get("title") and self.song.get_type() not in ["cue", "cdda", "webcast", "local"]:
self.logdebug("tag found %s", taglist)
IDS = {
"title": "title",
"genre": "genre",
"artist": "artist",
"album": "album",
"bitrate": "#bitrate",
'track-number':"#track"
}
mod = {}
for key in taglist.keys():
if IDS.has_key(key):
if key == "lenght":
value = int(taglist[key]) * 1000
elif key == "bitrate":
value = int(taglist[key] / 100)
elif isinstance(taglist[key], long):
value = int(taglist[key])
else:
value = fix_charset(taglist[key])
mod[IDS[key]] = value
MediaDB.set_property(self.song, mod)
def __on_tick(self, bin, pos, duration):
if self.song:
if self.song.get_type() == "webcast":
return
pos /= 1000
duration /= 1000
if not duration or duration <= 0:
return
else:
if not self.song.get("#duration") or self.song.get("#duration") != duration * 1000:
if self.song.get_type() != "cue":
MediaDB.set_property(self.song, {"#duration": duration * 1000})
if self.song.get_type() == "cue":
duration = self.song.get("#duration") / 1000
pos = pos - self.song.get("seek", 0)
self.perhap_report(pos, duration) # todo
crossfade = self.get_crossfade()
if crossfade < 0:
crossfade = 0
remaining = duration - pos
if crossfade:
if remaining < crossfade:
if not self.__next_already_called and remaining > 0:
self.logdebug("request new song: on tick and play-end not emit")
self.emit("play-end")
self.next()
self.__next_already_called = True
# else:
# self.__next_already_called = False
# else:
# self.__next_already_called = False
def __on_playing(self, bin, uri):
'''Signal emitted by fadebin when a new stream previously queued start'''
if not self.song: return
if uri == self.song.get("uri"):
self.logdebug("signal playing-stream receive by %s", uri)
config.set("player", "play", "true")
self.emit("played")
self.__emit_signal_new_song()
def __emit_signal_new_song(self):
def real_emit_signal_new_song():
self.emit("new-song", self.song)
self.__emit_signal_new_song_id = None
if self.__emit_signal_new_song_id is not None:
gobject.source_remove(self.__emit_signal_new_song_id)
self.__emit_signal_new_song_id = None
self.__emit_signal_new_song_id = gobject.timeout_add(5000, real_emit_signal_new_song)
def set_source(self, source):
self.__source = source
def get_source(self):
return self.__source
def async_fetch(self, song):
uri = song.get("uri")
ntry = 2
uris = None
mime_type = get_mime_type(uri)
while not uris:
if mime_type == "audio/x-scpls":
uris = get_uris_from_pls(uri)
elif mime_type == "audio/x-mpegurl":
uris = get_uris_from_m3u(uri)
elif mime_type == "video/x-ms-asf":
uris = get_uris_from_asx(uri)
elif mime_type == "application/xspf+xml":
uris = get_uris_from_xspf(uri)
ntry += 1
if ntry > 3: break
# TODO: Improve multiple webradio url
if uris:
self.loginfo("%s choosen in %s", uris[0], uri)
uri = uris[0]
else:
self.loginfo("no playable uri found in %s", uri)
uri = None
return song, uri
def play_radio(self, fetch_result, play=False, crossfade=None, seek=None):
song, uri = fetch_result
if self.fetch_song:
if self.fetch_song == song:
if uri:
song["uri"] = uri
self.__set_song(song, play, crossfade, seek)
self.emit("fetch-end", uri)
self.fetch_song = None
def set_song(self, song, play=False, crossfade=None, seek=None):
uri = song.get("uri")
mime_type = get_mime_type(uri)
if mime_type in [ "audio/x-scpls", "audio/x-mpegurl", "video/x-ms-asf", "application/xspf+xml" ]:
if get_scheme(song.get("uri")) != "file":
self.fetch_song = song
self.emit("fetch-start", song)
ThreadRun(self.async_fetch, self.play_radio, (song,), (play, crossfade, seek)).start()
else:
self.fetch_song = None
self.__set_song(song, play, crossfade, seek)
else:
self.fetch_song = None
self.__set_song(song, play, crossfade, seek)
def __set_song(self, song, play=False, crossfade=None, seek=None):
'''set song'''
if not song:
return
is_stop = False
# report playcount
self.perhap_report()
self.stop_after_this_track = False
self.__need_load_prefs = False
if seek:
self.__current_stream_seeked = True
else:
self.__current_stream_seeked = False
# get crossfade.
if crossfade is None:
crossfade = self.get_crossfade()
if song.get_type() == "cue":
uri = song.get("real_uri")
else:
uri = song.get('uri')
self.logdebug("player try to load %s", uri)
        # remove the old stream from the pipeline, except when we need to fade
if self.song:
if self.song.get_type() == "webcast":
self.force_fade_close()
is_stop = True
if song and song.get_type() == "webcast":
if not is_stop:
self.force_fade_close()
is_stop = True
if self.song and (crossfade == -1 or self.is_paused() or not self.is_playable()):
if not is_stop:
self.force_fade_close()
# if song.get_scheme() in BAD_STREAM_SCHEMES:
# self.bin.dispose_streams()
# set current song and try play it.
self.song = song
self.__current_song_reported = False
self.emit("instant-new-song", self.song)
mime_type = get_mime_type(uri)
if mime_type in [ "audio/x-scpls", "audio/x-mpegurl", "video/x-ms-asf", "application/xspf+xml" ]:
            # TODO: reading the playlist needs to be asynchronous
ntry = 2
uris = None
while not uris:
if mime_type == "audio/x-scpls":
uris = get_uris_from_pls(uri)
elif mime_type == "audio/x-mpegurl":
uris = get_uris_from_m3u(uri)
elif mime_type == "video/x-ms-asf":
uris = get_uris_from_asx(uri)
elif mime_type == "application/xspf+xml":
uris = get_uris_from_xspf(uri)
ntry += 1
if ntry > 3: break
# TODO: Improve multiple webradio url
if uris:
self.loginfo("%s choosen in %s", uris[0], uri)
uri = uris[0]
else:
self.loginfo("no playable uri found in %s", uri)
uri = None
if song.get_scheme() in BAD_STREAM_SCHEMES:
self.play_thread_id += 1
play_thread_id = copy.deepcopy(self.play_thread_id)
self.thread_play(uri, song, play, play_thread_id)
else:
ret = uri and self.bin.xfade_open(uri)
if not ret:
# gobject.idle_add(self.emit, "play-end")
if self.song:
if getattr(self.__source, 'add_invaild_song', None):
self.__source.add_invaild_song(self.song)
self.skip_error_song_flag = True
self.next()
elif play:
self.play(crossfade, seek)
def force_fade_close(self):
if not self.song: return
self.logdebug("Force remove stream: %s", self.song.get("uri"))
if self.song.get_scheme() in BAD_STREAM_SCHEMES:
try:
threading.Thread(target=self.bin.xfade_close, args=((self.song.get("uri"),))).start()
except Exception, e:
self.logdebug("Force stop song:%s failed! error: %s", self.song.get("uri"), e)
else:
self.bin.xfade_close(self.song.get("uri"))
def thread_play(self, uri, song, play, thread_id):
ThreadRun(self.bin.xfade_open, self.emit_and_play, (uri,), (song, play, thread_id)).start()
def emit_and_play(self, ret, song, play, thread_id):
if thread_id == self.play_thread_id:
if song == self.song:
if ret and play:
self.play(0, play_emit=False)
def play_new(self, song, crossfade=None, seek=None):
'''add new song and try to play it'''
self.set_song(song, True, crossfade, seek)
def play(self, crossfade=-1, seek=None, play_emit=True):
        '''play current song'''
if self.song is None:
return
if seek:
crossfade = -1
ret = self.bin.xfade_play(crossfade)
if play_emit:
if not ret:
self.emit("paused")
config.set("player", "play", "false")
gobject.idle_add(self.emit, "play-end")
else:
if seek:
gobject.idle_add(self.seek, seek)
self.emit("played")
return ret
def pause(self):
'''pause'''
if self.song is None :
return
self.bin.xfade_pause()
config.set("player", "play", "false")
self.emit("paused")
def stop(self):
self.stop_after_this_track = False
self.update_skipcount()
self.bin.xfade_close()
config.set("player", "play", "false")
self.emit("stopped")
def previous(self):
'''previous song'''
self.update_skipcount()
if self.__source:
song = self.__source.get_previous_song()
if song:
self.play_new(song, seek=song.get("seek", None))
return
else:
self.stop()
    def next(self, manual=False):
'''next song'''
self.update_skipcount()
if not self.__source:
return
        song = self.__source.get_next_song(manual)
# self.fadeout_and_stop()
if song:
if config.getboolean("player", "crossfade") and \
config.getboolean("player", "crossfade_gapless_album") and \
self.song and song.get("album") == self.song.get("album"):
self.logdebug("request gapless to the backend")
self.play_new(song, seek=song.get("seek", None))
else:
self.play_new(song, seek=song.get("seek", None))
return
# else:
# # stop the current song
# self.fadeout_and_stop()
def get_song_seek(self, song):
seek = None
if song.has_key("seek"):
seek = song.get("seek")
return seek
def rewind(self):
'''rewind'''
length = self.get_length()
if not length:
self.logdebug("Can't rewind a stream with on duration")
return
jump = max(5, length * 0.05)
pos = self.get_position()
if pos >= 0:
pos = max(0, pos - jump)
self.seek(pos)
def forward(self):
'''forward'''
length = self.get_length()
if not length:
self.logdebug("Can't forward a stream with on duration")
return
jump = max(5, length * 0.05)
pos = float(self.get_position())
if pos >=0:
pos = float(min(pos+jump, length))
self.logdebug("request seek to %d", pos)
self.seek(pos)
def playpause(self):
'''play or pause'''
self.logdebug("is paused %s ?", self.is_paused())
if self.song is None: return False
if not self.is_paused():
self.pause()
else:
self.logdebug('is playable ? %s', self.is_playable())
if self.is_playable():
self.play(-1)
else:
self.logdebug("have song %s", self.song)
if self.song:
# Reload the current song
self.play_new(self.song, seek=self.song.get("seek", None))
elif self.__source != None:
self.next(True)
else:
self.stop()
return True
def seek(self, pos, emit_signal=True):
'''seek'''
if self.bin.xfade_seekable():
self.__current_stream_seeked = True
self.bin.xfade_set_time(pos)
if emit_signal:
gobject.idle_add(self.emit, "seeked", pos)
# self.emit("seeked", pos)
else:
self.logdebug("current song is not seekable")
def set_volume(self, num):
self.__volume = num
self.bin.set_volume(num)
def get_volume(self):
return self.bin.get_volume()
volume = property(get_volume, set_volume)
def increase_volume(self):
current_volume = self.get_volume()
current_volume += 0.2
if current_volume > 1.0:
current_volume = 1.0
# self.set_volume(current_volume)
Dispatcher.volume(current_volume)
def decrease_volume(self):
current_volume = self.get_volume()
current_volume -= 0.2
if current_volume < 0:
current_volume = 0.0
# self.set_volume(current_volume)
Dispatcher.volume(current_volume)
def mute_volume(self):
# self.set_volume(0.0)
Dispatcher.volume(0.0)
def update_volume(self, volume):
if volume > 1.0 : volume = 1.0
if volume < 0.0 : volume = 0.0
Dispatcher.volume(volume)
def is_paused(self):
'''whether the current song is paused.'''
return not self.bin.xfade_playing()
def is_playable(self):
        '''whether the current stream is opened'''
return self.bin.xfade_opened()
def get_position(self):
'''get postion'''
pos = self.bin.xfade_get_time()
pos /= 1000
# return value
if self.song and self.song.get_type() == "cue":
return int(pos) - self.song.get("seek", 0)
return int(pos)
def get_lyrics_position(self):
pos = self.bin.xfade_get_time()
if self.song and self.song.get_type() == "cue":
return pos - self.song.get("seek", 0) * 1000
return pos
def get_lyrics_length(self):
if self.song and self.song.get_type() == "cue":
return self.song.get("#duration") * 1000
return self.bin.xfade_get_duration()
def get_length(self):
        '''get length'''
if self.song is not None:
if self.song.get_type() == "cue":
return self.song.get("#duration")
duration = self.bin.xfade_get_duration()
duration /= 1000
if duration != -1:
                # if the current song dict doesn't have '#duration', set it
if not self.song.get("#duration"):
MediaDB.set_property(self.song, {"#duration": duration * 1000})
return duration
elif self.song.get("#duration"):
return self.song.get("#duration") / 1000
return 0
def get_crossfade(self):
'''get crossfade'''
if config.getboolean("player", "crossfade"):
try:
crossfade = float(config.get("player", "crossfade_time"))
except:
crossfade = 3.5
if crossfade > 50:
crossfade = 3.5
else:
crossfade = -1
return crossfade
def perhap_report(self, pos=None, duration=None):
'''report song'''
if not self.song: return
if self.song.get_type() != "local": return
if not duration:
duration = self.get_length()
if not pos:
pos = self.get_position()
if self.song \
and not self.__current_stream_seeked \
and not self.__next_already_called \
and not self.__current_song_reported \
and duration > 10 and pos and pos >= min(duration / 2, 240 * 1000):
MediaDB.set_property(self.song, {"#playcount": self.song.get("#playcount", 0) + 1})
MediaDB.set_property(self.song, {"#lastplayed":time()})
self.__current_song_reported = True
def current_stream_seeked(self):
return self.__current_stream_seeked
def load(self):
'''load configure'''
pass
def save_state(self):
'''save current song's state'''
if not self.song:
return
if self.song.get_type() == "local":
config.set("player", "song_type", self.song.get_type())
config.set("player", "uri", self.song.get("uri"))
config.set("player", "seek", str(self.get_position()))
if not self.is_playable():
state = "stop"
elif self.is_paused():
state = "paused"
else:
state = "playing"
config.set("player", "state", state)
self.logdebug("player status saved, %s", state)
def update_skipcount(self):
'''update skipcount.'''
# if not played until the end
if not self.song: return
if self.song.get_type() != "local": return
if not self.__current_song_reported and self.song:
MediaDB.set_property(self.song, {"#skipcount":self.song.get("#skipcount", 0) + 1})
def fadeout_and_stop(self):
remaining = self.get_length() - self.get_position()
if remaining <= 0:
# when there is no crossfade
self.stop()
else:
handler_id = self.bin.connect("eos", lambda * args: self.stop())
gobject.timeout_add(remaining, lambda * args: self.bin.disconnect(handler_id) is not None, handler_id)
self.logdebug("playlist finished")
Player = DeepinMusicPlayer()
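# --- Usage sketch (illustrative, not part of the original module) -----------
# Player drives any "source" object that implements get_next_song() and
# get_previous_song(); the minimal playlist below is a hypothetical example of
# such a source, only meant to show how it plugs into Player.
class _ExamplePlaylistSource(object):
    def __init__(self, songs):
        self.songs = list(songs)
        self.index = 0

    def get_next_song(self, manual=False):
        # wrap around at the end of the playlist
        self.index = (self.index + 1) % len(self.songs)
        return self.songs[self.index]

    def get_previous_song(self):
        self.index = (self.index - 1) % len(self.songs)
        return self.songs[self.index]

# Typical wiring (commented out because it needs real song dicts and a running
# gobject main loop):
#   Player.set_source(_ExamplePlaylistSource(songs))
#   Player.play_new(songs[0])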
| gpl-2.0 | 6,983,345,662,516,980,000 | 35.946064 | 114 | 0.499704 | false |
hsoft/musicguru | core/manualfs.py | 1 | 9237 | # Created By: Virgil Dupras
# Created On: 2004-12-27
# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
import hsfs as fs
from jobprogress import job
from hscommon.util import nonone
from hscommon.conflict import get_conflicted_name, is_conflicted
class _CopyOf:
#--- Public
def copy(self, refnode):
self.copyof = refnode
def detach_copy(self, keep_original_files=False, keep_original_dirs=False):
if self.is_container:
keep = keep_original_dirs
else:
keep = keep_original_files
if keep:
self.copyof = self.original
else:
self.copyof = None
for child in self:
child.detach_copy(keep_original_files,keep_original_dirs)
#--- Properties
copyof = None
@property
def original(self):
if hasattr(self.copyof, 'original'):
return self.copyof.original
else:
return nonone(self.copyof, self)
class Node(fs.Node):
#--- Override
def __init__(self, parent=None, name=''):
try:
super(Node, self).__init__(parent,name)
except fs.AlreadyExistsError:
newname = parent._resolve_conflict(parent[name], self, name)
if newname:
if isinstance(newname, str):
super(Node, self).__init__(parent, newname)
else:
raise
def _set_name(self, newname):
try:
super(Node, self)._set_name(newname)
except fs.AlreadyExistsError:
newname = self.parent._resolve_conflict(self.parent[newname], self, newname)
if newname:
if isinstance(newname, str):
super(Node, self)._set_name(newname)
else:
raise
#--- Public
def delete(self):
self.parent = None
def move(self, dest, newname=None):
dest.add_child(self, newname)
def rename(self, newname):
self.name = newname
class File(fs.File, Node, _CopyOf):
#--- Public
def copy(self, reffile):
super(File,self).copy(reffile)
for attrname in reffile.INITIAL_INFO:
if attrname in reffile.__dict__:
setattr(self, attrname, getattr(reffile, attrname))
self.INITIAL_INFO = reffile.INITIAL_INFO
class Directory(fs.Directory, Node, _CopyOf):
"""A Directory that you can manipulate at will
This is the opposite of auto.Directory. When you subclass this, you have
to manually add/delete/move everything.
Littles notes:
You might notice that some AlreadyExistsError are raised in this unit.
You might think "hey, fs.Directory covers all possible occurance of
AlreadyExistsError, why do you duplicate code here?" It is true that
fs.Directory takes care of all this. However, if you look at the code
after the raise (in this unit), you will see that , first, it is only in
move. And what's special about move funcs is that you can change the
name as you move. And to do this, you must delete the child from
it's former parent before you add it in it's new parent. If you don't
check for conflict *before* and there's is a conflict occuring, you're
left with a parent less child.
"""
#--- Class Attributes
cls_file_class = File
#--- Overrides
def __init__(self, parent=None, dirname=''):
if isinstance(parent, Directory):
self.__case_sensitive = parent.case_sensitive
else:
self.__case_sensitive = True
self._attrs_to_read = None
super(Directory, self).__init__(parent, dirname)
def _do_hash(self, value):
if (not self.case_sensitive) and isinstance(value, str):
return value.lower()
else:
return value
#--- Protected
def _conflict_check(self, name, node):
if name in self:
newname = self._resolve_conflict(self[name], node, name)
if newname:
return newname
else:
raise fs.AlreadyExistsError(name, self)
else:
return name
def _resolve_conflict(self, offended, offender, conflicted_name): # Virtual
"""Override this to automatically resolve a name conflict instead
of raising an AlreadyExistsError. If you return something else than
None or '', there will be a second try to add name. There is no
third try. if the result of ResolveConflict is also conflictual,
an error will be raised. You can also return a True value that is not
a string, and it will cancel the exception raise, but not make a second
try.
"""
#--- Public
def add_child(self, child, newname=None):
if child in self:
return child
if not newname:
newname = child.name
newname = self._conflict_check(newname, child)
if not isinstance(newname, str):
return child #Just don't perform the add, _resolve_conflict has taken
#care of everything
child.parent = None
child.name = newname
child.parent = self
if isinstance(child, Directory):
child.case_sensitive = self.case_sensitive
return child
def add_dir_copy(self, refdir, newname='', job=job.nulljob):
if not newname:
newname = refdir.name
result = self._create_sub_dir(newname, False)
result.copy(refdir, job)
self.add_child(result)
return result
def add_file_copy(self, reffile, newname=''):
if not newname:
newname = reffile.name
reffile._read_all_info(self._attrs_to_read)
result = self._create_sub_file(newname, False)
result.copy(reffile)
self.add_child(result)
return result
def add_path(self, path):
"""
        Creates the first item of path (a tuple), and recursively calls add_path in this new
directory. If the directory already exists, uses this directory.
Returns the added (or found) directory.
"""
if not path:
return self
else:
try:
founddir = self[path[0]]
if not isinstance(founddir, Directory):
raise fs.InvalidPath(founddir)
except KeyError:
founddir = self._create_sub_dir(path[0])
return founddir.add_path(path[1:])
def clean_empty_dirs(self):
for directory in self.dirs:
directory.clean_empty_dirs()
to_delete = (d for d in self.dirs if not len(d))
for directory in to_delete:
directory.delete()
def copy(self, refdir, job=job.nulljob):
super(Directory, self).copy(refdir)
filecount = refdir.filecount
dircount = refdir.dircount
if filecount > 0:
job = job.start_subjob(dircount + 1)
job.start_job(filecount)
else:
job = job.start_subjob(dircount)
for myfile in refdir.files:
self.add_file_copy(myfile)
job.add_progress()
for directory in refdir.dirs:
self.add_dir_copy(directory, '', job)
def new_directory(self, name):
return self._create_sub_dir(name)
def new_file(self, name):
return self._create_sub_file(name)
#--- Properties
@property
def case_sensitive(self):
return self.__case_sensitive
@case_sensitive.setter
def case_sensitive(self, value):
if value != self.__case_sensitive:
self.__case_sensitive = value
self._rebuild_hashes()
for subdir in self:
if isinstance(subdir, Directory):
subdir.case_sensitive = value
class AutoResolve(Directory):
#---Override
def _resolve_conflict(self, offended, offender, conflicted_name):
if offended.is_container and offender.is_container:
should_merge = self.on_should_merge(offender, offended)
if should_merge:
# There's a circular reference problem
from .fs_utils import smart_move
smart_move(offender, offended)
offender.delete()
return True
return get_conflicted_name(self, conflicted_name)
#---Events
def on_should_merge(self, source, dest):
if (self.parent is not None) and hasattr(self.parent, 'on_should_merge'):
return self.parent.on_should_merge(source, dest)
#---Properties
@property
def allconflicts(self):
return self.get_stat('conflicts', [])
@property
def conflicts(self):
return [y for y in self.files if is_conflicted(y.name)]
class AutoMerge(AutoResolve):
def on_should_merge(self, source, dest):
return True
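# Usage sketch (illustrative, not part of the original module).  It assumes the
# hsfs base classes provide the _create_sub_dir/_create_sub_file primitives
# used by new_directory()/new_file() above.
#
#   root = Directory(None, '')
#   photos = root.new_directory('photos')
#   photos.new_file('cat.jpg')
#   root.add_path(('music', 'rock'))    # creates intermediate directories
#   photos.move(root['music'])          # manual move, conflict-checked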
| bsd-3-clause | -26,157,494,945,306,680 | 32.835165 | 88 | 0.591642 | false |
almarklein/imageio | imageio/plugins/npz.py | 1 | 3389 | # -*- coding: utf-8 -*-
# Copyright (c) 2014, imageio contributors
# imageio is distributed under the terms of the (new) BSD License.
""" Storage of image data in npz format. Not a great format, but at least
it supports volumetric data. And it's less than 100 lines.
"""
from __future__ import absolute_import, print_function, division
import numpy as np
from imageio import formats
from imageio.core import Format
class NpzFormat(Format):
""" NPZ is a file format by numpy that provides storage of array
data using gzip compression. This imageio plugin supports data of any
shape, and also supports multiple images per file.
However, the npz format does not provide streaming; all data is
read/saved at once. Further, there is no support for meta data.
Beware that the numpy npz format has a bug on a certain combination
of Python 2.7 and numpy, which can cause the resulting files to
become unreadable on Python 3. Also, this format is not available
on Pypy.
Parameters for reading
----------------------
None
Parameters for saving
---------------------
None
"""
def _can_read(self, request):
if request.filename.lower().endswith('.npz'):
return True # We support any kind of image data
else:
return False
def _can_save(self, request):
if request.filename.lower().endswith('.npz'):
return True # We support any kind of image data
else:
return False
# -- reader
class Reader(Format.Reader):
def _open(self):
# Load npz file, which provides another file like object
self._npz = np.load(self.request.get_file())
assert isinstance(self._npz, np.lib.npyio.NpzFile)
# Get list of names, ordered by name, but smarter
sorter = lambda x: x.split('_')[-1]
self._names = sorted(self._npz.files, key=sorter)
def _close(self):
self._npz.close()
def _get_length(self):
return len(self._names)
def _get_data(self, index):
# Get data
if index < 0 or index >= len(self._names):
                raise IndexError('Index out of range while reading from npz')
im = self._npz[self._names[index]]
# Return array and empty meta data
return im, {}
def _get_meta_data(self, index):
# Get the meta data for the given index
raise RuntimeError('The npz format does not support meta data.')
# -- writer
class Writer(Format.Writer):
def _open(self):
# Npz is not such a great format. We cannot stream to the file.
# So we remember all images and write them to file at the end.
self._images = []
def _close(self):
# Write everything
np.savez_compressed(self.request.get_file(), *self._images)
def _append_data(self, im, meta):
            self._images.append(im)  # discard meta data
def set_meta_data(self, meta):
raise RuntimeError('The npz format does not support meta data.')
# Register
format = NpzFormat('npz', "Numpy's compressed array format", 'npz', 'iIvV')
formats.add_format(format)
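# Usage sketch (illustrative): once registered, this plugin is selected purely
# by the ".npz" extension.  The function names below follow the imageio API of
# this era (volsave/volread) and may differ in other releases.
#
#   import numpy as np
#   import imageio
#   vol = np.random.rand(8, 16, 16).astype('float32')
#   imageio.volsave('volume.npz', vol)      # write a volume
#   back = imageio.volread('volume.npz')    # read it back
#   assert back.shape == (8, 16, 16)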
| bsd-2-clause | -7,003,940,651,712,403,000 | 31.902913 | 77 | 0.590145 | false |
xelphene/swaf | swaf/wrap.py | 1 | 3089 |
import types
import logging
import inspect
import pprint
import operator
import re
from operator import isCallable
from resp import *
import swaf.error
import swaf.misc
from swaf.misc import isListOfFuncs
DEBUG=1
def wrapFilter(f, filter_name, filters):
newf = chainWrap(f)
newf.__name__ = f.__name__
newf.__module__ = f.__module__
newf = debugWrap(newf)
newf.swaf_orig = f
newf.swaf_filter_name = filter_name
newf.swaf_filters = filters
return newf
def makeWrappedNext(chain):
'''given a list of callables (chain), return a function which takes one
param (req). When this returned function is called, it will call
chain[0]( req, chain[1:] ). '''
assert isListOfFuncs(chain)
def next(req):
nextf = chain.pop(0)
return nextf(req, chain)
next.chain = chain
return next
def chainWrap(handler):
def newhandler(req, arg2):
if isListOfFuncs(arg2):
arg2 = makeWrappedNext(arg2)
return handler(req, arg2)
return newhandler
def debugWrap(f):
description = swaf.misc.describeObject(f)
pp = pprint.PrettyPrinter()
logger = logging.getLogger('swaf.wrap')
def debug(req, chain):
if DEBUG:
if not hasattr(debug,'_traceIndent'):
debug._traceIndent = 0
if len(chain)>0:
chain[0]._traceIndent = debug._traceIndent+1
indent = '. '*debug._traceIndent
cn = [fi.__name__ for fi in chain]
cn = ' -> '.join(cn)
logger.debug('%s| about to call %s' % (indent, description))
logger.debug('%s| chain=%s' % (indent, cn))
logger.debug('%s| req=%s' % (indent, repr(req) ) )
rv = f(req,chain)
logger.debug('%s| %s.%s returned with %s' % (indent, f.__module__, f.__name__, repr(rv)))
return rv
else:
return f(req, chain)
debug.__name__ = f.__name__
debug.__module__ = f.__module__
debug.swaf_orig = f
if hasattr(f,'swaf_description'):
debug.swaf_description = f.swaf_description
if hasattr(f,'swaf_route'):
debug.swaf_route = f.swaf_route
if hasattr(f,'swaf_filters'):
debug.swaf_filters = f.swaf_filters
return debug
def wrapHandler(handler, route, filters, description):
if type(handler)==types.InstanceType:
(args, varargs, varkw, defaults) = inspect.getargspec(handler.__call__)
# remove the 'self' arg from __call__
assert len(args)>0
args = args[1:]
elif type(handler)==types.MethodType:
(args, varargs, varkw, defaults) = inspect.getargspec(handler)
# remove the 'self' arg
assert len(args)>0
args = args[1:]
else:
(args, varargs, varkw, defaults) = inspect.getargspec(handler)
def callHandler(req, next):
if varkw==None:
req = swaf.misc.filterDict(req, args)
if set(req.keys()) != set(args):
raise swaf.error.LeafUnknownParamError(handler,req)
return handler(**req)
else:
return handler(**req)
callHandler.swaf_orig = handler
callHandler.swaf_route = route
callHandler.swaf_filters = filters
callHandler.swaf_description = description
callHandler = debugWrap(callHandler)
callHandler.swaf_orig = handler
callHandler.swaf_route = route
callHandler.swaf_filters = filters
callHandler.swaf_description = description
return callHandler
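# Usage sketch (illustrative; the filter and handler below are hypothetical and
# only show how chainWrap/makeWrappedNext compose a filter chain):
#
#   def logging_filter(req, next):
#       logging.getLogger('swaf').debug('request: %r', req)
#       return next(req)
#
#   def hello_handler(req, next):
#       return 'hello %s' % req.get('name', 'world')
#
#   chain = [chainWrap(logging_filter), chainWrap(hello_handler)]
#   makeWrappedNext(chain)({'name': 'swaf'})   # -> 'hello swaf'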
| gpl-3.0 | 8,237,832,620,152,168,000 | 25.177966 | 92 | 0.690838 | false |
skandavivek/tracking-cars-highway | lk_track2.py | 1 | 3415 | #!/usr/bin/env python
'''
Lucas-Kanade tracker
====================
Lucas-Kanade algorithm to track cars on a highway and save output
lk_track2.py [<video_source>]
ESC - exit
'''
from __future__ import print_function
import numpy as np
import cv2
#import video
from common import anorm2, draw_str
from time import clock
if __name__ == '__main__':
import sys
    video_src = sys.argv[1] if len(sys.argv) > 1 else './cabrillo-1.asf'
count=0
save=np.ndarray(shape=(1, 5), dtype=np.float)
c1=np.ndarray(shape=(1, 5), dtype=np.float)
print(__doc__)
lk_params = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
feature_params = dict( maxCorners = 500,
qualityLevel = 0.3,
minDistance = 25,
blockSize = 25 )
#track_len = 50
detect_interval = 1
tracks = []
cam = cv2.VideoCapture(video_src)
frame_idx = 0
while True:
        ret, frame = cam.read()
        if not ret:
            # end of stream: stop cleanly instead of crashing on a None frame
            break
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
vis = frame.copy()
if len(tracks) > 0:
img0, img1 = prev_gray, frame_gray
p0 = np.float32([tr[-1] for tr in tracks]).reshape(-1, 1, 2)
p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params)
p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params)
d = abs(p0-p0r).reshape(-1, 2).max(-1)
good = d < 1
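            # forward-backward check: keep a point only if re-tracking it from
            # the new frame back to the old one lands within one pixel of its
            # original position; this filters out unreliable flow vectors.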
new_tracks = []
for tr, (x, y), (dx, dy),good_flag in zip(tracks, p1.reshape(-1, 2), (p1-p0).reshape(-1, 2),good):
if not good_flag:
continue
if y>200 and y<350 and x>300 and 500*x-500*y<125000 and np.sqrt(dx**2+dy**2)>.1: #which part of the road to track
tr.append((x, y))
c1[:,0]=x
c1[:,1]=y
c1[:,2]=dx
c1[:,3]=dy
c1[:,4]=count
save=np.r_[save,c1]
new_tracks.append(tr)
cv2.circle(vis, (x, y), 3, (0, 0, 255), -1)
#if len(tr) > track_len:
#del tr[0]
tracks = new_tracks
cv2.polylines(vis, [np.int32(tr) for tr in tracks], False, (0, 255, 0),2)
#cv2.line(vis,(750,500),(250,0),(0,255,0),3)
nc=len(tracks)/6
draw_str(vis, (20, 20), 'track count: %d' % nc)
if frame_idx % detect_interval == 0:
mask = np.zeros_like(frame_gray)
mask[:] = 255
for x, y in [np.int32(tr[-1]) for tr in tracks]:
cv2.circle(mask, (x, y), 50, 0, -1)
p = cv2.goodFeaturesToTrack(frame_gray, mask = mask, **feature_params)
if p is not None:
for x, y in np.float32(p).reshape(-1, 2):
tracks.append([(x, y)])
frame_idx += 1
prev_gray = frame_gray
cv2.imshow('lk_track', vis)
cv2.imwrite('./output-lk/'+str(count)+'.jpg', vis)
print(count)
count=count+1
ch = 0xFF & cv2.waitKey(1)
if count==int(cam.get(cv2.CAP_PROP_FRAME_COUNT))-1:
np.savetxt('cabrillo-1-lk.txt',save,fmt='%9.3f')
if ch == 27:
break
cv2.destroyAllWindows()
| apache-2.0 | -1,440,188,667,553,131,300 | 32.480392 | 119 | 0.489312 | false |
windelbouwman/ppci-mirror | ppci/cli/yacc.py | 1 | 1231 | """ Parser generator utility.
This script can generate a python script from a grammar description.
Invoke the script on a grammar specification file:
.. code::
$ ppci-yacc test.x -o test_parser.py
And use the generated parser by deriving a user class:
.. code::
import test_parser
class MyParser(test_parser.Parser):
pass
p = MyParser()
p.parse()
Alternatively you can load the parser on the fly:
.. code::
import yacc
parser_mod = yacc.load_as_module('mygrammar.x')
class MyParser(parser_mod.Parser):
pass
p = MyParser()
p.parse()
"""
import argparse
from .base import base_parser, LogSetup
from ..lang.tools.yacc import transform
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[base_parser],
)
parser.add_argument(
"source", type=argparse.FileType("r"), help="the parser specification"
)
parser.add_argument(
"-o", "--output", type=argparse.FileType("w"), required=True
)
def yacc(args=None):
args = parser.parse_args(args)
with LogSetup(args):
transform(args.source, args.output)
args.output.close()
if __name__ == "__main__":
yacc()
| bsd-2-clause | -3,877,802,558,502,337,000 | 18.854839 | 74 | 0.669374 | false |
no-net/gr-winelo | python/channel/models/const_multi_cc.py | 1 | 1510 | from gnuradio import gr
class const_multi_cc(gr.hier_block2):
""" Constant channel model.
"""
def __init__(self, tx_id, rx_id,
k11=0.0, k12=1.0, k13=1.0,
k21=1.0, k22=0.0, k23=1.0,
k31=1.0, k32=1.0, k33=0.0):
gr.hier_block2.__init__(
self, "No HW model",
gr.io_signature(1, 1, gr.sizeof_gr_complex),
gr.io_signature(1, 1, gr.sizeof_gr_complex),
)
##################################################
# Parameters
##################################################
# Use Symmetric channels for this model
#k21 = k12
#k31 = k13
#k32 = k23
# No self-coupling
#k11 = k22 = k33 = 0
# Build the channel matrix
self.k = [[k11, k12, k13],
[k21, k22, k23],
[k31, k32, k33]]
##################################################
# Blocks
##################################################
self.multiply = gr.multiply_const_cc(self.k[tx_id - 1][rx_id - 1])
print "[INFO] WiNeLo - Channel model: Setting k = %s for clients %s "\
"and %s" % (self.k[tx_id - 1][rx_id - 1], tx_id, rx_id)
##################################################
# Connections
##################################################
self.connect((self, 0), (self.multiply, 0))
self.connect((self.multiply, 0), (self, 0))
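# Usage sketch (illustrative): the block simply scales the stream by the matrix
# entry k[tx_id - 1][rx_id - 1], so with the defaults above a link from client 1
# to client 2 applies the gain k12:
#
#   ch = const_multi_cc(tx_id=1, rx_id=2)   # multiplies the stream by k12
#
# and is then connected like any other block inside a GNU Radio flowgraph.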
| gpl-3.0 | -3,393,310,703,416,812,000 | 34.952381 | 78 | 0.368212 | false |
fnp/wolnelektury | src/social/migrations/0012_auto_20210120_1444.py | 1 | 1182 | # Generated by Django 2.2.16 on 2021-01-20 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('social', '0011_auto_20190807_1056'),
]
operations = [
migrations.AlterModelOptions(
name='carousel',
options={'verbose_name': 'carousel', 'verbose_name_plural': 'carousels'},
),
migrations.AddField(
model_name='carousel',
name='language',
field=models.CharField(blank=True, choices=[('de', 'Deutsch'), ('en', 'English'), ('es', 'español'), ('fr', 'français'), ('it', 'italiano'), ('lt', 'lietuvių'), ('pl', 'polski'), ('ru', 'русский'), ('uk', 'українська')], default='', max_length=2, verbose_name='language'),
),
migrations.AddField(
model_name='carousel',
name='priority',
field=models.SmallIntegerField(default=0, verbose_name='priority'),
),
migrations.AlterField(
model_name='carousel',
name='slug',
field=models.SlugField(choices=[('main', 'main')], verbose_name='placement'),
),
]
| agpl-3.0 | 3,994,508,156,126,134,000 | 35.3125 | 284 | 0.557659 | false |
yeming233/rally | rally/plugins/openstack/verification/tempest/config.py | 1 | 9370 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import os
from oslo_config import cfg
import six
from six.moves import configparser
from six.moves.urllib import parse
from rally.common import logging
from rally import exceptions
from rally.verification import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class TempestConfigfileManager(object):
"""Class to create a Tempest config file."""
def __init__(self, deployment):
self.credential = deployment.get_credentials_for("openstack")["admin"]
self.clients = self.credential.clients()
self.available_services = self.clients.services().values()
self.conf = configparser.ConfigParser()
def _get_service_type_by_service_name(self, service_name):
for s_type, s_name in self.clients.services().items():
if s_name == service_name:
return s_type
def _configure_auth(self, section_name="auth"):
self.conf.set(section_name, "admin_username",
self.credential.username)
self.conf.set(section_name, "admin_password",
self.credential.password)
self.conf.set(section_name, "admin_project_name",
self.credential.tenant_name)
# Keystone v3 related parameter
self.conf.set(section_name, "admin_domain_name",
self.credential.user_domain_name or "Default")
# Sahara has two service types: 'data_processing' and 'data-processing'.
# 'data_processing' is deprecated, but it can be used in previous OpenStack
# releases. So we need to configure the 'catalog_type' option to support
# environments where 'data_processing' is used as service type for Sahara.
def _configure_data_processing(self, section_name="data-processing"):
if "sahara" in self.available_services:
self.conf.set(section_name, "catalog_type",
self._get_service_type_by_service_name("sahara"))
def _configure_identity(self, section_name="identity"):
self.conf.set(section_name, "region",
self.credential.region_name)
# discover keystone versions
def get_versions(auth_url):
from keystoneauth1 import discover
from keystoneauth1 import session
temp_session = session.Session(
verify=(self.credential.https_cacert or
not self.credential.https_insecure),
timeout=CONF.openstack_client_http_timeout)
data = discover.Discover(temp_session, auth_url).version_data()
return dict([(v["version"][0], v["url"]) for v in data])
# check the original auth_url without cropping versioning to identify
# the default version
versions = get_versions(self.credential.auth_url)
cropped_auth_url = self.clients.keystone._remove_url_version()
if cropped_auth_url == self.credential.auth_url:
# the given auth_url doesn't contain version
if set(versions.keys()) == {2, 3}:
# ok, both versions of keystone are enabled, we can take urls
# there
uri = versions[2]
uri_v3 = versions[3]
target_version = 3
elif set(versions.keys()) == {2} or set(versions.keys()) == {3}:
# only one version is available while discovering, let's just
# guess the second auth_url (it should not be used)
# get the most recent version
target_version = sorted(versions.keys())[-1]
if target_version == 2:
uri = self.credential.auth_url
uri_v3 = parse.urljoin(uri, "/v3")
else:
uri_v3 = self.credential.auth_url
uri = parse.urljoin(uri_v3, "/v2.0")
else:
                # Did Keystone release a new version of the API?!
LOG.debug("Discovered keystone versions: %s", versions)
raise exceptions.RallyException("Failed to discover keystone "
"auth urls.")
else:
if self.credential.auth_url.rstrip("/").endswith("v2.0"):
uri = self.credential.auth_url
uri_v3 = uri.replace("/v2.0", "/v3")
target_version = 2
else:
uri_v3 = self.credential.auth_url
uri = uri_v3.replace("/v3", "/v2.0")
target_version = 3
self.conf.set(section_name, "auth_version", "v%s" % target_version)
self.conf.set(section_name, "uri", uri)
self.conf.set(section_name, "uri_v3", uri_v3)
self.conf.set(section_name, "disable_ssl_certificate_validation",
str(self.credential.https_insecure))
self.conf.set(section_name, "ca_certificates_file",
self.credential.https_cacert)
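    # For reference, the identity section produced above ends up looking
    # roughly like the following (values are illustrative only):
    #
    #   [identity]
    #   uri = http://keystone.example.org:5000/v2.0
    #   uri_v3 = http://keystone.example.org:5000/v3
    #   auth_version = v3
    #   disable_ssl_certificate_validation = False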
# The compute section is configured in context class for Tempest resources.
# Options which are configured there: 'image_ref', 'image_ref_alt',
# 'flavor_ref', 'flavor_ref_alt'.
def _configure_network(self, section_name="network"):
if "neutron" in self.available_services:
neutronclient = self.clients.neutron()
public_nets = [net for net
in neutronclient.list_networks()["networks"]
if net["status"] == "ACTIVE" and
net["router:external"] is True]
if public_nets:
net_id = public_nets[0]["id"]
net_name = public_nets[0]["name"]
self.conf.set(section_name, "public_network_id", net_id)
self.conf.set(section_name, "floating_network_name", net_name)
else:
novaclient = self.clients.nova()
net_name = next(net.human_id for net in novaclient.networks.list()
if net.human_id is not None)
self.conf.set("compute", "fixed_network_name", net_name)
self.conf.set("validation", "network_for_ssh", net_name)
def _configure_network_feature_enabled(
self, section_name="network-feature-enabled"):
if "neutron" in self.available_services:
neutronclient = self.clients.neutron()
extensions = neutronclient.list_ext("extensions", "/extensions",
retrieve_all=True)
aliases = [ext["alias"] for ext in extensions["extensions"]]
aliases_str = ",".join(aliases)
self.conf.set(section_name, "api_extensions", aliases_str)
def _configure_object_storage(self, section_name="object-storage"):
self.conf.set(section_name, "operator_role",
CONF.tempest.swift_operator_role)
self.conf.set(section_name, "reseller_admin_role",
CONF.tempest.swift_reseller_admin_role)
def _configure_service_available(self, section_name="service_available"):
services = ["cinder", "glance", "heat", "ironic", "neutron", "nova",
"sahara", "swift"]
for service in services:
# Convert boolean to string because ConfigParser fails
# on attempt to get option with boolean value
self.conf.set(section_name, service,
str(service in self.available_services))
def _configure_validation(self, section_name="validation"):
if "neutron" in self.available_services:
self.conf.set(section_name, "connect_method", "floating")
else:
self.conf.set(section_name, "connect_method", "fixed")
def _configure_orchestration(self, section_name="orchestration"):
self.conf.set(section_name, "stack_owner_role",
CONF.tempest.heat_stack_owner_role)
self.conf.set(section_name, "stack_user_role",
CONF.tempest.heat_stack_user_role)
def create(self, conf_path, extra_options=None):
self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini"))
for name, method in inspect.getmembers(self, inspect.ismethod):
if name.startswith("_configure_"):
method()
if extra_options:
utils.add_extra_options(extra_options, self.conf)
with open(conf_path, "w") as configfile:
self.conf.write(configfile)
raw_conf = six.StringIO()
raw_conf.write("# Some empty values of options will be replaced while "
"creating required resources (images, flavors, etc).\n")
self.conf.write(raw_conf)
return raw_conf.getvalue()
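# Usage sketch (illustrative; ``deployment`` is assumed to be a Rally deployment
# object exposing get_credentials_for("openstack"), and the extra_options
# structure shown is an assumption about the section -> options mapping):
#
#   manager = TempestConfigfileManager(deployment)
#   raw_conf = manager.create(
#       "/tmp/tempest.conf",
#       extra_options={"compute": {"build_timeout": "300"}})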
| apache-2.0 | 5,861,679,213,484,211,000 | 43.198113 | 79 | 0.594023 | false |
robocomp/robocomp-robolab | components/localization/UWBpublisher/src/genericworker.py | 1 | 4050 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
import sys, Ice, os
from PySide2 import QtWidgets, QtCore
ROBOCOMP = ''
try:
ROBOCOMP = os.environ['ROBOCOMP']
except KeyError:
print('$ROBOCOMP environment variable not set, using the default value /opt/robocomp')
ROBOCOMP = '/opt/robocomp'
preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ --all /opt/robocomp/interfaces/"
Ice.loadSlice(preStr+"CommonBehavior.ice")
import RoboCompCommonBehavior
additionalPathStr = ''
icePaths = [ '/opt/robocomp/interfaces' ]
try:
SLICE_PATH = os.environ['SLICE_PATH'].split(':')
for p in SLICE_PATH:
icePaths.append(p)
additionalPathStr += ' -I' + p + ' '
icePaths.append('/opt/robocomp/interfaces')
except:
print('SLICE_PATH environment variable was not exported. Using only the default paths')
pass
ice_UWBSimple = False
for p in icePaths:
if os.path.isfile(p+'/UWBSimple.ice'):
preStr = "-I/opt/robocomp/interfaces/ -I"+ROBOCOMP+"/interfaces/ " + additionalPathStr + " --all "+p+'/'
wholeStr = preStr+"UWBSimple.ice"
Ice.loadSlice(wholeStr)
ice_UWBSimple = True
break
if not ice_UWBSimple:
    print('Couldn\'t load UWBSimple')
sys.exit(-1)
from RoboCompUWB import *
class GenericWorker(QtCore.QObject):
kill = QtCore.Signal()
#Signals for State Machine
t_initialize_to_compute = QtCore.Signal()
t_initialize_to_finalize = QtCore.Signal()
t_compute_to_compute = QtCore.Signal()
t_compute_to_finalize = QtCore.Signal()
#-------------------------
def __init__(self, mprx):
super(GenericWorker, self).__init__()
self.uwbsimple_proxy = mprx["UWBSimplePub"]
self.mutex = QtCore.QMutex(QtCore.QMutex.Recursive)
self.Period = 30
self.timer = QtCore.QTimer(self)
#State Machine
self.defaultMachine= QtCore.QStateMachine()
self.compute_state = QtCore.QState(self.defaultMachine)
self.initialize_state = QtCore.QState(self.defaultMachine)
self.finalize_state = QtCore.QFinalState(self.defaultMachine)
#------------------
#Initialization State machine
self.initialize_state.addTransition(self.t_initialize_to_compute, self.compute_state)
self.initialize_state.addTransition(self.t_initialize_to_finalize, self.finalize_state)
self.compute_state.addTransition(self.t_compute_to_compute, self.compute_state)
self.compute_state.addTransition(self.t_compute_to_finalize, self.finalize_state)
self.compute_state.entered.connect(self.sm_compute)
self.initialize_state.entered.connect(self.sm_initialize)
self.finalize_state.entered.connect(self.sm_finalize)
self.timer.timeout.connect(self.t_compute_to_compute)
self.defaultMachine.setInitialState(self.initialize_state)
#------------------
    #Slots function State Machine
@QtCore.Slot()
def sm_compute(self):
print("Error: lack sm_compute in Specificworker")
sys.exit(-1)
@QtCore.Slot()
def sm_initialize(self):
print("Error: lack sm_initialize in Specificworker")
sys.exit(-1)
@QtCore.Slot()
def sm_finalize(self):
print("Error: lack sm_finalize in Specificworker")
sys.exit(-1)
#-------------------------
@QtCore.Slot()
def killYourSelf(self):
rDebug("Killing myself")
self.kill.emit()
# \brief Change compute period
    # @param p Period in ms
@QtCore.Slot(int)
def setPeriod(self, p):
print("Period changed", p)
self.Period = p
self.timer.start(self.Period)
| gpl-3.0 | -1,514,458,303,238,705,700 | 28.136691 | 106 | 0.715556 | false |
taiwenko/python | Visa.py | 1 | 3504 | #!c:\Python27\python
import vxi11Device as vxi11
#import numpy
import os,sys, time
# FSW
#['Rohde&Schwarz', 'FSW-50', '1312.8000K50/100970', '2.10\n']
#inst = rm.open_resource('TCPIP::10.0.0.160::INSTR')
#SMW200A
#['Rohde&Schwarz', 'SMW200A', '1412.0000K02/101575', '3.1.18.2-3.01.086.171_SP2\n']
#inst = rm.open_resource('TCPIP::10.0.0.225::INSTR')
# Anritsu
#['"Anritsu', 'MT8221B/31/541/542/546', '1350198', '1.77"']
#inst = rm.open_resource('TCPIP::10.0.0.189::INSTR')
#inst = vxi11.Instrument("10.0.0.189")
# Agilent Power Supply N6705B
#['Agilent Technologies', 'N6705B', 'MY50001691', 'D.01.08\n']
#inst = rm.open_resource('TCPIP::10.0.0.193::INSTR')
#inst = vxi11.Instrument("10.0.0.193")
# Agilent VSG
#['Agilent Technologies', ' E4438C', ' MY45093057', ' C.05.83\n']
#inst = vxi11.Vxi11Device("10.0.0.193","inst0")
inst = vxi11.Vxi11Device(host="10.0.0.176",device="inst0")
# R&S LTE DEMOD Software
#['Rohde&Schwarz', 'K10x', '000000/000', 'Version 3.4 Beta 2\n']
#inst = rm.open_resource('TCPIP::127.0.0.1::INSTR')
# JDSU
#inst = rm.open_resource('TCPIP::10.0.0.137::INSTR')
vxi11.timeout(15000)
#idn = inst.query_ascii_values("*IDN?",converter="s")
#print idn
#quit()
#inst.write("CONF:PRES")
res = None
try:
res = inst.ask("*IDN?")
except Exception,e:
print "FAILED %s"%e
print res
#quit()
def AnritsuMT8221B():
#inst.write("FREQuency:CENTer 2.68GHz")
inst.write("FREQuency:CENTer 2.11GHz")
inst.write("BANDWidth:RESolution 10")
time.sleep(3)
inst.write("CONF:RF SUMM")
inst.write("CONF:DEMod SUMM")
#print(inst.query(":MEAsure:DEMod:AVErage?"))
time.sleep(10)
#print(inst.query(":FETCh:SUMMary?"))
#time.sleep(1)
#inst.write("CONF:DEMod SUMM")
#time.sleep(10)
#print(inst.query(":FETCh:SUMMary?"))
#print(inst.write("INIT"))
#time.sleep(4)
#inst.query(":FETCh:RF:ACLR?")
#inst.write("DISP:TRAC:Y:RLEV:OFFS 49")
#print(inst.query(":FETCh:SUMMary?"))
#EVM (rms) in %, EVM (pk) in %,Ref Signal (RS) Power in dBm, Sync Signal (SS) Power in dBm, Carrier Frequency in MHz, Freq Error in Hz, Freq Error in ppm, the Cell ID, and the number of measurements average for Frequency Error.
print(inst.ask(":FETCh:DEMod:CONStln?"))
print(inst.ask("FETCh:RF:ACLR?"))
def RS_SW(inst):
ok = inst.write("CONF:PRES");
inst.write("CONF:LTE:DUP FDD")
inst.write("CONF:LTE:LDIR DL")
inst.write("FREQ:CENT 2.68GHZ")
inst.write("DISP:TRAC:Y:RLEV:OFFS 49")
inst.write("CONF:DL:MIMO:CONF TX2")
res = dict()
retry = 0
print "MEASURE..."
run = True
while run == True:
print(inst.write("INIT"))
#inst.write("INIT:REFR")
time.sleep(2)
retry += 1
stat = inst.query_ascii_values("SYNC:STAT?",converter="b")
print("STATUS: ",stat," Retry:", retry)
    if stat[0] == 1 and stat[1] == 1 and stat[2] == 1:
      run = False
    if retry > 3:
      print "Cannot Obtain Sync!"
      raise SystemExit
#for stat
#print(stat)
res['Power'] = inst.query_ascii_values("FETCh:SUMMary:OSTP?")[0]
res['EVM'] = inst.query_ascii_values("FETC:SUMM:EVM?")[0]
res['FreqError'] = inst.query_ascii_values("FETC:SUMM:FERR?")[0]
res['RSPower'] = inst.query_ascii_values("FETCh:SUMMary:RSTP?")[0]
print res
print inst.query("SENSe:LTE:ANTenna:SELect?")
print inst.query("CONFigure:LTE:DL:CC:SYNC:ANTenna?")
#print inst.query("CONF:DL:SUBF2:ALL3:PREC:AP?")
#print inst.query("TRACe:DATA?")
#print "DONE!"
raw_input()
#RS_SW(inst)
#AnritsuMT8221B()
| mit | -6,667,128,671,122,640,000 | 27.694915 | 228 | 0.640982 | false |
dmsurti/mayavi | tvtk/tools/tvtk_doc.py | 1 | 13554 | """
Utility code that provides classes helpful in choosing a suitable TVTK
class. It does this by providing a list of all the classes along with
the option to be able to search for the documentation.
The nice thing about the UI is that it performs some kind of completion
on names typed by the user, plus it allows users to search through the
TVTK class docs very easily. Once a search string is typed the
completion and available lists are modified so you can do completion of
the searched class names. If a unique enough string is typed the class
docs are shown.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008-2015, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import vtk
import types
import inspect
# Enthought library imports.
from traits.api import HasTraits, Property, List, Str, \
Instance, Button, Int
from traitsui.api import View, Group, Item, EnumEditor,\
ListEditor, TextEditor
from tvtk.api import tvtk
from tvtk.common import get_tvtk_name
################################################################################
# Utility functions.
################################################################################
def get_tvtk_class_names():
"""Returns 4 lists:
1. A list of all the TVTK class names that are not abstract.
2. A list of the TVTK sources (have only outputs and no inputs)
3. A list of the TVTK filters (both inputs and outputs)
4. A list of the TVTK sinks (only inputs and no outputs)
"""
    # Shut off VTK warnings for the time being.
o = vtk.vtkObject
w = o.GetGlobalWarningDisplay()
o.SetGlobalWarningDisplay(0) # Turn it off.
all = []
src = []
filter = []
sink = []
for name in dir(vtk):
if name.startswith('vtk') and not name.startswith('vtkQt'):
klass = getattr(vtk, name)
try:
c = klass()
except (TypeError, NotImplementedError):
continue
tvtk_name = get_tvtk_name(name)
all.append(tvtk_name)
has_input = has_output = False
if hasattr(klass, 'GetNumberOfInputPorts'):
if c.GetNumberOfInputPorts() > 0:
has_input = True
if hasattr(klass, 'GetNumberOfOutputPorts'):
if c.GetNumberOfOutputPorts() > 0:
has_output = True
if has_input:
if has_output:
filter.append(tvtk_name)
else:
sink.append(tvtk_name)
elif has_output:
src.append(tvtk_name)
o.SetGlobalWarningDisplay(w)
result = (all, src, filter, sink)
for x in result:
x.sort()
return result
def get_func_doc(func, fname):
"""Returns function documentation."""
if inspect.isfunction(func):
func_obj = func
elif inspect.ismethod(func):
func_obj = func.__func__
else:
return ''
args, vargs, vkw = inspect.getargs(func_obj.__code__)
defaults = func_obj.__defaults__
doc = fname + inspect.formatargspec(args, vargs, vkw, defaults)
d = inspect.getdoc(func)
if d is not None:
doc += '\n\n' + d + '\n\n'
return doc
def get_tvtk_class_doc(obj):
"""Return's the objects documentation."""
doc = obj.__doc__ + '\nTraits:\n-------------------\n\n'
ignore = ['trait_added', 'trait_modified']
for key, trait in obj.traits().items():
if key.startswith('_') or key.endswith('_') or key in ignore:
continue
doc += '\n%s: %s'%(key, trait.help)
doc += '\nMethods:\n----------------------\n\n'
traits = obj.trait_names()
for name in dir(obj):
if name in traits or name.startswith('_'):
continue
if name.find('trait') > -1 and name != 'update_traits':
continue
func = getattr(obj, name)
if callable(func):
doc += '\n' + get_func_doc(func, name)
return doc
# GLOBALS
TVTK_CLASSES, TVTK_SOURCES, TVTK_FILTERS, TVTK_SINKS = get_tvtk_class_names()
################################################################################
# `DocSearch` class.
################################################################################
class DocSearch(object):
"""A simple class that provides a method to search through class
documentation. This code is taken from mayavi-1.x's ivtk.VtkHelp
"""
    # These are class attributes to prevent regenerating them every time
# this class is instantiated.
VTK_CLASSES = []
VTK_CLASS_DOC = []
def __init__(self):
self.vtk_classes = self.VTK_CLASSES
self.vtk_c_doc = self.VTK_CLASS_DOC
if len(self.VTK_CLASSES) == 0:
self._setup_data()
def _setup_data(self):
self.vtk_classes = [x for x in dir(vtk) if x.startswith('vtk')]
n = len(self.vtk_classes)
# Store the class docs in the list given below.
self.vtk_c_doc = ['']*n
# setup the data.
for i in range(n):
c = self.vtk_classes[i]
try:
doc = getattr(vtk, c).__doc__.lower()
self.vtk_c_doc[i] = doc
except AttributeError:
pass
def search(self, word):
""" Search for word in class documentation and return matching
classes. This is also case insensitive. The searching
supports the 'and' and 'or' keywords that allow for fairly
complex searches. A space between words assumes that the two
words appear one after the other.
Parameters
----------
word -- name to search for.
"""
assert type(word) is str, \
"Sorry, passed argument, %s is not a string."%word
if len(word.strip()) == 0:
return []
lword = word.lower().strip()
tmp_list = lword.split()
wlist = []
prev = ""
for w in tmp_list:
z = w.strip()
if z in ('and', 'or'):
if prev and prev not in ('and', 'or'):
wlist.append(prev)
wlist.append(z)
prev = z
else:
if prev and prev not in ('and', 'or'):
prev = prev + ' ' + z
else:
prev = z
if prev in ('and', 'or'):
del wlist[-1]
elif prev:
wlist.append(prev)
ret = []
i = 0
vtk_classes = self.vtk_classes
vtk_c_doc = self.vtk_c_doc
N = len(vtk_classes)
while i < N:
stored_test = 0
do_test = ''
for w in wlist:
if w == 'and':
do_test = 'and'
elif w == 'or':
do_test = 'or'
else:
test = (vtk_c_doc[i].find(w) > -1)
if do_test == 'and':
stored_test = stored_test and test
elif do_test == 'or':
stored_test = stored_test or test
elif do_test == '':
stored_test = test
if stored_test:
ret.append(vtk_classes[i])
i = i + 1
return [get_tvtk_name(x) for x in ret]
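
# Illustrative sketch (not executed): `DocSearch` can also be used on its
# own to grep the VTK class documentation, e.g.
#
#     searcher = DocSearch()
#     matches = searcher.search('probe and filter')
#
# which returns the matching class names in TVTK form (for example
# 'ProbeFilter' rather than 'vtkProbeFilter').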
_search_help_doc = """
Help on Searching
---------------------------------------
To search for a particular TVTK class, type in the 'class_name' text entry
widget. The class names are all case sensitive. You may also select
the class from the list of available class names at the top.
As you type you will see completion options in the completions
list, the instant a complete match is found the class documentation will
be show in the bottom.
You can also search the TVTK class documentation for strings (case
insensitive). The search option supports the 'and' and 'or' keywords to
do advanced searches. Press <Enter>/<Return> to perform the search.
The top 25 hits will show up in the completions, to view a particular
hit either select the choice from the available ones or type in the
name in the 'class_name' entry box. To clear the search string click
the 'Clear search' button or erase the search string manually.
"""
################################################################################
# `TVTKClassChooser` class.
################################################################################
class TVTKClassChooser(HasTraits):
# The selected object, is None if no valid class_name was made.
object = Property
# The TVTK class name to choose.
class_name = Str('', desc='class name of TVTK class (case sensitive)')
# The string to search for in the class docs -- the search supports
# 'and' and 'or' keywords.
search = Str('', desc='string to search in TVTK class documentation '\
'supports the "and" and "or" keywords. '\
'press <Enter> to start search. '\
'This is case insensitive.')
clear_search = Button
# The class documentation.
doc = Str(_search_help_doc)
# Completions for the choice of class.
completions = List(Str)
# List of available class names as strings.
available = List(TVTK_CLASSES)
########################################
# Private traits.
finder = Instance(DocSearch)
n_completion = Int(25)
########################################
# View related traits.
view = View(Group(Item(name='class_name',
editor=EnumEditor(name='available')),
Item(name='class_name',
has_focus=True
),
Item(name='search',
editor=TextEditor(enter_set=True,
auto_set=False)
),
Item(name='clear_search',
show_label=False),
Item('_'),
Item(name='completions',
editor=ListEditor(columns=3),
style='readonly'
),
Item(name='doc',
resizable=True,
label='Documentation',
style='custom')
),
id='tvtk_doc',
resizable=True,
width=800,
height=600,
title='TVTK class chooser',
buttons = ["OK", "Cancel"]
)
######################################################################
# `object` interface.
######################################################################
def __init__(self, **traits):
super(TVTKClassChooser, self).__init__(**traits)
self._orig_available = list(self.available)
######################################################################
# Non-public interface.
######################################################################
def _get_object(self):
o = None
if len(self.class_name) > 0:
try:
o = getattr(tvtk, self.class_name)()
except (AttributeError, TypeError):
pass
return o
def _class_name_changed(self, value):
av = self.available
comp = [x for x in av if x.startswith(value)]
self.completions = comp[:self.n_completion]
if len(comp) == 1 and value != comp[0]:
self.class_name = comp[0]
o = self.object
if o is not None:
self.doc = get_tvtk_class_doc(o)
else:
self.doc = _search_help_doc
def _finder_default(self):
return DocSearch()
def _clear_search_fired(self):
self.search = ''
def _search_changed(self, value):
if len(value) < 3:
self.available = self._orig_available
return
f = self.finder
result = f.search(str(value))
if len(result) == 0:
self.available = self._orig_available
elif len(result) == 1:
self.class_name = result[0]
else:
self.available = result
self.completions = result[:self.n_completion]
################################################################################
# `TVTKSourceChooser` class.
################################################################################
class TVTKSourceChooser(TVTKClassChooser):
available = List(TVTK_SOURCES)
################################################################################
# `TVTKFilterChooser` class.
################################################################################
class TVTKFilterChooser(TVTKClassChooser):
available = List(TVTK_FILTERS)
################################################################################
# `TVTKSinkChooser` class.
################################################################################
class TVTKSinkChooser(TVTKClassChooser):
available = List(TVTK_SINKS)
def main():
"""Pops up a class chooser which doubles as a nice help search
documentation tool.
"""
s = TVTKClassChooser()
s.configure_traits()
if __name__ == '__main__':
main()
| bsd-3-clause | 2,175,080,541,615,401,700 | 32.384236 | 80 | 0.486941 | false |
anarcher/enso-launcher-continued | enso/commands/suggestions.py | 1 | 18589 | # Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.commands.suggestions
#
# ----------------------------------------------------------------------------
"""
Classes for encapsulating suggestions (including auto-completions).
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
import re
import enso.utils.strings
import enso.utils.xml_tools
# This is used in loop so better to import the function directly to avoid lookup penalty
from enso.utils.xml_tools import escape_xml
# ----------------------------------------------------------------------------
# Suggestion Objects
# ----------------------------------------------------------------------------
class Suggestion:
"""
An object the encapsulates a "suggestion". A "suggestion" is
essentially a string from a list that is similar to some source
string.
Suggestion objects keep track of the original source string, and
has utility methods for marking-up the suggestion to indicate
similarities to the source string (i.e., which characters of the
suggestion are the same as the original, which are added, and
which are altered).
"""
def __init__( self, originalText, suggestedText, helpText = None, prefix_end=None, start=None, end=None, suggestedPrefix=None ):
"""
Initializes the Suggestion: suggestedText is the suggestion
for originalText.
"""
assert isinstance( originalText, basestring )
assert isinstance( suggestedText, basestring )
# The "source" or "original" text is the text that the user
# typed.
self.__source = originalText
# The "suggestion" is the text that very nearly matches
# the user's typed text.
self.__suggestion = suggestedText
self.__suggestedPrefix = suggestedPrefix
# The "help" text is text that is not actually part of the
# suggestion, per-se, but should be displayed after the
# suggestion to indicate that something should follow the
# suggestion before it is complete and valid.
self.__helpText = helpText
# The xml representation of this suggestion; will not be
# created until requested.
self.__xml = None
# The completion of the user text to the next word.
#self.__completion = None
self.__prefix_end = prefix_end
self.__start = start
self.__end = end
# For performance reasons, compute the "nearness" value
# and cache it.
self._nearness = self.__getNearness()
def getHelpText( self ):
return self.__helpText
"""
TODO:This is broken because the __transform() function has been optimized
and is not setting __completion variable.
It is not used anywhere in the code anyway...
def toNextWord( self ):
""
Returns the simple string representation of the suggestion, i.e.,
the exact suggested text.
Example:
>>> s = Suggestion( 'fo', 'foo bar' )
>>> s.toNextWord()
'foo '
""
if self.__completion is None:
self.__transform()
return self.__completion
"""
def toText( self ):
"""
Returns the simple string representation of the suggestion, i.e.,
the exact suggested text.
Example:
>>> s = Suggestion( 'fo', 'foo' )
>>> s.toText()
'foo'
"""
return self.__suggestion
def getSource( self ):
"""
Returns the "source" string, i.e., the string for which this
object is a suggestion.
Example:
>>> s = Suggestion( 'fo', 'foo' )
>>> s.getSource()
'fo'
"""
return self.__source
def getSuggestedPrefix( self ):
"""
"""
return self.__suggestedPrefix
def __getNearness( self ):
"""
Returns a number between 0 and 1 indicating how near the
original string this suggestion is; 0 means totally different,
and 1 means exactly the same.
NOTE: As long as the return value remains as described,
this method may be overridden to implement custom notions of
"nearness".
"""
result = enso.utils.strings.stringRatio( self.__source,
self.__suggestion )
assert (result >= 0) and (result <= 1),\
"string-ratio is not between 0 and 1: %0.1f" % result
return result
def __eq__( self, other ):
"""
Considers two suggestions to be equal if they suggest the same
string.
"""
if not isinstance( other, Suggestion ):
# The other object isn't a Suggestion, so they can't
# possibly be equal.
return False
else:
return self.toText() == other.toText()
def __ne__( self, other ):
"""
Considers two suggestions to be unequal if they do not suggest the
same text.
"""
# Simply return the inverse of __eq__
return not self.__eq__( other )
def __cmp__( self, other ):
"""
Compares two suggestions on the basis of nearness.
"""
# NOTE: This function is called SO OFTEN, that using getter's
# for the nearness values incurs a NOTICEABLE performance
# penalty.
# Returning the inverse of the value, because 1 is near and 0
# is far.
# Original:
#return - cmp( self._nearness, other._nearness )
if self._nearness < other._nearness: #IGNORE:W0212
return 1
elif self._nearness > other._nearness: #IGNORE:W0212
return -1
else:
# If the nearness is equal, return alphabetical order
return cmp(self.__suggestion, other.__suggestion) #IGNORE:W0212
def toXml( self ):
"""
Transforms the suggestion into a simple XML string. There are
three tags:
<ins></ins> marks an "insertion", i.e., something added to
the original text to make the suggestion.
<alt></alt> marks an "alteration", i.e., a substring of the
original string was replaced with a new substring to make
the suggestion.
<help></help> marks a "help" text, i.e., a string that
indicates the suggestion should be followed by some
additional text; this string is for the user's aid, and is
not part of the suggestion proper.
Anything not contained in these tags was part of the original
text.
NOTE: The return value does not have a "root" tag, and is
therefore not well-formed XML.
Here is a simple example using insertions and help text:
>>> Suggestion( 'fo', 'foo', 'fooObject' ).toXml()
'fo<ins>o</ins><help>fooObject</help>'
Here is a simple example using alterations:
>>> Suggestion( 'fog', 'foo' ).toXml()
'fo<alt>o</alt>'
        The default implementation finds the largest substring of
the original text that (a) includes the first character of the
original text and (b) is entirely contained in the suggestion.
It then repeats this with the remainder of the original text.
So, for instance, if our original text is 'foobar' and our
suggestion text is 'foo the bar', the default implementation
will first match 'foo' to part of the suggestion; at this
point the remainder of the original text will be 'bar', which
it will find a substring for in the suggestion text as well.
This is shown in the following example:
>>> Suggestion( 'foobar', 'foo the bar' ).toXml()
'foo<ins> the </ins>bar'
Furthermore, if there is no initial substring of the original
text in the suggestion text (i.e., condition 'a' from above) ,
the first character is removed from the original text and the
algorithm proceeds as described above, marking a corresponding
part of the suggestion string as an alteration, if
applicable:
>>> Suggestion( 'zzzfo', 'gfoo' ).toXml()
'<alt>g</alt>fo<ins>o</ins>'
>>> Suggestion( 'zzzfo', 'foo' ).toXml()
'fo<ins>o</ins>'
Finally, if no substring of the original text matches the
suggestion text, the entire suggestion text is returned as an
alteration:
>>> Suggestion( 'zzz', 'defghi' ).toXml()
'<alt>defghi</alt>'
NOTE: This method is intended to be overriden by subclasses
that have specialized ways of determining what was original
and what was inserted or altered.
"""
# This class is read-only; the only "setters" are through the
# constructor. If we have previously computed the xml value,
# return that cached value.
        if self.__xml is None:
self.__transform()
return self.__xml
def __transform( self ):
if self.__start is not None:
#s = escape_xml(self.__suggestion)
xmlText = "%s<ins>%s</ins>%s<ins>%s</ins>" % (
escape_xml(self.__suggestion[:self.__prefix_end]),
escape_xml(self.__suggestion[self.__prefix_end:self.__prefix_end+self.__start]),
escape_xml(self.__suggestion[self.__prefix_end+self.__start:self.__prefix_end+self.__end]),
escape_xml(self.__suggestion[self.__prefix_end+self.__end:])
)
if self.__suggestedPrefix and xmlText.startswith(self.__suggestedPrefix):
xmlText = "<prefix>%s</prefix>%s" % (escape_xml(self.__suggestedPrefix), xmlText[len(self.__suggestedPrefix):])
# Finally, add help text, if it exists.
if self.__helpText is not None:
xmlText = "%s<help>%s</help>" % (xmlText, escape_xml(self.__helpText))
self.__xml = xmlText
return
else:
pass
# We are going to "use up" both the source string and the
# suggestion
unusedSource = self.__source[:]
unusedSuggestion = self.__suggestion[:]
# The xml representation
xmlText = ""
# The "to the next word" completion.
completion = ""
# If we cannot match an initial substring of unusedSource,
# then we are going to peel off characters one-by-one into
# this variable. These characters have been lost in the
# suggestion, and will cause "insertions" to instead be
# "alterations".
unmatchedChars = ""
# BEGIN SOURCE-STRING LOOP
# Each iteration of this loop should reduce the length of
# unusedSource, and this loop ends when unusedSource is empty.
while len(unusedSource) > 0:
# Save a copy of unusedSource, so we know if it changes.
oldUnusedSource = unusedSource[:]
# Loop from the full length of unusedSource down to one
# character
for i in range( len(unusedSource), 0, -1 ):
# The initial substring we are trying to locate.
target = unusedSource[:i]
# BEGIN TARGET-FOUND CONDITION
if target in unusedSuggestion:
# Search normally from begining
index = unusedSuggestion.find( target )
# Search on word boundaries. This is different from \b in
# that it considers also the underscore character as a word boundary.
m = re.match(r".*[^0-9a-zA-Z](%s)" % re.escape(target), unusedSuggestion, re.I)
if m and m.groups() and m.start(1) > index:
# Prefer word boundary match
index = m.start(1)
# index, m.start(1)
if index > 0:
if len(unmatchedChars) > 0:
# There were unused characters in the
# source, and there were characters in the
# unused suggestion before the target, so
# the next "inserted" portion of the
# suggestion becomes an "alteration"
# instead.
xmlFormat = "<alt>%s</alt>"
else:
xmlFormat = "<ins>%s</ins>"
xmlText += xmlFormat % escape_xml(
unusedSuggestion[:index]
)
# NOTE: Do not add inserted characters to the
# 'next word' completion.
# Whether or not there were characters between
# the start of the unused suggestion and "here",
# any unmatched chars are now defunct.
unmatchedChars = ""
xmlText += escape_xml( target )
completion += target
unusedSuggestion = unusedSuggestion[index+len(target):]
unusedSource = unusedSource[i:]
# The target was found and unusedSource was
# modified; we exit the for-loop (to be entered
# again if unusedSource is still nonempty).
break
# END TARGET-FOUND CONDITION
# Either unusedSource is smaller, or it is the same as
# oldUnusedSource. If it is the same as old unusedSource,
# then there was no match of a beginning substring, so we
# remove the first character and store it as an "unused
# character", which will become part of an "altered
# substring", if there is a match to a later substring.
if unusedSource == oldUnusedSource:
unmatchedChars += unusedSource[0]
unusedSource = unusedSource[1:]
assert len( unusedSource ) < len( oldUnusedSource ), \
"Potential infinite loop condition; failed to reduce"\
" the length of the unused portion of the source string"\
" in toXml()"
# END SOURCE-STRING LOOP
# The source-string loop above only guarantees to use up the
# source string; there may be an unused portion of the
# suggestion left. We append it to the xml string as an
# insertion (or alteration, if appropriate).
if len( unusedSuggestion ) > 0:
if len( unmatchedChars ) > 0:
format = "<alt>%s</alt>"
else:
format = "<ins>%s</ins>"
unusedXml = escape_xml( unusedSuggestion )
xmlText += format % unusedXml
completion += unusedSuggestion.split(" ")[0]
if " " in unusedSuggestion:
completion += " "
# Finally, add the help text, if it exists.
        if self.__helpText is not None:
xmlText += "<help>%s</help>" % self.__helpText
if self.__suggestedPrefix and xmlText.startswith(self.__suggestedPrefix):
xmlText = "<prefix>%s</prefix>%s" % (escape_xml(self.__suggestedPrefix), xmlText[len(self.__suggestedPrefix):])
self.__xml = xmlText
#print "COMPLETION: \"%s\"" % completion
#self.__completion = completion
class AutoCompletion( Suggestion ):
"""
Encapsulates a single auto-completed suggestion.
Basically the same as a suggestion, except that it requires either
(1) that each word of the original text be contained in the
suggestion, or (2) that the suggestion be empty (indicating a
failed autocompletion).
"""
def __init__( self, originalText, suggestedText, helpText=None, prefix_end=None, start=None, end=None ):
"""
Initializes the AutoCompletion.
"""
# Enforce the object's preconditions.
if len( suggestedText ) > 0:
assertionText = "Attempted to create AutoCompletion %s from %s, "\
"but %s was not found."
words = originalText.split( " " )
# LONGTERM TODO: Don't handle this as a special case.
if words[-1].endswith( "?" ):
words[-1] = words[-1][:-1]
words.append( "?" )
for word in words:
assert word in suggestedText, \
assertionText % ( suggestedText, originalText, word)
# The text matches one of the class's two required conditions,
# so initialize self as a Suggestion.
Suggestion.__init__( self, originalText, suggestedText, helpText, prefix_end, start, end )
def hasCompletion(self):
return bool(self.toText())
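
# Illustrative sketch (not executed): a successful completion keeps every
# word of the user's text, while an empty suggestion models a failed
# completion, e.g.
#
#     AutoCompletion( 'open file', 'open the file' ).hasCompletion()  # True
#     AutoCompletion( 'open file', '' ).hasCompletion()               # False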
| bsd-3-clause | -5,472,798,759,257,362,000 | 37.97065 | 132 | 0.571467 | false |
ffmmjj/desafio-dados-2016 | data_preparation_pipeline/outliers_separation.py | 1 | 1185 | import luigi
import pandas as pd
from augment_data import AppendFeaturesAggregatedFromTeachersDatasetToSchool
class SplitSchoolOutliersData(luigi.Task):
input_task = AppendFeaturesAggregatedFromTeachersDatasetToSchool()
def requires(self):
return self.input_task
def output(self):
return {'average': luigi.LocalTarget('./dados/2013/TS_ESCOLA_average.csv'),
'outstanding': luigi.LocalTarget('./dados/2013/TS_ESCOLA_outstanding.csv')}
def run(self):
with self.input_task.output().open('r') as fp:
escolas_pd = pd.read_csv(fp)
escolas_statistics = escolas_pd['MEDIA_9EF_MT'].describe()
math_avg, math_std = escolas_statistics.values[1], escolas_statistics.values[2]
above_two_std_schools_indices = escolas_pd['MEDIA_9EF_MT'] > (math_avg + 2*math_std)
below_two_std_schools_indices = escolas_pd['MEDIA_9EF_MT'] < (math_avg + 2*math_std)
with self.output()['average'].open('w') as fp:
escolas_pd[below_two_std_schools_indices].to_csv(fp)
with self.output()['outstanding'].open('w') as fp:
escolas_pd[above_two_std_schools_indices].to_csv(fp) | apache-2.0 | -158,811,922,506,397,440 | 38.533333 | 92 | 0.665823 | false |
wehlutyk/Watson | scripts/fuzzer.py | 1 | 1043 | import arrow
import random
from watson import Watson
watson = Watson(frames=None, current=None)
projects = [
("apollo11", ["reactor", "module", "wheels", "steering", "brakes"]),
("hubble", ["lens", "camera", "transmission"]),
("voyager1", ["probe", "generators", "sensors", "antenna"]),
("voyager2", ["probe", "generators", "sensors", "antenna"]),
]
now = arrow.now()
for date in arrow.Arrow.range('day', now.replace(months=-1), now):
if date.weekday() in (5, 6):
# Weekend \o/
continue
start = date.replace(
hour=9, minute=random.randint(0, 59), seconds=random.randint(0, 59)
)
while start.hour < random.randint(16, 19):
project, tags = random.choice(projects)
frame = watson.frames.add(
project,
start,
start.replace(seconds=random.randint(60, 4 * 60 * 60)),
tags=random.sample(tags, random.randint(0, len(tags)))
)
start = frame.stop.replace(seconds=random.randint(0, 1 * 60 * 60))
watson.save()
| mit | 4,692,943,791,232,215,000 | 27.972222 | 75 | 0.591563 | false |
nedbat/zellij | tests/test_intersection.py | 1 | 1510 | from hypothesis import assume, given
from hypothesis.strategies import builds, lists, integers, tuples
from zellij.defuzz import Defuzzer
from zellij.euclid import collinear, Segment, BadGeometry
from zellij.intersection import segment_intersections
from zellij.postulates import all_pairs
nums = integers(min_value=-1000, max_value=1000)
points = tuples(nums, nums)
segments = builds(lambda l: Segment(*l), lists(points, min_size=2, max_size=2, unique=True))
@given(lists(segments, min_size=2, max_size=100, unique=True))
def test_intersections(segments):
defuzz = Defuzzer().defuzz
# Check that none of our segment pairs are pathological, and collect the
# true answers the hard way, by checking pair-wise.
true = set()
for s1, s2 in all_pairs(segments):
try:
ipt = s1.intersect(s2)
if ipt is not None:
true.add(defuzz(ipt))
except BadGeometry:
# If two segments don't have an answer, then don't use this test
# case.
assume(False)
# Run the actual function we care about.
isects = segment_intersections(segments)
for pt, segs in isects.items():
# Property: the answer should be in the true answers we found the hard
# way.
assert defuzz(pt) in true
# Property: every intersection should be collinear with the segment it
# claims to be part of.
for seg in segs:
s1, s2 = seg
assert collinear(s1, pt, s2)
| apache-2.0 | 2,406,871,413,739,147,000 | 34.116279 | 92 | 0.662252 | false |
riscmaster/risc_maap | risc_control/src/circles3_traj.py | 2 | 3789 | #!/usr/bin/env python
'''======================================================
Created by: D. Spencer Maughan
Last updated: July 2015
File name: circles3_traj.py
Organization: RISC Lab, Utah State University
======================================================'''
import roslib; roslib.load_manifest('ardrone_tutorials')
roslib.load_manifest('risc_msgs')
import rospy
from math import *
import numpy as np
import time
#=======================#
# Messages Needed #
#=======================#
from risc_msgs.msg import *
#========================#
# Globals #
#========================#
start_time = 0
pub = rospy.Publisher('trajectory',Trajectories,queue_size = 200)
# Trajectory Variables
period = 8 # seconds
a = 0
b = 0
c = 0
n = 1
w1 = 2*np.pi/period
#====================================#
# Update and Publish Trajectory #
#====================================#
def Datahandler():
global start_time, pub, period, a, b, c, n, w1
time_now = rospy.get_time()
t = time_now-start_time
WP = Trajectories()
num_traj = 3 # number of trajectories
WP.Obj = [Trajectory()]*num_traj
d = 0.5 #Distance from origin
#=================#
# Trajectory #
#=================#
traj1 = Trajectory()
# Position
traj1.x = d*cos(0*np.pi/num_traj)+a*cos(w1*t)
traj1.y = d*sin(0*np.pi/num_traj)+b*sin(w1*t)
traj1.z = n+c*sin(w1*t)
traj1.psi = w1*t
# Velocity
traj1.xdot = -a*w1*sin(w1*t)
traj1.ydot = b*w1*cos(w1*t)
traj1.zdot = c*w1*cos(w1*t)
traj1.psidot = w1
# Acceleration
traj1.xddot = -a*w1*w1*cos(w1*t)
traj1.yddot = -b*w1*w1*sin(w1*t)
traj1.zddot = -c*w1*w1*sin(w1*t)
traj1.psiddot = 0
traj2 = Trajectory()
# Position
traj2.x = d*cos(2*1*np.pi/num_traj)+a*cos(w1*t+period/num_traj)
traj2.y = d*sin(2*1*np.pi/num_traj)+b*sin(w1*t+period/num_traj)
traj2.z = n+c*sin(w1*t+period/num_traj)
traj2.psi = w1*t+period/num_traj
# Velocity
    traj2.xdot = -a*w1*sin(w1*t+period/num_traj)
    traj2.ydot = b*w1*cos(w1*t+period/num_traj)
    traj2.zdot = c*w1*cos(w1*t+period/num_traj)
    traj2.psidot = w1
    # Acceleration
    traj2.xddot = -a*w1*w1*cos(w1*t+period/num_traj)
    traj2.yddot = -b*w1*w1*sin(w1*t+period/num_traj)
    traj2.zddot = -c*w1*w1*sin(w1*t+period/num_traj)
traj2.psiddot = 0
traj3 = Trajectory()
# Position
traj3.x = d*cos(2*2*np.pi/num_traj)+a*cos(w1*t+2*period/num_traj)
traj3.y = d*sin(2*2*np.pi/num_traj)+b*sin(w1*t+2*period/num_traj)
traj3.z = n+c*sin(w1*t+2*period/num_traj)
traj3.psi = w1*t+2*period/num_traj
# Velocity
    traj3.xdot = -a*w1*sin(w1*t+2*period/num_traj)
    traj3.ydot = b*w1*cos(w1*t+2*period/num_traj)
    traj3.zdot = c*w1*cos(w1*t+2*period/num_traj)
    traj3.psidot = w1
    # Acceleration
    traj3.xddot = -a*w1*w1*cos(w1*t+2*period/num_traj)
    traj3.yddot = -b*w1*w1*sin(w1*t+2*period/num_traj)
    traj3.zddot = -c*w1*w1*sin(w1*t+2*period/num_traj)
traj3.psiddot = 0
#==================#
# Publish #
#==================#
WP.Obj = [traj1, traj2, traj3]
pub.publish(WP)
#===================#
# Main #
#===================#
if __name__=='__main__':
rospy.init_node('circles_traj')
start_time = rospy.get_time()
#=====================================#
# Set up Publish/Subscribe Loop #
#=====================================#
r = rospy.Rate(200)
while not rospy.is_shutdown():
Datahandler()
r.sleep()
rospy.loginfo("Trajectory Node Has Shutdown.")
rospy.signal_shutdown(0)
| bsd-2-clause | -1,619,490,676,580,143,600 | 27.704545 | 75 | 0.484033 | false |
jeremykid/Algorithm_project | trial_division.py | 1 | 1193 | import math
import time
def primeGenerate(number):
largest = number
prime_list = largest*[1]
if (number<4):
return [2,3]
prime_list[1] = 0
for i in range(0,largest,2):
prime_list[i] = 0
prime_list[2] = 1
for i in range(3,largest,2):
if (prime_list[i] == 1):
for j in range(2*i,largest,i):
                prime_list[j] = 0  # mark multiples of i as composite
result = []
# print (prime_list,number)
for i in range(0,number):
if(prime_list[i] == 1):
result.append(i)
return result
def trial_division(n):
"""Return a list of the prime factors for a natural number."""
if n < 2:
return []
prime_factors = []
for p in primeGenerate(int(n**0.5) + 1):
if p*p > n:
break
while n % p == 0:
prime_factors.append(p)
n //= p
if n > 1:
prime_factors.append(n)
return prime_factors
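
# Illustrative sketch (not executed): trial_division() can be exercised
# directly, e.g.
#
#     trial_division(360)    # -> [2, 2, 2, 3, 3, 5]
#     trial_division(97)     # -> [97]
#
# since 360 = 2**3 * 3**2 * 5 and 97 is prime.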
def runner():
testcases = int(input("How many Testcases: "))
for i in range(testcases):
timeA = time.time()
number = int(input("number:"))
print trial_division(number)
timeB = time.time()
print (timeB - timeA)
| mit | 765,561,132,604,464,000 | 22.392157 | 66 | 0.524728 | false |
sgibbes/carbon-budget | gain/utilities.py | 1 | 4640 | import subprocess
import glob
import sys
sys.path.append('../')
import constants_and_names as cn
def s3_folder_download(source, dest):
cmd = ['aws', 's3', 'cp', source, dest, '--recursive']
subprocess.check_call(cmd)
def s3_file_download(source, dest):
cmd = ['aws', 's3', 'cp', source, dest]
subprocess.check_call(cmd)
# Lists the tiles in a folder in s3
def tile_list(source):
## For an s3 folder in a bucket using AWSCLI
# Captures the list of the files in the folder
out = subprocess.Popen(['aws', 's3', 'ls', source], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
# Writes the output string to a text file for easier interpretation
biomass_tiles = open("biomass_tiles.txt", "w")
biomass_tiles.write(stdout)
biomass_tiles.close()
file_list = []
# Iterates through the text file to get the names of the tiles and appends them to list
with open("biomass_tiles.txt", 'r') as tile:
for line in tile:
num = len(line.strip('\n').split(" "))
tile_name = line.strip('\n').split(" ")[num - 1]
# Only tifs will be in the tile list
if '.tif' in tile_name:
# For stripping down standard tree biomass tiles to the tile id
if '_biomass.tif' in tile_name:
tile_short_name = tile_name.replace('_biomass.tif', '')
file_list.append(tile_short_name)
# For stripping down mangrove biomass tiles to the tile id
if cn.pattern_mangrove_biomass_2000 in tile_name:
tile_short_name = tile_name.replace('{}_'.format(cn.pattern_mangrove_biomass_2000), '')
tile_short_name = tile_short_name.replace('.tif', '')
file_list.append(tile_short_name)
file_list = file_list[0:]
return file_list
# Gets the bounding coordinates of a tile
def coords(tile_id):
NS = tile_id.split("_")[0][-1:]
EW = tile_id.split("_")[1][-1:]
if NS == 'S':
ymax =-1*int(tile_id.split("_")[0][:2])
else:
ymax = int(str(tile_id.split("_")[0][:2]))
if EW == 'W':
xmin = -1*int(str(tile_id.split("_")[1][:3]))
else:
xmin = int(str(tile_id.split("_")[1][:3]))
ymin = str(int(ymax) - 10)
xmax = str(int(xmin) + 10)
return ymax, xmin, ymin, xmax
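
# Illustrative note: for a 10x10 degree tile id, coords() above returns,
# e.g.
#
#     coords('00N_110E')   # -> (0, 110, '-10', '120')
#
# i.e. ymax/xmin come back as ints while ymin/xmax come back as strings; the
# gdal command builders below str() everything anyway.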
# Rasterizes the shapefile within the bounding coordinates of a tile
def rasterize(in_shape, out_tif, xmin, ymin, xmax, ymax, tr=None, ot=None, gainEcoCon=None, anodata=None):
cmd = ['gdal_rasterize', '-co', 'COMPRESS=LZW',
# Input raster is ingested as 1024x1024 pixel tiles (rather than the default of 1 pixel wide strips
'-co', 'TILED=YES', '-co', 'BLOCKXSIZE=1024', '-co', 'BLOCKYSIZE=1024',
'-te', str(xmin), str(ymin), str(xmax), str(ymax),
'-tr', tr, tr, '-ot', ot, '-a', gainEcoCon, '-a_nodata',
anodata, in_shape, '{}.tif'.format(out_tif)]
subprocess.check_call(cmd)
return out_tif
# Uploads tile to specified location
def upload_final(upload_dir, tile_id, pattern):
file = '{}_{}.tif'.format(tile_id, pattern)
print "Uploading {}".format(file)
cmd = ['aws', 's3', 'cp', file, upload_dir]
try:
subprocess.check_call(cmd)
except:
print "Error uploading output tile"
##### Not currently using the below functions
def wgetloss(tile_id):
print "download hansen loss tile"
cmd = ['wget', r'http://glad.geog.umd.edu/Potapov/GFW_2015/tiles/{}.tif'.format(tile_id)]
subprocess.check_call(cmd)
def wget2015data(tile_id, filetype):
outfile = '{0}_{1}_h.tif'.format(tile_id, filetype)
website = 'https://storage.googleapis.com/earthenginepartners-hansen/GFC-2015-v1.3/Hansen_GFC-2015-v1.3_{0}_{1}.tif'.format(filetype, tile_id)
cmd = ['wget', website, '-O', outfile]
print cmd
subprocess.check_call(cmd)
return outfile
def rasterize_shapefile(xmin, ymax, xmax, ymin, shapefile, output_tif, attribute_field):
layer = shapefile.replace(".shp", "")
# attribute_field = 'old_100'
cmd= ['gdal_rasterize', '-te', str(xmin), str(ymin), str(xmax), str(ymax), '-a', attribute_field, '-co', 'COMPRESS=LZW', '-tr', '.00025', '.00025', '-tap', '-a_nodata', '0', '-l', layer, shapefile, output_tif]
subprocess.check_call(cmd)
return output_tif
def resample_00025(input_tif, resampled_tif):
# resample to .00025
cmd = ['gdal_translate', input_tif, resampled_tif, '-tr', '.00025', '.00025', '-co', 'COMPRESS=LZW']
subprocess.check_call(cmd)
| apache-2.0 | 3,722,346,610,201,929,000 | 31.222222 | 213 | 0.603448 | false |
dwhoman/CVPI | tests/hypothesis/testing_hypothesis.py | 1 | 2209 | import subprocess
import numpy as np
import hypothesis as h
import hypothesis.strategies as st
import hypothesis.extra.numpy as hnp
#h.settings(buffer_size = 819200000)
min_img_width = 1
min_img_height = 1
max_img_width = 10
max_img_height = 10
max_uint32 = 2**32 - 1
max_int32 = 2**31 - 1
min_int32 = -(2**31)
max_short = 2**15 - 1
min_short = -(2**15)
def thirty2to8s(np_num):
return [int(i) for i in int(np_num).to_bytes(4, byteorder='big', signed=False)]
def twoDto3d(np_array):
return np.array([[[z for z in thirty2to8s(y)] for y in x] for x in np_array], dtype=np.uint64)
def image_hex(np_array):
return ''.join(["%02x" % (x) for x in np_array.flatten('C')])
@st.composite
def np_images(draw,
number,
width=st.integers(min_img_width, max_img_width).example(),
height=st.integers(min_img_height, max_img_height).example()):
return draw(st.lists(hnp.arrays(np.uint32, (width,height),
elements=st.integers(0,max_uint32)),
min_size=number, max_size=number))
@h.given(np_images(2),
st.integers(1, 5),
st.integers(1, 5),
st.floats(1.0, 1.0),
st.floats(0, 0))
def test_add_images(images, a, b, scale, bias):
assert len(images) == 2
assert images[0].shape == images[1].shape
image_1 = twoDto3d(images[0])
image_2 = twoDto3d(images[1])
image_sum = np.clip(np.ceil(scale * (a * image_1 + b * image_2) + bias), 0, 255)
compl_proc = subprocess.check_output([
"./cvpi_tests_hyp",
"cvpi_image_add",
image_hex(image_1),
image_hex(image_2),
str(images[0].shape[0]),
str(images[0].shape[1]),
str(a), str(b), format(scale, 'f'), format(bias, 'f')])
compl_proc_str = ''.join(map(chr, compl_proc))
numpy_image_str = image_hex(image_sum) + "\n"
h.note(str(images[0].shape[0]) + " " + str(images[0].shape[1]))
h.note(image_hex(image_1))
h.note(image_hex(image_2))
h.note("cvpi: " + compl_proc_str)
h.note("numpy: " + numpy_image_str)
assert numpy_image_str == compl_proc_str
if __name__ == '__main__':
test_add_images()
| apache-2.0 | -1,831,955,842,194,675,200 | 29.680556 | 98 | 0.583975 | false |
theanalyst/cinder | cinder/db/sqlalchemy/api.py | 1 | 94793 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import sys
import threading
import uuid
import warnings
from oslo.config import cfg
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_
from sqlalchemy.orm import joinedload, joinedload_all
from sqlalchemy.orm import RelationshipProperty
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql import func
from cinder.common import sqlalchemyutils
from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.openstack.common.db import exception as db_exc
from cinder.openstack.common.db import options
from cinder.openstack.common.db.sqlalchemy import session as db_session
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder.openstack.common import uuidutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
options.set_defaults(sql_connection='sqlite:///$state_path/cinder.sqlite',
sqlite_db='cinder.sqlite')
_LOCK = threading.Lock()
_FACADE = None
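
# The engine/session facade is created once on first use and cached in
# _FACADE; _LOCK guards that one-time creation so concurrent callers cannot
# end up building more than one engine.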
def _create_facade_lazily():
global _LOCK
with _LOCK:
global _FACADE
if _FACADE is None:
_FACADE = db_session.EngineFacade(
CONF.database.connection,
**dict(CONF.database.iteritems())
)
return _FACADE
def get_engine():
facade = _create_facade_lazily()
return facade.get_engine()
def get_session(**kwargs):
facade = _create_facade_lazily()
return facade.get_session(**kwargs)
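
# Illustrative sketch (not executed): the helpers below follow a common
# pattern for writes -- get a session from the facade and wrap the work in a
# transaction; ``models.Service`` here is just a stand-in for any model
# defined in cinder.db.sqlalchemy.models.
#
#     session = get_session()
#     with session.begin():
#         ref = models.Service()
#         ref.update(values)
#         ref.save(session)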
_DEFAULT_QUOTA_NAME = 'default'
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
warnings.warn(_('Use of empty request context is deprecated'),
DeprecationWarning)
        raise Exception(_('Empty request context is not allowed'))
return context.is_admin
def is_user_context(context):
"""Indicates if the request context is a normal user."""
if not context:
return False
if context.is_admin:
return False
if not context.user_id or not context.project_id:
return False
return True
def authorize_project_context(context, project_id):
"""Ensures a request has permission to access the given project."""
if is_user_context(context):
if not context.project_id:
raise exception.NotAuthorized()
elif context.project_id != project_id:
raise exception.NotAuthorized()
def authorize_user_context(context, user_id):
"""Ensures a request has permission to access the given user."""
if is_user_context(context):
if not context.user_id:
raise exception.NotAuthorized()
elif context.user_id != user_id:
raise exception.NotAuthorized()
def authorize_quota_class_context(context, class_name):
"""Ensures a request has permission to access the given quota class."""
if is_user_context(context):
if not context.quota_class:
raise exception.NotAuthorized()
elif context.quota_class != class_name:
raise exception.NotAuthorized()
def require_admin_context(f):
"""Decorator to require admin request context.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
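
# Illustrative sketch (not executed): the two context decorators above assume
# that the wrapped DB API function takes the request context as its first
# positional argument, e.g.
#
#     @require_admin_context
#     def example_get_all(context):          # hypothetical function name
#         return model_query(context, models.Service).all()
#
# and a non-admin caller is rejected with exception.AdminRequired.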
def require_volume_exists(f):
"""Decorator to require the specified volume to exist.
Requires the wrapped function to use context and volume_id as
their first two arguments.
"""
def wrapper(context, volume_id, *args, **kwargs):
volume_get(context, volume_id)
return f(context, volume_id, *args, **kwargs)
wrapper.__name__ = f.__name__
return wrapper
def require_snapshot_exists(f):
"""Decorator to require the specified snapshot to exist.
Requires the wrapped function to use context and snapshot_id as
their first two arguments.
"""
def wrapper(context, snapshot_id, *args, **kwargs):
snapshot_get(context, snapshot_id)
return f(context, snapshot_id, *args, **kwargs)
wrapper.__name__ = f.__name__
return wrapper
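
# Illustrative sketch (not executed): the *_exists decorators above
# additionally assume (context, volume_id/snapshot_id, ...) ordering, e.g.
#
#     @require_volume_exists
#     def example_volume_update(context, volume_id, values):   # hypothetical
#         ...
#
# so a missing volume is reported by the lookup before the wrapped body runs.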
def model_query(context, *args, **kwargs):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param session: if present, the session to use
:param read_deleted: if present, overrides context's read_deleted field.
:param project_only: if present and context is user-type, then restrict
query to match the context's project_id.
"""
session = kwargs.get('session') or get_session()
read_deleted = kwargs.get('read_deleted') or context.read_deleted
project_only = kwargs.get('project_only')
query = session.query(*args)
if read_deleted == 'no':
query = query.filter_by(deleted=False)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter_by(deleted=True)
else:
raise Exception(
_("Unrecognized read_deleted value '%s'") % read_deleted)
if project_only and is_user_context(context):
query = query.filter_by(project_id=context.project_id)
return query
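
# Illustrative sketch (not executed): model_query is the building block for
# most reads in this module; a project-scoped lookup that honors the
# context's read_deleted setting looks like
#
#     result = model_query(context, models.Volume, project_only=True).\
#         filter_by(id=volume_id).\
#         first()
#
# (models.Volume / volume_id are placeholders -- any model in this file is
# queried the same way).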
def _sync_volumes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(volumes, gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'volumes'
if volume_type_name:
key += '_' + volume_type_name
return {key: volumes}
def _sync_snapshots(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(snapshots, gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'snapshots'
if volume_type_name:
key += '_' + volume_type_name
return {key: snapshots}
def _sync_gigabytes(context, project_id, session, volume_type_id=None,
volume_type_name=None):
(_junk, vol_gigs) = _volume_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
key = 'gigabytes'
if volume_type_name:
key += '_' + volume_type_name
if CONF.no_snapshot_gb_quota:
return {key: vol_gigs}
(_junk, snap_gigs) = _snapshot_data_get_for_project(
context, project_id, volume_type_id=volume_type_id, session=session)
return {key: vol_gigs + snap_gigs}
QUOTA_SYNC_FUNCTIONS = {
'_sync_volumes': _sync_volumes,
'_sync_snapshots': _sync_snapshots,
'_sync_gigabytes': _sync_gigabytes,
}
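
# Illustrative sketch (not executed): quota resources carry the *name* of
# their sync routine (e.g. '_sync_volumes'); quota_reserve() resolves it via
# the QUOTA_SYNC_FUNCTIONS mapping above and calls it to recount usage:
#
#     sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
#     updates = sync(elevated, project_id, session=session)
#
# Each routine returns a dict such as {'volumes': <count>} or
# {'gigabytes_<type name>': <size>} that is written back into quota_usages.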
###################
@require_admin_context
def service_destroy(context, service_id):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id, session=session)
service_ref.delete(session=session)
@require_admin_context
def _service_get(context, service_id, session=None):
result = model_query(
context,
models.Service,
session=session).\
filter_by(id=service_id).\
first()
if not result:
raise exception.ServiceNotFound(service_id=service_id)
return result
@require_admin_context
def service_get(context, service_id):
return _service_get(context, service_id)
@require_admin_context
def service_get_all(context, disabled=None):
query = model_query(context, models.Service)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_all_by_topic(context, topic, disabled=None):
query = model_query(
context, models.Service, read_deleted="no").\
filter_by(topic=topic)
if disabled is not None:
query = query.filter_by(disabled=disabled)
return query.all()
@require_admin_context
def service_get_by_host_and_topic(context, host, topic):
result = model_query(
context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
filter_by(host=host).\
filter_by(topic=topic).\
first()
if not result:
raise exception.ServiceNotFound(service_id=None)
return result
@require_admin_context
def service_get_all_by_host(context, host):
return model_query(
context, models.Service, read_deleted="no").\
filter_by(host=host).\
all()
@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return model_query(context, models.Service,
func.coalesce(sort_value, 0),
session=session, read_deleted="no").\
filter_by(topic=topic).\
filter_by(disabled=False).\
outerjoin((subq, models.Service.host == subq.c.host)).\
order_by(sort_value).\
all()
@require_admin_context
def service_get_all_volume_sorted(context):
session = get_session()
with session.begin():
topic = CONF.volume_topic
label = 'volume_gigabytes'
subq = model_query(context, models.Volume.host,
func.sum(models.Volume.size).label(label),
session=session, read_deleted="no").\
group_by(models.Volume.host).\
subquery()
return _service_get_all_topic_subquery(context,
session,
topic,
subq,
label)
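
# Illustrative note: the query above yields (Service, gigabytes) pairs for
# the enabled volume services, ordered by the per-host total of volume
# gigabytes; the coalesce in the select list reports hosts with no volumes
# as 0.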
@require_admin_context
def service_get_by_args(context, host, binary):
result = model_query(context, models.Service).\
filter_by(host=host).\
filter_by(binary=binary).\
first()
if not result:
raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@require_admin_context
def service_create(context, values):
service_ref = models.Service()
service_ref.update(values)
if not CONF.enable_new_services:
service_ref.disabled = True
session = get_session()
with session.begin():
service_ref.save(session)
return service_ref
@require_admin_context
def service_update(context, service_id, values):
session = get_session()
with session.begin():
service_ref = _service_get(context, service_id, session=session)
service_ref.update(values)
###################
def _metadata_refs(metadata_dict, meta_class):
metadata_refs = []
if metadata_dict:
for k, v in metadata_dict.iteritems():
metadata_ref = meta_class()
metadata_ref['key'] = k
metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
return metadata_refs
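
# Illustrative sketch (not executed): _metadata_refs turns a plain dict into
# a list of ORM rows ready to be attached to a parent object, e.g.
#
#     _metadata_refs({'k1': 'v1'}, models.VolumeMetadata)
#
# yields a one-element list containing a VolumeMetadata row with key 'k1'
# and value 'v1'.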
def _dict_with_extra_specs(inst_type_query):
"""Convert type query result to dict with extra_spec and rate_limit.
Takes a volume type query returned by sqlalchemy and returns it
as a dictionary, converting the extra_specs entry from a list
of dicts:
'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
to a single dict:
'extra_specs' : {'k1': 'v1'}
"""
inst_type_dict = dict(inst_type_query)
extra_specs = dict([(x['key'], x['value'])
for x in inst_type_query['extra_specs']])
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
###################
@require_admin_context
def iscsi_target_count_by_host(context, host):
return model_query(context, models.IscsiTarget).\
filter_by(host=host).\
count()
@require_admin_context
def iscsi_target_create_safe(context, values):
iscsi_target_ref = models.IscsiTarget()
for (key, value) in values.iteritems():
iscsi_target_ref[key] = value
session = get_session()
with session.begin():
try:
iscsi_target_ref.save(session)
return iscsi_target_ref
except IntegrityError:
return None
###################
@require_context
def _quota_get(context, project_id, resource, session=None):
result = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@require_context
def quota_get(context, project_id, resource):
return _quota_get(context, project_id, resource)
@require_context
def quota_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.Quota, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_create(context, project_id, resource, limit):
quota_ref = models.Quota()
quota_ref.project_id = project_id
quota_ref.resource = resource
quota_ref.hard_limit = limit
session = get_session()
with session.begin():
quota_ref.save(session)
return quota_ref
@require_admin_context
def quota_update(context, project_id, resource, limit):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.hard_limit = limit
@require_admin_context
def quota_destroy(context, project_id, resource):
session = get_session()
with session.begin():
quota_ref = _quota_get(context, project_id, resource, session=session)
quota_ref.delete(session=session)
###################
@require_context
def _quota_class_get(context, class_name, resource, session=None):
result = model_query(context, models.QuotaClass, session=session,
read_deleted="no").\
filter_by(class_name=class_name).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaClassNotFound(class_name=class_name)
return result
@require_context
def quota_class_get(context, class_name, resource):
return _quota_class_get(context, class_name, resource)
def quota_class_get_default(context):
rows = model_query(context, models.QuotaClass,
read_deleted="no").\
filter_by(class_name=_DEFAULT_QUOTA_NAME).all()
result = {'class_name': _DEFAULT_QUOTA_NAME}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_context
def quota_class_get_all_by_name(context, class_name):
authorize_quota_class_context(context, class_name)
rows = model_query(context, models.QuotaClass, read_deleted="no").\
filter_by(class_name=class_name).\
all()
result = {'class_name': class_name}
for row in rows:
result[row.resource] = row.hard_limit
return result
@require_admin_context
def quota_class_create(context, class_name, resource, limit):
quota_class_ref = models.QuotaClass()
quota_class_ref.class_name = class_name
quota_class_ref.resource = resource
quota_class_ref.hard_limit = limit
session = get_session()
with session.begin():
quota_class_ref.save(session)
return quota_class_ref
@require_admin_context
def quota_class_update(context, class_name, resource, limit):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
quota_class_ref.hard_limit = limit
@require_admin_context
def quota_class_destroy(context, class_name, resource):
session = get_session()
with session.begin():
quota_class_ref = _quota_class_get(context, class_name, resource,
session=session)
quota_class_ref.delete(session=session)
@require_admin_context
def quota_class_destroy_all_by_name(context, class_name):
session = get_session()
with session.begin():
quota_classes = model_query(context, models.QuotaClass,
session=session, read_deleted="no").\
filter_by(class_name=class_name).\
all()
for quota_class_ref in quota_classes:
quota_class_ref.delete(session=session)
###################
@require_context
def quota_usage_get(context, project_id, resource):
result = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
filter_by(resource=resource).\
first()
if not result:
raise exception.QuotaUsageNotFound(project_id=project_id)
return result
@require_context
def quota_usage_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
rows = model_query(context, models.QuotaUsage, read_deleted="no").\
filter_by(project_id=project_id).\
all()
result = {'project_id': project_id}
for row in rows:
result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved)
return result
@require_admin_context
def _quota_usage_create(context, project_id, resource, in_use, reserved,
until_refresh, session=None):
quota_usage_ref = models.QuotaUsage()
quota_usage_ref.project_id = project_id
quota_usage_ref.resource = resource
quota_usage_ref.in_use = in_use
quota_usage_ref.reserved = reserved
quota_usage_ref.until_refresh = until_refresh
quota_usage_ref.save(session=session)
return quota_usage_ref
###################
def _reservation_create(context, uuid, usage, project_id, resource, delta,
expire, session=None):
reservation_ref = models.Reservation()
reservation_ref.uuid = uuid
reservation_ref.usage_id = usage['id']
reservation_ref.project_id = project_id
reservation_ref.resource = resource
reservation_ref.delta = delta
reservation_ref.expire = expire
reservation_ref.save(session=session)
return reservation_ref
###################
# NOTE(johannes): The quota code uses SQL locking to ensure races don't
# cause under or over counting of resources. To avoid deadlocks, this
# code always acquires the lock on quota_usages before acquiring the lock
# on reservations.
def _get_quota_usages(context, session, project_id):
# Broken out for testability
rows = model_query(context, models.QuotaUsage,
read_deleted="no",
session=session).\
filter_by(project_id=project_id).\
with_lockmode('update').\
all()
return dict((row.resource, row) for row in rows)
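# NOTE: descriptive addition, not in the original module.  Per the lock
# ordering described above, reservation_commit() and reservation_rollback()
# below both call _get_quota_usages() (locking quota_usages rows) before
# _quota_reservations() (locking reservation rows).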
@require_context
def quota_reserve(context, resources, quotas, deltas, expire,
until_refresh, max_age, project_id=None):
elevated = context.elevated()
session = get_session()
with session.begin():
if project_id is None:
project_id = context.project_id
# Get the current usages
usages = _get_quota_usages(context, session, project_id)
# Handle usage refresh
work = set(deltas.keys())
while work:
resource = work.pop()
# Do we need to refresh the usage?
refresh = False
if resource not in usages:
usages[resource] = _quota_usage_create(elevated,
project_id,
resource,
0, 0,
until_refresh or None,
session=session)
refresh = True
elif usages[resource].in_use < 0:
# Negative in_use count indicates a desync, so try to
# heal from that...
refresh = True
elif usages[resource].until_refresh is not None:
usages[resource].until_refresh -= 1
if usages[resource].until_refresh <= 0:
refresh = True
            elif max_age and usages[resource].updated_at is not None and (
                    (timeutils.utcnow() -
                     usages[resource].updated_at).total_seconds() >= max_age):
refresh = True
# OK, refresh the usage
if refresh:
# Grab the sync routine
sync = QUOTA_SYNC_FUNCTIONS[resources[resource].sync]
volume_type_id = getattr(resources[resource],
'volume_type_id', None)
volume_type_name = getattr(resources[resource],
'volume_type_name', None)
updates = sync(elevated, project_id,
volume_type_id=volume_type_id,
volume_type_name=volume_type_name,
session=session)
for res, in_use in updates.items():
# Make sure we have a destination for the usage!
if res not in usages:
usages[res] = _quota_usage_create(
elevated,
project_id,
res,
0, 0,
until_refresh or None,
session=session
)
# Update the usage
usages[res].in_use = in_use
usages[res].until_refresh = until_refresh or None
# Because more than one resource may be refreshed
# by the call to the sync routine, and we don't
# want to double-sync, we make sure all refreshed
# resources are dropped from the work set.
work.discard(res)
# NOTE(Vek): We make the assumption that the sync
# routine actually refreshes the
# resources that it is the sync routine
# for. We don't check, because this is
# a best-effort mechanism.
# Check for deltas that would go negative
unders = [r for r, delta in deltas.items()
if delta < 0 and delta + usages[r].in_use < 0]
# Now, let's check the quotas
# NOTE(Vek): We're only concerned about positive increments.
# If a project has gone over quota, we want them to
# be able to reduce their usage without any
# problems.
overs = [r for r, delta in deltas.items()
if quotas[r] >= 0 and delta >= 0 and
quotas[r] < delta + usages[r].total]
# NOTE(Vek): The quota check needs to be in the transaction,
# but the transaction doesn't fail just because
# we're over quota, so the OverQuota raise is
# outside the transaction. If we did the raise
# here, our usage updates would be discarded, but
# they're not invalidated by being over-quota.
# Create the reservations
if not overs:
reservations = []
for resource, delta in deltas.items():
reservation = _reservation_create(elevated,
str(uuid.uuid4()),
usages[resource],
project_id,
resource, delta, expire,
session=session)
reservations.append(reservation.uuid)
# Also update the reserved quantity
# NOTE(Vek): Again, we are only concerned here about
# positive increments. Here, though, we're
# worried about the following scenario:
#
# 1) User initiates resize down.
# 2) User allocates a new instance.
# 3) Resize down fails or is reverted.
# 4) User is now over quota.
#
# To prevent this, we only update the
# reserved value if the delta is positive.
if delta > 0:
usages[resource].reserved += delta
if unders:
LOG.warning(_("Change will make usage less than 0 for the following "
"resources: %s") % unders)
if overs:
usages = dict((k, dict(in_use=v['in_use'], reserved=v['reserved']))
for k, v in usages.items())
raise exception.OverQuota(overs=sorted(overs), quotas=quotas,
usages=usages)
return reservations
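# Illustrative usage sketch -- not part of the original module.  It shows the
# reserve/commit/rollback lifecycle built from quota_reserve(),
# reservation_commit() and reservation_rollback() above.  The 'resources',
# 'quotas', 'expire' and 'project_id' arguments are assumed to be supplied by
# the quota engine (e.g. cinder.quota.QUOTAS); the delta values here are
# placeholders.
def _example_quota_reservation_lifecycle(context, resources, quotas,
                                         expire, project_id):
    """Hypothetical sketch: reserve quota, then commit or roll back."""
    deltas = {'volumes': 1, 'gigabytes': 10}
    reservations = quota_reserve(context, resources, quotas, deltas,
                                 expire, until_refresh=None, max_age=0,
                                 project_id=project_id)
    try:
        # ... perform the actual resource creation here ...
        reservation_commit(context, reservations, project_id=project_id)
    except Exception:
        reservation_rollback(context, reservations, project_id=project_id)
        raise
    return reservations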
def _quota_reservations(session, context, reservations):
"""Return the relevant reservations."""
# Get the listed reservations
return model_query(context, models.Reservation,
read_deleted="no",
session=session).\
filter(models.Reservation.uuid.in_(reservations)).\
with_lockmode('update').\
all()
@require_context
def reservation_commit(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
for reservation in _quota_reservations(session, context, reservations):
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
usage.in_use += reservation.delta
reservation.delete(session=session)
@require_context
def reservation_rollback(context, reservations, project_id=None):
session = get_session()
with session.begin():
usages = _get_quota_usages(context, session, project_id)
for reservation in _quota_reservations(session, context, reservations):
usage = usages[reservation.resource]
if reservation.delta >= 0:
usage.reserved -= reservation.delta
reservation.delete(session=session)
@require_admin_context
def quota_destroy_all_by_project(context, project_id):
session = get_session()
with session.begin():
quotas = model_query(context, models.Quota, session=session,
read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_ref in quotas:
quota_ref.delete(session=session)
quota_usages = model_query(context, models.QuotaUsage,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for quota_usage_ref in quota_usages:
quota_usage_ref.delete(session=session)
reservations = model_query(context, models.Reservation,
session=session, read_deleted="no").\
filter_by(project_id=project_id).\
all()
for reservation_ref in reservations:
reservation_ref.delete(session=session)
@require_admin_context
def reservation_expire(context):
session = get_session()
with session.begin():
current_time = timeutils.utcnow()
results = model_query(context, models.Reservation, session=session,
read_deleted="no").\
filter(models.Reservation.expire < current_time).\
all()
if results:
for reservation in results:
if reservation.delta >= 0:
reservation.usage.reserved -= reservation.delta
reservation.usage.save(session=session)
reservation.delete(session=session)
###################
@require_admin_context
def volume_allocate_iscsi_target(context, volume_id, host):
session = get_session()
with session.begin():
iscsi_target_ref = model_query(context, models.IscsiTarget,
session=session, read_deleted="no").\
filter_by(volume=None).\
filter_by(host=host).\
with_lockmode('update').\
first()
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not iscsi_target_ref:
raise exception.NoMoreTargets()
iscsi_target_ref.volume_id = volume_id
session.add(iscsi_target_ref)
return iscsi_target_ref.target_num
@require_admin_context
def volume_attached(context, volume_id, instance_uuid, host_name, mountpoint):
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
raise exception.InvalidUUID(uuid=instance_uuid)
session = get_session()
with session.begin():
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref['status'] = 'in-use'
volume_ref['mountpoint'] = mountpoint
volume_ref['attach_status'] = 'attached'
volume_ref['instance_uuid'] = instance_uuid
volume_ref['attached_host'] = host_name
return volume_ref
@require_context
def volume_create(context, values):
values['volume_metadata'] = _metadata_refs(values.get('metadata'),
models.VolumeMetadata)
if is_admin_context(context):
values['volume_admin_metadata'] = \
_metadata_refs(values.get('admin_metadata'),
models.VolumeAdminMetadata)
elif values.get('volume_admin_metadata'):
del values['volume_admin_metadata']
volume_ref = models.Volume()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
volume_ref.update(values)
session = get_session()
with session.begin():
session.add(volume_ref)
return _volume_get(context, values['id'], session=session)
@require_admin_context
def volume_data_get_for_host(context, host, count_only=False):
if count_only:
result = model_query(context,
func.count(models.Volume.id),
read_deleted="no").\
filter_by(host=host).\
first()
return result[0] or 0
else:
result = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no").\
filter_by(host=host).\
first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def _volume_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
query = model_query(context,
func.count(models.Volume.id),
func.sum(models.Volume.size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_admin_context
def volume_data_get_for_project(context, project_id, volume_type_id=None):
return _volume_data_get_for_project(context, project_id, volume_type_id)
@require_admin_context
def finish_volume_migration(context, src_vol_id, dest_vol_id):
"""Copy almost all columns from dest to source."""
session = get_session()
with session.begin():
src_volume_ref = _volume_get(context, src_vol_id, session=session)
dest_volume_ref = _volume_get(context, dest_vol_id, session=session)
# NOTE(rpodolyaka): we should copy only column values, while model
# instances also have relationships attributes, which
# should be ignored
def is_column(inst, attr):
return attr in inst.__class__.__table__.columns
for key, value in dest_volume_ref.iteritems():
if key == 'id' or not is_column(dest_volume_ref, key):
continue
elif key == 'migration_status':
value = None
elif key == '_name_id':
value = dest_volume_ref['_name_id'] or dest_volume_ref['id']
setattr(src_volume_ref, key, value)
@require_admin_context
def volume_destroy(context, volume_id):
session = get_session()
now = timeutils.utcnow()
with session.begin():
model_query(context, models.Volume, session=session).\
filter_by(id=volume_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.IscsiTarget, session=session).\
filter_by(volume_id=volume_id).\
update({'volume_id': None})
model_query(context, models.VolumeMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeAdminMetadata, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
model_query(context, models.Transfer, session=session).\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': now,
'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_detached(context, volume_id):
session = get_session()
with session.begin():
volume_ref = _volume_get(context, volume_id, session=session)
# Hide status update from user if we're performing a volume migration
if not volume_ref['migration_status']:
volume_ref['status'] = 'available'
volume_ref['mountpoint'] = None
volume_ref['attach_status'] = 'detached'
volume_ref['instance_uuid'] = None
volume_ref['attached_host'] = None
volume_ref['attach_time'] = None
@require_context
def _volume_get_query(context, session=None, project_only=False):
if is_admin_context(context):
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_admin_metadata')).\
options(joinedload('volume_type'))
else:
return model_query(context, models.Volume, session=session,
project_only=project_only).\
options(joinedload('volume_metadata')).\
options(joinedload('volume_type'))
@require_context
def _volume_get(context, volume_id, session=None):
result = _volume_get_query(context, session=session, project_only=True).\
filter_by(id=volume_id).\
first()
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result
@require_context
def volume_get(context, volume_id):
return _volume_get(context, volume_id)
@require_admin_context
def volume_get_all(context, marker, limit, sort_key, sort_dir,
filters=None):
"""Retrieves all volumes.
:param context: context to query under
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
    :param sort_key: single attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:param filters: Filters for the query. A filter key/value of
'no_migration_targets'=True causes volumes with either
a NULL 'migration_status' or a 'migration_status' that
does not start with 'target:' to be retrieved.
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_key, sort_dir, filters)
# No volumes would match, return empty list
        if query is None:
return []
return query.all()
@require_admin_context
def volume_get_all_by_host(context, host):
return _volume_get_query(context).filter_by(host=host).all()
@require_context
def volume_get_all_by_project(context, project_id, marker, limit, sort_key,
sort_dir, filters=None):
""""Retrieves all volumes in a project.
:param context: context to query under
:param project_id: project for all volumes being retrieved
:param marker: the last item of the previous page, used to determine the
next page of results to return
:param limit: maximum number of items to return
    :param sort_key: single attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:param filters: Filters for the query. A filter key/value of
'no_migration_targets'=True causes volumes with either
a NULL 'migration_status' or a 'migration_status' that
does not start with 'target:' to be retrieved.
:returns: list of matching volumes
"""
session = get_session()
with session.begin():
authorize_project_context(context, project_id)
# Add in the project filter without modifying the given filters
filters = filters.copy() if filters else {}
filters['project_id'] = project_id
# Generate the query
query = _generate_paginate_query(context, session, marker, limit,
sort_key, sort_dir, filters)
# No volumes would match, return empty list
        if query is None:
return []
return query.all()
def _generate_paginate_query(context, session, marker, limit, sort_key,
sort_dir, filters):
"""Generate the query to include the filters and the paginate options.
Returns a query with sorting / pagination criteria added or None
if the given filters will not yield any results.
:param context: context to query under
:param session: the session to use
    :param marker: the last item of the previous page; we return the next
results after this value.
:param limit: maximum number of items to return
    :param sort_key: single attribute by which results should be sorted
:param sort_dir: direction in which results should be sorted (asc, desc)
:param filters: dictionary of filters; values that are lists,
tuples, sets, or frozensets cause an 'IN' test to
be performed, while exact matching ('==' operator)
is used for other values
:returns: updated query or None
"""
query = _volume_get_query(context, session=session)
if filters:
filters = filters.copy()
# 'no_migration_targets' is unique, must be either NULL or
# not start with 'target:'
if ('no_migration_targets' in filters and
filters['no_migration_targets'] == True):
filters.pop('no_migration_targets')
try:
column_attr = getattr(models.Volume, 'migration_status')
conditions = [column_attr == None,
column_attr.op('NOT LIKE')('target:%')]
query = query.filter(or_(*conditions))
except AttributeError:
log_msg = _("'migration_status' column could not be found.")
LOG.debug(log_msg)
return None
# Apply exact match filters for everything else, ensure that the
# filter value exists on the model
for key in filters.keys():
# metadata is unique, must be a dict
if key == 'metadata':
if not isinstance(filters[key], dict):
log_msg = _("'metadata' filter value is not valid.")
LOG.debug(log_msg)
return None
continue
try:
column_attr = getattr(models.Volume, key)
# Do not allow relationship properties since those require
# schema specific knowledge
prop = getattr(column_attr, 'property')
if isinstance(prop, RelationshipProperty):
log_msg = (_("'%s' filter key is not valid, "
"it maps to a relationship.")) % key
LOG.debug(log_msg)
return None
except AttributeError:
log_msg = _("'%s' filter key is not valid.") % key
LOG.debug(log_msg)
return None
# Holds the simple exact matches
filter_dict = {}
# Iterate over all filters, special case the filter is necessary
for key, value in filters.iteritems():
if key == 'metadata':
# model.VolumeMetadata defines the backref to Volumes as
# 'volume_metadata' or 'volume_admin_metadata', use those as
# column attribute keys
col_attr = getattr(models.Volume, 'volume_metadata')
col_ad_attr = getattr(models.Volume, 'volume_admin_metadata')
for k, v in value.iteritems():
query = query.filter(or_(col_attr.any(key=k, value=v),
col_ad_attr.any(key=k, value=v)))
elif isinstance(value, (list, tuple, set, frozenset)):
# Looking for values in a list; apply to query directly
column_attr = getattr(models.Volume, key)
query = query.filter(column_attr.in_(value))
else:
# OK, simple exact match; save for later
filter_dict[key] = value
# Apply simple exact matches
if filter_dict:
query = query.filter_by(**filter_dict)
marker_volume = None
if marker is not None:
marker_volume = _volume_get(context, marker, session)
return sqlalchemyutils.paginate_query(query, models.Volume, limit,
[sort_key, 'created_at', 'id'],
marker=marker_volume,
sort_dir=sort_dir)
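# Illustrative usage sketch -- not part of the original module.  It shows how
# the filter semantics implemented by _generate_paginate_query() are driven
# through volume_get_all(): list values become IN clauses,
# 'no_migration_targets' gets the NULL / NOT LIKE 'target:%' handling above,
# and 'metadata' must be a dict matched against (admin) metadata rows.  The
# filter values below are placeholders.
def _example_paginated_volume_listing(context):
    """Hypothetical sketch of paginated, filtered volume listing."""
    filters = {
        'status': ['available', 'in-use'],    # IN (...)
        'no_migration_targets': True,         # NULL or not 'target:%'
        'metadata': {'readonly': 'False'},    # metadata key/value match
    }
    return volume_get_all(context, marker=None, limit=100,
                          sort_key='created_at', sort_dir='desc',
                          filters=filters)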
@require_admin_context
def volume_get_iscsi_target_num(context, volume_id):
result = model_query(context, models.IscsiTarget, read_deleted="yes").\
filter_by(volume_id=volume_id).\
first()
if not result:
raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
return result.target_num
@require_context
def volume_update(context, volume_id, values):
session = get_session()
with session.begin():
metadata = values.get('metadata')
if metadata is not None:
_volume_user_metadata_update(context,
volume_id,
values.pop('metadata'),
delete=True,
session=session)
admin_metadata = values.get('admin_metadata')
if is_admin_context(context) and admin_metadata is not None:
_volume_admin_metadata_update(context,
volume_id,
values.pop('admin_metadata'),
delete=True,
session=session)
volume_ref = _volume_get(context, volume_id, session=session)
volume_ref.update(values)
return volume_ref
####################
def _volume_x_metadata_get_query(context, volume_id, model, session=None):
return model_query(context, model, session=session, read_deleted="no").\
filter_by(volume_id=volume_id)
def _volume_x_metadata_get(context, volume_id, model, session=None):
rows = _volume_x_metadata_get_query(context, volume_id, model,
session=session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
def _volume_x_metadata_get_item(context, volume_id, key, model, notfound_exec,
session=None):
result = _volume_x_metadata_get_query(context, volume_id,
model, session=session).\
filter_by(key=key).\
first()
if not result:
raise notfound_exec(metadata_key=key, volume_id=volume_id)
return result
def _volume_x_metadata_update(context, volume_id, metadata, delete,
model, notfound_exec, session=None):
if not session:
session = get_session()
with session.begin(subtransactions=True):
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = _volume_x_metadata_get(context, volume_id,
model, session=session)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
notfound_exec,
session=session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta
# objects
for meta_key, meta_value in metadata.items():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = _volume_x_metadata_get_item(context, volume_id,
meta_key, model,
notfound_exec,
session=session)
except notfound_exec:
meta_ref = model()
item.update({"key": meta_key, "volume_id": volume_id})
meta_ref.update(item)
meta_ref.save(session=session)
return _volume_x_metadata_get(context, volume_id, model)
def _volume_user_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeMetadata, session=session)
@require_context
def _volume_user_metadata_get_item(context, volume_id, key, session=None):
return _volume_x_metadata_get_item(context, volume_id, key,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def _volume_user_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeMetadata,
exception.VolumeMetadataNotFound,
session=session)
@require_context
@require_volume_exists
def volume_metadata_get_item(context, volume_id, key):
return _volume_user_metadata_get_item(context, volume_id, key)
@require_context
@require_volume_exists
def volume_metadata_get(context, volume_id):
return _volume_user_metadata_get(context, volume_id)
@require_context
@require_volume_exists
def volume_metadata_delete(context, volume_id, key):
_volume_user_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
@require_volume_exists
def volume_metadata_update(context, volume_id, metadata, delete):
return _volume_user_metadata_update(context, volume_id, metadata, delete)
###################
def _volume_admin_metadata_get_query(context, volume_id, session=None):
return _volume_x_metadata_get_query(context, volume_id,
models.VolumeAdminMetadata,
session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_get(context, volume_id, session=None):
return _volume_x_metadata_get(context, volume_id,
models.VolumeAdminMetadata, session=session)
@require_admin_context
@require_volume_exists
def _volume_admin_metadata_update(context, volume_id, metadata, delete,
session=None):
return _volume_x_metadata_update(context, volume_id, metadata, delete,
models.VolumeAdminMetadata,
exception.VolumeAdminMetadataNotFound,
session=session)
@require_admin_context
@require_volume_exists
def volume_admin_metadata_get(context, volume_id):
return _volume_admin_metadata_get(context, volume_id)
@require_admin_context
@require_volume_exists
def volume_admin_metadata_delete(context, volume_id, key):
_volume_admin_metadata_get_query(context, volume_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
@require_volume_exists
def volume_admin_metadata_update(context, volume_id, metadata, delete):
return _volume_admin_metadata_update(context, volume_id, metadata, delete)
###################
@require_context
def snapshot_create(context, values):
values['snapshot_metadata'] = _metadata_refs(values.get('metadata'),
models.SnapshotMetadata)
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
snapshot_ref = models.Snapshot()
snapshot_ref.update(values)
session.add(snapshot_ref)
return _snapshot_get(context, values['id'], session=session)
@require_admin_context
def snapshot_destroy(context, snapshot_id):
session = get_session()
with session.begin():
model_query(context, models.Snapshot, session=session).\
filter_by(id=snapshot_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
model_query(context, models.SnapshotMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _snapshot_get(context, snapshot_id, session=None):
result = model_query(context, models.Snapshot, session=session,
project_only=True).\
options(joinedload('volume')).\
options(joinedload('snapshot_metadata')).\
filter_by(id=snapshot_id).\
first()
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result
@require_context
def snapshot_get(context, snapshot_id):
return _snapshot_get(context, snapshot_id)
@require_admin_context
def snapshot_get_all(context):
return model_query(context, models.Snapshot).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_for_volume(context, volume_id):
return model_query(context, models.Snapshot, read_deleted='no',
project_only=True).\
filter_by(volume_id=volume_id).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def snapshot_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
return model_query(context, models.Snapshot).\
filter_by(project_id=project_id).\
options(joinedload('snapshot_metadata')).\
all()
@require_context
def _snapshot_data_get_for_project(context, project_id, volume_type_id=None,
session=None):
authorize_project_context(context, project_id)
query = model_query(context,
func.count(models.Snapshot.id),
func.sum(models.Snapshot.volume_size),
read_deleted="no",
session=session).\
filter_by(project_id=project_id)
if volume_type_id:
query = query.join('volume').filter_by(volume_type_id=volume_type_id)
result = query.first()
# NOTE(vish): convert None to 0
return (result[0] or 0, result[1] or 0)
@require_context
def snapshot_data_get_for_project(context, project_id, volume_type_id=None):
return _snapshot_data_get_for_project(context, project_id, volume_type_id)
@require_context
def snapshot_get_active_by_window(context, begin, end=None, project_id=None):
"""Return snapshots that were active during window."""
query = model_query(context, models.Snapshot, read_deleted="yes")
query = query.filter(or_(models.Snapshot.deleted_at == None,
models.Snapshot.deleted_at > begin))
query = query.options(joinedload(models.Snapshot.volume))
if end:
query = query.filter(models.Snapshot.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
@require_context
def snapshot_update(context, snapshot_id, values):
session = get_session()
with session.begin():
snapshot_ref = _snapshot_get(context, snapshot_id, session=session)
snapshot_ref.update(values)
####################
def _snapshot_metadata_get_query(context, snapshot_id, session=None):
return model_query(context, models.SnapshotMetadata,
session=session, read_deleted="no").\
filter_by(snapshot_id=snapshot_id)
@require_context
@require_snapshot_exists
def _snapshot_metadata_get(context, snapshot_id, session=None):
rows = _snapshot_metadata_get_query(context, snapshot_id, session).all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
@require_snapshot_exists
def snapshot_metadata_get(context, snapshot_id):
return _snapshot_metadata_get(context, snapshot_id)
@require_context
@require_snapshot_exists
def snapshot_metadata_delete(context, snapshot_id, key):
_snapshot_metadata_get_query(context, snapshot_id).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _snapshot_metadata_get_item(context, snapshot_id, key, session=None):
result = _snapshot_metadata_get_query(context,
snapshot_id,
session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.SnapshotMetadataNotFound(metadata_key=key,
snapshot_id=snapshot_id)
return result
@require_context
@require_snapshot_exists
def snapshot_metadata_update(context, snapshot_id, metadata, delete):
session = get_session()
with session.begin():
# Set existing metadata to deleted if delete argument is True
if delete:
original_metadata = _snapshot_metadata_get(context, snapshot_id,
session)
for meta_key, meta_value in original_metadata.iteritems():
if meta_key not in metadata:
meta_ref = _snapshot_metadata_get_item(context,
snapshot_id,
meta_key, session)
meta_ref.update({'deleted': True})
meta_ref.save(session=session)
meta_ref = None
# Now update all existing items with new values, or create new meta
# objects
for meta_key, meta_value in metadata.items():
# update the value whether it exists or not
item = {"value": meta_value}
try:
meta_ref = _snapshot_metadata_get_item(context, snapshot_id,
meta_key, session)
except exception.SnapshotMetadataNotFound:
meta_ref = models.SnapshotMetadata()
item.update({"key": meta_key, "snapshot_id": snapshot_id})
meta_ref.update(item)
meta_ref.save(session=session)
return snapshot_metadata_get(context, snapshot_id)
###################
@require_admin_context
def volume_type_create(context, values):
"""Create a new instance type.
In order to pass in extra specs, the values dict should contain a
'extra_specs' key/value pair:
{'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
"""
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
try:
_volume_type_get_by_name(context, values['name'], session)
raise exception.VolumeTypeExists(id=values['name'])
except exception.VolumeTypeNotFoundByName:
pass
try:
_volume_type_get(context, values['id'], session)
raise exception.VolumeTypeExists(id=values['id'])
except exception.VolumeTypeNotFound:
pass
try:
values['extra_specs'] = _metadata_refs(values.get('extra_specs'),
models.VolumeTypeExtraSpecs)
volume_type_ref = models.VolumeTypes()
volume_type_ref.update(values)
session.add(volume_type_ref)
except Exception as e:
raise db_exc.DBError(e)
return volume_type_ref
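# Illustrative usage sketch -- not part of the original module.  It creates a
# volume type with extra specs as described in the docstring above; the type
# name and spec keys/values are placeholders.
def _example_create_volume_type(context):
    """Hypothetical sketch for volume_type_create()."""
    return volume_type_create(context, {
        'name': 'gold',
        'extra_specs': {'volume_backend_name': 'lvm', 'qos:minIOPS': '100'},
    })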
@require_context
def volume_type_get_all(context, inactive=False, filters=None):
"""Returns a dict describing all volume_types with name as key."""
filters = filters or {}
read_deleted = "yes" if inactive else "no"
rows = model_query(context, models.VolumeTypes,
read_deleted=read_deleted).\
options(joinedload('extra_specs')).\
order_by("name").\
all()
result = {}
for row in rows:
result[row['name']] = _dict_with_extra_specs(row)
return result
@require_context
def _volume_type_get(context, id, session=None, inactive=False):
read_deleted = "yes" if inactive else "no"
result = model_query(context,
models.VolumeTypes,
session=session,
read_deleted=read_deleted).\
options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
if not result:
raise exception.VolumeTypeNotFound(volume_type_id=id)
return _dict_with_extra_specs(result)
@require_context
def volume_type_get(context, id, inactive=False):
"""Return a dict describing specific volume_type."""
return _volume_type_get(context, id, None, inactive)
@require_context
def _volume_type_get_by_name(context, name, session=None):
result = model_query(context, models.VolumeTypes, session=session).\
options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not result:
raise exception.VolumeTypeNotFoundByName(volume_type_name=name)
else:
return _dict_with_extra_specs(result)
@require_context
def volume_type_get_by_name(context, name):
"""Return a dict describing specific volume_type."""
return _volume_type_get_by_name(context, name)
@require_admin_context
def volume_type_qos_associations_get(context, qos_specs_id, inactive=False):
read_deleted = "yes" if inactive else "no"
return model_query(context, models.VolumeTypes,
read_deleted=read_deleted). \
filter_by(qos_specs_id=qos_specs_id).all()
@require_admin_context
def volume_type_qos_associate(context, type_id, qos_specs_id):
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
update({'qos_specs_id': qos_specs_id,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from qos specs."""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
session.query(models.VolumeTypes). \
filter_by(id=type_id). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_disassociate_all(context, qos_specs_id):
"""Disassociate all volume types associated with specified qos specs."""
session = get_session()
with session.begin():
session.query(models.VolumeTypes). \
filter_by(qos_specs_id=qos_specs_id). \
update({'qos_specs_id': None,
'updated_at': timeutils.utcnow()})
@require_admin_context
def volume_type_qos_specs_get(context, type_id):
"""Return all qos specs for given volume type.
result looks like:
{
'qos_specs':
{
'id': 'qos-specs-id',
'name': 'qos_specs_name',
'consumer': 'Consumer',
'specs': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3'
}
}
}
"""
session = get_session()
with session.begin():
_volume_type_get(context, type_id, session)
row = session.query(models.VolumeTypes). \
options(joinedload('qos_specs')). \
filter_by(id=type_id). \
first()
# row.qos_specs is a list of QualityOfServiceSpecs ref
specs = _dict_with_qos_specs(row.qos_specs)
if not specs:
# turn empty list to None
specs = None
else:
specs = specs[0]
return {'qos_specs': specs}
@require_admin_context
def volume_type_destroy(context, id):
session = get_session()
with session.begin():
_volume_type_get(context, id, session)
results = model_query(context, models.Volume, session=session). \
filter_by(volume_type_id=id).all()
if results:
msg = _('VolumeType %s deletion failed, VolumeType in use.') % id
LOG.error(msg)
raise exception.VolumeTypeInUse(volume_type_id=id)
model_query(context, models.VolumeTypes, session=session).\
filter_by(id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
model_query(context, models.VolumeTypeExtraSpecs, session=session).\
filter_by(volume_type_id=id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_get_active_by_window(context,
begin,
end=None,
project_id=None):
"""Return volumes that were active during window."""
query = model_query(context, models.Volume, read_deleted="yes")
query = query.filter(or_(models.Volume.deleted_at == None,
models.Volume.deleted_at > begin))
if end:
query = query.filter(models.Volume.created_at < end)
if project_id:
query = query.filter_by(project_id=project_id)
return query.all()
####################
def _volume_type_extra_specs_query(context, volume_type_id, session=None):
return model_query(context, models.VolumeTypeExtraSpecs, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id)
@require_context
def volume_type_extra_specs_get(context, volume_type_id):
rows = _volume_type_extra_specs_query(context, volume_type_id).\
all()
result = {}
for row in rows:
result[row['key']] = row['value']
return result
@require_context
def volume_type_extra_specs_delete(context, volume_type_id, key):
session = get_session()
with session.begin():
_volume_type_extra_specs_get_item(context, volume_type_id, key,
session)
_volume_type_extra_specs_query(context, volume_type_id, session).\
filter_by(key=key).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def _volume_type_extra_specs_get_item(context, volume_type_id, key,
session=None):
result = _volume_type_extra_specs_query(
context, volume_type_id, session=session).\
filter_by(key=key).\
first()
if not result:
raise exception.VolumeTypeExtraSpecsNotFound(
extra_specs_key=key,
volume_type_id=volume_type_id)
return result
@require_context
def volume_type_extra_specs_update_or_create(context, volume_type_id,
specs):
session = get_session()
with session.begin():
spec_ref = None
for key, value in specs.iteritems():
try:
spec_ref = _volume_type_extra_specs_get_item(
context, volume_type_id, key, session)
except exception.VolumeTypeExtraSpecsNotFound:
spec_ref = models.VolumeTypeExtraSpecs()
spec_ref.update({"key": key, "value": value,
"volume_type_id": volume_type_id,
"deleted": False})
spec_ref.save(session=session)
return specs
####################
@require_admin_context
def qos_specs_create(context, values):
"""Create a new QoS specs.
    :param values: dictionary that contains specifications for QoS
e.g. {'name': 'Name',
'qos_specs': {
'consumer': 'front-end',
'total_iops_sec': 1000,
'total_bytes_sec': 1024000
}
}
"""
specs_id = str(uuid.uuid4())
session = get_session()
with session.begin():
try:
_qos_specs_get_by_name(context, values['name'], session)
raise exception.QoSSpecsExists(specs_id=values['name'])
except exception.QoSSpecsNotFound:
pass
try:
# Insert a root entry for QoS specs
specs_root = models.QualityOfServiceSpecs()
root = dict(id=specs_id)
# 'QoS_Specs_Name' is an internal reserved key to store
# the name of QoS specs
root['key'] = 'QoS_Specs_Name'
root['value'] = values['name']
LOG.debug("DB qos_specs_create(): root %s", root)
specs_root.update(root)
specs_root.save(session=session)
# Insert all specification entries for QoS specs
for k, v in values['qos_specs'].iteritems():
item = dict(key=k, value=v, specs_id=specs_id)
item['id'] = str(uuid.uuid4())
spec_entry = models.QualityOfServiceSpecs()
spec_entry.update(item)
spec_entry.save(session=session)
except Exception as e:
raise db_exc.DBError(e)
return dict(id=specs_root.id, name=specs_root.value)
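# Illustrative usage sketch -- not part of the original module.  It creates a
# QoS specs entry matching the docstring above and associates it with a
# volume type; the names and numbers are placeholders.
def _example_create_and_associate_qos_specs(context, type_id):
    """Hypothetical sketch for qos_specs_create()/qos_specs_associate()."""
    specs = qos_specs_create(context, {
        'name': 'high-iops',
        'qos_specs': {
            'consumer': 'front-end',
            'total_iops_sec': 1000,
            'total_bytes_sec': 1024000,
        },
    })
    qos_specs_associate(context, specs['id'], type_id)
    return qos_specs_get(context, specs['id'])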
@require_admin_context
def _qos_specs_get_by_name(context, name, session=None, inactive=False):
read_deleted = 'yes' if inactive else 'no'
results = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(key='QoS_Specs_Name'). \
filter_by(value=name). \
options(joinedload('specs')).all()
if not results:
raise exception.QoSSpecsNotFound(specs_id=name)
return results
@require_admin_context
def _qos_specs_get_ref(context, qos_specs_id, session=None, inactive=False):
read_deleted = 'yes' if inactive else 'no'
result = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted, session=session). \
filter_by(id=qos_specs_id). \
options(joinedload_all('specs')).all()
if not result:
raise exception.QoSSpecsNotFound(specs_id=qos_specs_id)
return result
def _dict_with_children_specs(specs):
"""Convert specs list to a dict."""
result = {}
for spec in specs:
# Skip deleted keys
if not spec['deleted']:
result.update({spec['key']: spec['value']})
return result
def _dict_with_qos_specs(rows):
"""Convert qos specs query results to list.
Qos specs query results are a list of quality_of_service_specs refs,
some are root entry of a qos specs (key == 'QoS_Specs_Name') and the
rest are children entry, a.k.a detailed specs for a qos specs. This
function converts query results to a dict using spec name as key.
"""
result = []
for row in rows:
if row['key'] == 'QoS_Specs_Name':
member = {}
member['name'] = row['value']
member.update(dict(id=row['id']))
if row.specs:
spec_dict = _dict_with_children_specs(row.specs)
member.update(dict(consumer=spec_dict['consumer']))
del spec_dict['consumer']
member.update(dict(specs=spec_dict))
result.append(member)
return result
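# Illustrative example (assumed data, not from the original module) of the
# conversion performed by _dict_with_qos_specs() above: a root row such as
#   {'key': 'QoS_Specs_Name', 'value': 'gold', 'id': SPECS_UUID,
#    'specs': [consumer='back-end', total_iops_sec='500', ...]}
# becomes
#   {'name': 'gold', 'id': SPECS_UUID, 'consumer': 'back-end',
#    'specs': {'total_iops_sec': '500', ...}}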
@require_admin_context
def qos_specs_get(context, qos_specs_id, inactive=False):
rows = _qos_specs_get_ref(context, qos_specs_id, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_get_all(context, inactive=False, filters=None):
"""Returns a list of all qos_specs.
    The result looks like:
[{
'id': SPECS-UUID,
'name': 'qos_spec-1',
'consumer': 'back-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
{
'id': SPECS-UUID,
'name': 'qos_spec-2',
'consumer': 'front-end',
'specs': {
'key1': 'value1',
'key2': 'value2',
...
}
},
]
"""
filters = filters or {}
#TODO(zhiteng) Add filters for 'consumer'
read_deleted = "yes" if inactive else "no"
rows = model_query(context, models.QualityOfServiceSpecs,
read_deleted=read_deleted). \
options(joinedload_all('specs')).all()
return _dict_with_qos_specs(rows)
@require_admin_context
def qos_specs_get_by_name(context, name, inactive=False):
rows = _qos_specs_get_by_name(context, name, None, inactive)
return _dict_with_qos_specs(rows)[0]
@require_admin_context
def qos_specs_associations_get(context, qos_specs_id):
"""Return all entities associated with specified qos specs.
For now, the only entity that is possible to associate with
a qos specs is volume type, so this is just a wrapper of
volume_type_qos_associations_get(). But it's possible to
extend qos specs association to other entities, such as volumes,
    sometime in the future.
"""
# Raise QoSSpecsNotFound if no specs found
_qos_specs_get_ref(context, qos_specs_id, None)
return volume_type_qos_associations_get(context, qos_specs_id)
@require_admin_context
def qos_specs_associate(context, qos_specs_id, type_id):
"""Associate volume type from specified qos specs."""
return volume_type_qos_associate(context, type_id, qos_specs_id)
@require_admin_context
def qos_specs_disassociate(context, qos_specs_id, type_id):
"""Disassociate volume type from specified qos specs."""
return volume_type_qos_disassociate(context, qos_specs_id, type_id)
@require_admin_context
def qos_specs_disassociate_all(context, qos_specs_id):
"""Disassociate all entities associated with specified qos specs.
For now, the only entity that is possible to associate with
a qos specs is volume type, so this is just a wrapper of
volume_type_qos_disassociate_all(). But it's possible to
extend qos specs association to other entities, such as volumes,
    sometime in the future.
"""
return volume_type_qos_disassociate_all(context, qos_specs_id)
@require_admin_context
def qos_specs_item_delete(context, qos_specs_id, key):
session = get_session()
with session.begin():
_qos_specs_get_item(context, qos_specs_id, key)
session.query(models.QualityOfServiceSpecs). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def qos_specs_delete(context, qos_specs_id):
session = get_session()
with session.begin():
_qos_specs_get_ref(context, qos_specs_id, session)
session.query(models.QualityOfServiceSpecs).\
filter(or_(models.QualityOfServiceSpecs.id == qos_specs_id,
models.QualityOfServiceSpecs.specs_id ==
qos_specs_id)).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def _qos_specs_get_item(context, qos_specs_id, key, session=None):
result = model_query(context, models.QualityOfServiceSpecs,
session=session). \
filter(models.QualityOfServiceSpecs.key == key). \
filter(models.QualityOfServiceSpecs.specs_id == qos_specs_id). \
first()
if not result:
raise exception.QoSSpecsKeyNotFound(
specs_key=key,
specs_id=qos_specs_id)
return result
@require_admin_context
def qos_specs_update(context, qos_specs_id, specs):
"""Make updates to an existing qos specs.
    Perform add, update or delete of key/value pairs for the given qos specs.
"""
session = get_session()
with session.begin():
# make sure qos specs exists
_qos_specs_get_ref(context, qos_specs_id, session)
spec_ref = None
for key in specs.keys():
try:
spec_ref = _qos_specs_get_item(
context, qos_specs_id, key, session)
except exception.QoSSpecsKeyNotFound:
spec_ref = models.QualityOfServiceSpecs()
            if spec_ref.get('id', None):
                id = spec_ref['id']
            else:
                id = str(uuid.uuid4())
value = dict(id=id, key=key, value=specs[key],
specs_id=qos_specs_id,
deleted=False)
            LOG.debug('qos_specs_update() value: %s', value)
spec_ref.update(value)
spec_ref.save(session=session)
return specs
####################
@require_context
def volume_type_encryption_get(context, volume_type_id, session=None):
return model_query(context, models.Encryption, session=session,
read_deleted="no").\
filter_by(volume_type_id=volume_type_id).first()
@require_admin_context
def volume_type_encryption_delete(context, volume_type_id):
session = get_session()
with session.begin():
encryption = volume_type_encryption_get(context, volume_type_id,
session)
encryption.update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_admin_context
def volume_type_encryption_create(context, volume_type_id, values):
session = get_session()
with session.begin():
encryption = models.Encryption()
if 'volume_type_id' not in values:
values['volume_type_id'] = volume_type_id
encryption.update(values)
session.add(encryption)
return encryption
@require_admin_context
def volume_type_encryption_update(context, volume_type_id, values):
session = get_session()
with session.begin():
encryption = volume_type_encryption_get(context, volume_type_id,
session)
if not encryption:
            raise exception.VolumeTypeEncryptionNotFound(
                type_id=volume_type_id)
encryption.update(values)
return encryption
def volume_type_encryption_volume_get(context, volume_type_id, session=None):
volume_list = _volume_get_query(context, session=session,
project_only=False).\
filter_by(volume_type_id=volume_type_id).\
all()
return volume_list
####################
@require_context
def volume_encryption_metadata_get(context, volume_id, session=None):
"""Return the encryption key id for a given volume."""
volume_ref = _volume_get(context, volume_id)
encryption_ref = volume_type_encryption_get(context,
volume_ref['volume_type_id'])
return {
'encryption_key_id': volume_ref['encryption_key_id'],
'control_location': encryption_ref['control_location'],
'cipher': encryption_ref['cipher'],
'key_size': encryption_ref['key_size'],
'provider': encryption_ref['provider'],
}
####################
@require_context
def _volume_glance_metadata_get_all(context, session=None):
rows = model_query(context,
models.VolumeGlanceMetadata,
project_only=True,
session=session).\
filter_by(deleted=False).\
all()
return rows
@require_context
def volume_glance_metadata_get_all(context):
"""Return the Glance metadata for all volumes."""
return _volume_glance_metadata_get_all(context)
@require_context
@require_volume_exists
def _volume_glance_metadata_get(context, volume_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(volume_id=volume_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=volume_id)
return rows
@require_context
@require_volume_exists
def volume_glance_metadata_get(context, volume_id):
"""Return the Glance metadata for the specified volume."""
return _volume_glance_metadata_get(context, volume_id)
@require_context
@require_snapshot_exists
def _volume_snapshot_glance_metadata_get(context, snapshot_id, session=None):
rows = model_query(context, models.VolumeGlanceMetadata, session=session).\
filter_by(snapshot_id=snapshot_id).\
filter_by(deleted=False).\
all()
if not rows:
raise exception.GlanceMetadataNotFound(id=snapshot_id)
return rows
@require_context
@require_snapshot_exists
def volume_snapshot_glance_metadata_get(context, snapshot_id):
"""Return the Glance metadata for the specified snapshot."""
return _volume_snapshot_glance_metadata_get(context, snapshot_id)
@require_context
@require_volume_exists
def volume_glance_metadata_create(context, volume_id, key, value):
"""Update the Glance metadata for a volume by adding a new key:value pair.
This API does not support changing the value of a key once it has been
created.
"""
session = get_session()
with session.begin():
rows = session.query(models.VolumeGlanceMetadata).\
filter_by(volume_id=volume_id).\
filter_by(key=key).\
filter_by(deleted=False).all()
if len(rows) > 0:
raise exception.GlanceMetadataExists(key=key,
volume_id=volume_id)
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = key
vol_glance_metadata.value = str(value)
session.add(vol_glance_metadata)
return
@require_context
@require_snapshot_exists
def volume_glance_metadata_copy_to_snapshot(context, snapshot_id, volume_id):
"""Update the Glance metadata for a snapshot.
This copies all of the key:value pairs from the originating volume, to
ensure that a volume created from the snapshot will retain the
original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context, volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.snapshot_id = snapshot_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_from_volume_to_volume(context,
src_volume_id,
volume_id):
"""Update the Glance metadata for a volume.
    This copies all of the key:value pairs from the originating volume,
to ensure that a volume created from the volume (clone) will
retain the original metadata.
"""
session = get_session()
with session.begin():
metadata = _volume_glance_metadata_get(context,
src_volume_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
@require_volume_exists
def volume_glance_metadata_copy_to_volume(context, volume_id, snapshot_id):
"""Update the Glance metadata from a volume (created from a snapshot) by
copying all of the key:value pairs from the originating snapshot.
This is so that the Glance metadata from the original volume is retained.
"""
session = get_session()
with session.begin():
metadata = _volume_snapshot_glance_metadata_get(context, snapshot_id,
session=session)
for meta in metadata:
vol_glance_metadata = models.VolumeGlanceMetadata()
vol_glance_metadata.volume_id = volume_id
vol_glance_metadata.key = meta['key']
vol_glance_metadata.value = meta['value']
vol_glance_metadata.save(session=session)
@require_context
def volume_glance_metadata_delete_by_volume(context, volume_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(volume_id=volume_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def volume_glance_metadata_delete_by_snapshot(context, snapshot_id):
model_query(context, models.VolumeGlanceMetadata, read_deleted='no').\
filter_by(snapshot_id=snapshot_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def backup_get(context, backup_id):
result = model_query(context, models.Backup, project_only=True).\
filter_by(id=backup_id).\
first()
if not result:
raise exception.BackupNotFound(backup_id=backup_id)
return result
def _backup_get_all(context, filters=None):
session = get_session()
with session.begin():
# Generate the query
query = model_query(context, models.Backup)
if filters:
query = query.filter_by(**filters)
return query.all()
@require_admin_context
def backup_get_all(context, filters=None):
return _backup_get_all(context, filters)
@require_admin_context
def backup_get_all_by_host(context, host):
return model_query(context, models.Backup).filter_by(host=host).all()
@require_context
def backup_get_all_by_project(context, project_id, filters=None):
authorize_project_context(context, project_id)
if not filters:
filters = {}
else:
filters = filters.copy()
filters['project_id'] = project_id
return _backup_get_all(context, filters)
@require_context
def backup_create(context, values):
backup = models.Backup()
if not values.get('id'):
values['id'] = str(uuid.uuid4())
backup.update(values)
session = get_session()
with session.begin():
backup.save(session)
return backup
@require_context
def backup_update(context, backup_id, values):
session = get_session()
with session.begin():
backup = model_query(context, models.Backup,
session=session, read_deleted="yes").\
filter_by(id=backup_id).first()
if not backup:
raise exception.BackupNotFound(
_("No backup with id %s") % backup_id)
backup.update(values)
return backup
@require_admin_context
def backup_destroy(context, backup_id):
model_query(context, models.Backup).\
filter_by(id=backup_id).\
update({'status': 'deleted',
'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
###############################
@require_context
def _transfer_get(context, transfer_id, session=None):
query = model_query(context, models.Transfer,
session=session).\
filter_by(id=transfer_id)
if not is_admin_context(context):
volume = models.Volume
query = query.filter(models.Transfer.volume_id == volume.id,
volume.project_id == context.project_id)
result = query.first()
if not result:
raise exception.TransferNotFound(transfer_id=transfer_id)
return result
@require_context
def transfer_get(context, transfer_id):
return _transfer_get(context, transfer_id)
def _translate_transfers(transfers):
results = []
for transfer in transfers:
r = {}
r['id'] = transfer['id']
r['volume_id'] = transfer['volume_id']
r['display_name'] = transfer['display_name']
r['created_at'] = transfer['created_at']
r['deleted'] = transfer['deleted']
results.append(r)
return results
@require_admin_context
def transfer_get_all(context):
results = model_query(context, models.Transfer).all()
return _translate_transfers(results)
@require_context
def transfer_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
query = model_query(context, models.Transfer).\
filter(models.Volume.id == models.Transfer.volume_id,
models.Volume.project_id == project_id)
results = query.all()
return _translate_transfers(results)
@require_context
def transfer_create(context, values):
if not values.get('id'):
values['id'] = str(uuid.uuid4())
session = get_session()
with session.begin():
volume_ref = _volume_get(context,
values['volume_id'],
session=session)
if volume_ref['status'] != 'available':
msg = _('Volume must be available')
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume_ref['status'] = 'awaiting-transfer'
transfer = models.Transfer()
transfer.update(values)
session.add(transfer)
volume_ref.update(volume_ref)
return transfer
@require_context
def transfer_destroy(context, transfer_id):
session = get_session()
with session.begin():
transfer_ref = _transfer_get(context,
transfer_id,
session=session)
volume_ref = _volume_get(context,
transfer_ref['volume_id'],
session=session)
# If the volume state is not 'awaiting-transfer' don't change it, but
# we can still mark the transfer record as deleted.
if volume_ref['status'] != 'awaiting-transfer':
msg = _('Volume in unexpected state %s, '
'expected awaiting-transfer') % volume_ref['status']
LOG.error(msg)
else:
volume_ref['status'] = 'available'
volume_ref.update(volume_ref)
volume_ref.save(session=session)
model_query(context, models.Transfer, session=session).\
filter_by(id=transfer_id).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
@require_context
def transfer_accept(context, transfer_id, user_id, project_id):
session = get_session()
with session.begin():
transfer_ref = _transfer_get(context, transfer_id, session)
volume_id = transfer_ref['volume_id']
volume_ref = _volume_get(context, volume_id, session=session)
if volume_ref['status'] != 'awaiting-transfer':
msg = _('Transfer %(transfer_id)s: Volume id %(volume_id)s in '
'unexpected state %(status)s, expected '
'awaiting-transfer') % {'transfer_id': transfer_id,
'volume_id': volume_ref['id'],
'status': volume_ref['status']}
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume_ref['status'] = 'available'
volume_ref['user_id'] = user_id
volume_ref['project_id'] = project_id
volume_ref['updated_at'] = literal_column('updated_at')
volume_ref.update(volume_ref)
session.query(models.Transfer).\
filter_by(id=transfer_ref['id']).\
update({'deleted': True,
'deleted_at': timeutils.utcnow(),
'updated_at': literal_column('updated_at')})
| apache-2.0 | -7,665,454,611,863,467,000 | 32.830478 | 79 | 0.588356 | false |
Azure/azure-sdk-for-python | sdk/hybridcompute/azure-mgmt-hybridcompute/azure/mgmt/hybridcompute/aio/_configuration.py | 1 | 3246 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class HybridComputeManagementClientConfiguration(Configuration):
"""Configuration for HybridComputeManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(HybridComputeManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2021-03-25-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-hybridcompute/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| mit | -8,604,923,745,926,091,000 | 47.447761 | 134 | 0.684535 | false |
tomkralidis/pywps | pywps/processing/job.py | 1 | 3570 | ##################################################################
# Copyright 2018 Open Source Geospatial Foundation and others #
# licensed under MIT, Please consult LICENSE.txt for details #
##################################################################
import os
import tempfile
import pywps.configuration as config
import logging
LOGGER = logging.getLogger("PYWPS")
class Job(object):
"""
:class:`Job` represents a processing job.
"""
def __init__(self, process, wps_request, wps_response):
self.process = process
self.method = '_run_process'
self.wps_request = wps_request
self.wps_response = wps_response
@property
def name(self):
return self.process.identifier
@property
def workdir(self):
return self.process.workdir
@property
def uuid(self):
return self.process.uuid
def dump(self):
LOGGER.debug('dump job ...')
import dill
filename = tempfile.mkstemp(prefix='job_', suffix='.dump', dir=self.workdir)[1]
        with open(filename, 'wb') as fp:  # dill writes a binary pickle
dill.dump(self, fp)
LOGGER.debug("dumped job status to {}".format(filename))
return filename
return None
@classmethod
def load(cls, filename):
LOGGER.debug('load job ...')
import dill
        with open(filename, 'rb') as fp:
job = dill.load(fp)
return job
return None
def run(self):
getattr(self.process, self.method)(self.wps_request, self.wps_response)
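# Illustrative round trip (not part of the original module): a Job built by the
# service can be pickled with dump() and re-executed later, e.g. by the launcher.
#
#   job = Job(process, wps_request, wps_response)
#   path = job.dump()          # writes a dill pickle into job.workdir
#   Job.load(path).run()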
class JobLauncher(object):
"""
:class:`JobLauncher` is a command line tool to launch a job from a file
with a dumped job state.
Example call: ``joblauncher -c /etc/pywps.cfg job-1001.dump``
"""
def create_parser(self):
import argparse
parser = argparse.ArgumentParser(prog="joblauncher")
parser.add_argument("-c", "--config", help="Path to pywps configuration.")
parser.add_argument("filename", help="File with dumped pywps job object.")
return parser
def run(self, args):
if args.config:
LOGGER.debug("using pywps_cfg={}".format(args.config))
os.environ['PYWPS_CFG'] = args.config
self._run_job(args.filename)
def _run_job(self, filename):
job = Job.load(filename)
# init config
if 'PYWPS_CFG' in os.environ:
config.load_configuration(os.environ['PYWPS_CFG'])
# update PATH
os.environ['PATH'] = "{0}:{1}".format(
config.get_config_value('processing', 'path'),
os.environ.get('PATH'))
# cd into workdir
os.chdir(job.workdir)
# init logger ... code copied from app.Service
if config.get_config_value('logging', 'file') and config.get_config_value('logging', 'level'):
LOGGER.setLevel(getattr(logging, config.get_config_value('logging', 'level')))
if not LOGGER.handlers: # hasHandlers in Python 3.x
fh = logging.FileHandler(config.get_config_value('logging', 'file'))
fh.setFormatter(logging.Formatter(config.get_config_value('logging', 'format')))
LOGGER.addHandler(fh)
else: # NullHandler
if not LOGGER.handlers:
LOGGER.addHandler(logging.NullHandler())
job.run()
def launcher():
"""
Run job launcher command line.
"""
job_launcher = JobLauncher()
parser = job_launcher.create_parser()
args = parser.parse_args()
job_launcher.run(args)
| mit | 5,095,248,502,263,513,000 | 31.454545 | 102 | 0.582353 | false |
jcmcclurg/serverpower | profiling/defaultGetSetpoints.py | 1 | 1840 | #!/usr/bin/python
import numpy
import re
import cPickle as pickle
import gzip
import time
def getSetpointsFromRaw(filename,verbose=False):
printEvery = 1
if(verbose):
startTime = time.time()
print "Opened raw file %s."%(filename)
else:
startTime = 0
f=open(filename,'rb')
data = []
for line in f:
if re.search('^[0-9]+(\.[0-9]*)?,[0-9]+(\.[0-9]*)?$', line) != None:
v = [ float(i) for i in line.strip().split(',') ]
data.append(v)
if verbose and (time.time() - startTime > printEvery):
startTime = time.time()
print "The list has %d blocks."%(len(data))
return numpy.array(data)
def rawFileToSetpointsFile(oldFilename,newFilename,verbose=False):
if verbose:
print "Loading data from raw..."
data = getSetpointsFromRaw(oldFilename,verbose)
if verbose:
print "Writing data (%d blocks) to setpoints file..."%(data.shape[0])
fp = gzip.open(newFilename,'wb')
pickle.dump(data,fp,-1)
fp.close()
return data
def readSetpointsFile(filename,verbose=False):
try:
if verbose:
print "Loading data from setpoints file..."
fp = gzip.open(filename+"_cache.gz","rb")
data = pickle.load(fp)
fp.close()
except IOError as err:
if verbose:
print "Does not exist (%s). Attempting to create..."%(err)
data = rawFileToSetpointsFile(filename, filename+"_cache.gz", verbose)
if verbose:
print "Got %d blocks."%(data.shape[0])
return data
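# Note: readSetpointsFile() looks for a gzip-pickled cache named "<filename>_cache.gz"
# and builds it from the raw log on the first call, so later reads skip the
# line-by-line parse of the raw file.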
if __name__ == "__main__":
exps = { 'stress': '1452722752.651508100', 'signal_insert_delays':'1452732970.201413700', 'rapl':'1452743186.881235700','powerclamp':'1452753403.717082000','cpufreq':'1452796934.955382300' }
for exp in exps:
date = exps[exp]
print exp+": "+date
d = "experiments/"+exp+"/"+date
for i in [1,2,3,4]:
print " server "+str(i)
data = readSetpointsFile(d+"/server"+str(i)+"/"+date+".testlog",True)
print ""
| gpl-2.0 | -3,381,506,806,175,974,000 | 25.285714 | 191 | 0.66413 | false |
bacher09/gpackages-metadata | tests/test_news.py | 1 | 1144 | from .utils import TestCase, TESTDATA_DIR
from packages_metadata.generic_metadata import news
import os.path
class TestNews(TestCase):
@classmethod
def setUpClass(cls):
cls.news_dir = os.path.join(TESTDATA_DIR, 'news')
cls.news_titles = [
'ia64-java-removal', 'glib-228', 'gnustep-new-layout', 'gnome-232'
]
def test_news(self):
test_news = news.News(repo_path=self.news_dir, news_path=self.news_dir)
news_list = list(test_news)
news_titles = [item.title for item in news_list]
self.assertSetEqual(set(news_titles), set(self.news_titles))
glib_news = None
for item in news_list:
if item.title == "glib-228":
glib_news = item.default_news
break
self.assertEqual(glib_news.title.strip(), "Upgrade to GLIB 2.28")
self.assertEqual(glib_news.revision, 1)
authors = glib_news.authors
self.assertEqual(len(authors), 1)
self.assertEqual(authors[0].email, "[email protected]")
self.assertTupleEqual(glib_news.if_installed, ("<dev-libs/glib-2.28",))
| gpl-2.0 | 3,034,550,257,392,151,600 | 34.75 | 79 | 0.625 | false |
Petr-Kovalev/nupic-win32 | tests/unit/py2/nupic/data/file_record_stream_test.py | 1 | 7707 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from datetime import datetime
import tempfile
import unittest2 as unittest
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.file_record_stream import FileRecordStream
from nupic.data.utils import (
parseTimestamp, serializeTimestamp, escape, unescape)
def _getTempFileName():
"""Creates unique file name that starts with 'test' and ends with '.txt'."""
handle = tempfile.NamedTemporaryFile(prefix='test', suffix='.txt', dir='.')
filename = handle.name
handle.close()
return filename
class TestFileRecordStream(unittest.TestCase):
def testBasic(self):
"""Runs basic FileRecordStream tests."""
filename = _getTempFileName()
# Write a standard file
fields = [('name', 'string', ''),
('timestamp', 'datetime', 'T'),
('integer', 'int', ''),
('real', 'float', ''),
('reset', 'int', 'R'),
('sid', 'string', 'S'),
('categoryField', 'int', 'C'),]
fieldNames = ['name', 'timestamp', 'integer', 'real', 'reset', 'sid',
'categoryField']
print 'Creating temp file:', filename
s = FileRecordStream(streamID=filename, write=True, fields=fields)
self.assertTrue(s.getDataRowCount() == 0)
# Records
records = (
['rec_1', datetime(day=1, month=3, year=2010), 5, 6.5, 1, 'seq-1', 10],
['rec_2', datetime(day=2, month=3, year=2010), 8, 7.5, 0, 'seq-1', 11],
['rec_3', datetime(day=3, month=3, year=2010), 12, 8.5, 0, 'seq-1', 12])
self.assertTrue(s.getFields() == fields)
self.assertTrue(s.getNextRecordIdx() == 0)
print 'Writing records ...'
for r in records:
print list(r)
s.appendRecord(list(r))
self.assertTrue(s.getDataRowCount() == 3)
recordsBatch = (
['rec_4', datetime(day=4, month=3, year=2010), 2, 9.5, 1, 'seq-1', 13],
['rec_5', datetime(day=5, month=3, year=2010), 6, 10.5, 0, 'seq-1', 14],
['rec_6', datetime(day=6, month=3, year=2010), 11, 11.5, 0, 'seq-1', 15])
print 'Adding batch of records...'
for rec in recordsBatch:
print rec
s.appendRecords(recordsBatch)
self.assertTrue(s.getDataRowCount() == 6)
s.close()
# Read the standard file
s = FileRecordStream(filename)
self.assertTrue(s.getDataRowCount() == 6)
self.assertTrue(s.getFieldNames() == fieldNames)
# Note! this is the number of records read so far
self.assertTrue(s.getNextRecordIdx() == 0)
readStats = s.getStats()
print 'Got stats:', readStats
expectedStats = {
'max': [None, None, 12, 11.5, 1, None, 15],
'min': [None, None, 2, 6.5, 0, None, 10]
}
self.assertTrue(readStats == expectedStats)
readRecords = []
print 'Reading records ...'
while True:
r = s.getNextRecord()
print r
if r is None:
break
readRecords.append(r)
allRecords = records + recordsBatch
for r1, r2 in zip(allRecords, readRecords):
print 'Expected:', r1
print 'Read :', r2
self.assertTrue(r1 == r2)
s.close()
def testEscapeUnescape(self):
s = '1,2\n4,5'
e = escape(s)
u = unescape(e)
self.assertTrue(u == s)
def testParseSerializeTimestamp(self):
t = datetime.now()
s = serializeTimestamp(t)
self.assertTrue(parseTimestamp(s) == t)
def testBadDataset(self):
filename = _getTempFileName()
print 'Creating tempfile:', filename
# Write bad dataset with records going backwards in time
fields = [('timestamp', 'datetime', 'T')]
o = FileRecordStream(streamID=filename, write=True, fields=fields)
# Records
records = (
[datetime(day=3, month=3, year=2010)],
[datetime(day=2, month=3, year=2010)])
o.appendRecord(records[0])
o.appendRecord(records[1])
o.close()
# Write bad dataset with broken sequences
fields = [('sid', 'int', 'S')]
o = FileRecordStream(streamID=filename, write=True, fields=fields)
# Records
records = ([1], [2], [1])
o.appendRecord(records[0])
o.appendRecord(records[1])
self.assertRaises(Exception, o.appendRecord, (records[2],))
o.close()
def testMissingValues(self):
print "Beginning Missing Data test..."
filename = _getTempFileName()
# Some values missing of each type
# read dataset from disk, retrieve values
# string should return empty string, numeric types sentinelValue
print 'Creating tempfile:', filename
# write dataset to disk with float, int, and string fields
fields = [('timestamp', 'datetime', 'T'),
('name', 'string', ''),
('integer', 'int', ''),
('real', 'float', '')]
s = FileRecordStream(streamID=filename, write=True, fields=fields)
# Records
records = (
[datetime(day=1, month=3, year=2010), 'rec_1', 5, 6.5],
[datetime(day=2, month=3, year=2010), '', 8, 7.5],
[datetime(day=3, month=3, year=2010), 'rec_3', '', 8.5],
[datetime(day=4, month=3, year=2010), 'rec_4', 12, ''],
[datetime(day=5, month=3, year=2010), 'rec_5', -87657496599, 6.5],
[datetime(day=6, month=3, year=2010), 'rec_6', 12, -87657496599],
[datetime(day=6, month=3, year=2010), str(-87657496599), 12, 6.5])
for r in records:
s.appendRecord(list(r))
s.close()
# Read the standard file
s = FileRecordStream(streamID=filename, write=False)
fieldsRead = s.getFields()
self.assertTrue(fields == fieldsRead)
recordsRead = []
while True:
r = s.getNextRecord()
if r is None:
break
print 'Reading record ...'
print r
recordsRead.append(r)
# sort the records by date, so we know for sure which is which
    recordsRead.sort(key=lambda rec: rec[0])
# empty string
self.assertTrue(recordsRead[1][1] == SENTINEL_VALUE_FOR_MISSING_DATA)
# missing int
self.assertTrue(recordsRead[2][2] == SENTINEL_VALUE_FOR_MISSING_DATA)
# missing float
self.assertTrue(recordsRead[3][3] == SENTINEL_VALUE_FOR_MISSING_DATA)
# sentinel value in input handled correctly for int field
self.assertTrue(recordsRead[4][2] != SENTINEL_VALUE_FOR_MISSING_DATA)
# sentinel value in input handled correctly for float field
self.assertTrue(recordsRead[5][3] != SENTINEL_VALUE_FOR_MISSING_DATA)
# sentinel value in input handled correctly for string field
# this should leave the string as-is, since a missing string
# is encoded not with a sentinel value but with an empty string
self.assertTrue(recordsRead[6][1] != SENTINEL_VALUE_FOR_MISSING_DATA)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 6,863,097,200,473,217,000 | 29.583333 | 79 | 0.618918 | false |
djolent/WebApp | LifeSciences/AzureBlast/AzureBlast/BatchScripts/AnalysisJobManager.py | 1 | 4604 | import os
import sys
import datetime
import time
import azure.batch.batch_service_client as batch
import azure.batch.batch_auth as batchauth
import azure.batch.models as batchmodels
from azure.storage.table import TableService, TableBatch
from azure.storage.blob import BlockBlobService
def get_analysis_state(all_tasks_complete, any_failures):
if all_tasks_complete and any_failures:
return 'Error'
if all_tasks_complete:
return 'Complete'
return 'Running'
def get_query_state(task):
if task.state == batchmodels.TaskState.active:
return 'Waiting'
if task.state == batchmodels.TaskState.preparing:
return 'Waiting'
if task.state == batchmodels.TaskState.running:
return 'Running'
if task.state == batchmodels.TaskState.completed:
if task.execution_info.exit_code == 0:
return 'Success'
return 'Error'
def wait_for_tasks_to_complete(
table_service, batch_client, entity_pk, entity_rk, job_id):
"""
Returns when all tasks in the specified job reach the Completed state.
"""
while True:
entity = table_service.get_entity(
'AnalysisEntity', entity_pk, entity_rk)
tasks = batch_client.task.list(job_id)
incomplete_tasks = [task for task in tasks if
task.id != 'JobManager' and
task.state != batchmodels.TaskState.completed]
complete_tasks = [task for task in tasks if
task.id != 'JobManager' and
task.state == batchmodels.TaskState.completed]
failed_tasks = [task for task in complete_tasks if
task.execution_info.exit_code != 0 or
task.execution_info.scheduling_error is not None]
queries = table_service.query_entities(
'AnalysisQueryEntity',
filter="PartitionKey eq '{}'".format(entity.RowKey))
current_batch_count = 0
updateBatch = TableBatch()
for task in tasks:
matching_queries = [q for q in queries if q.RowKey == task.id]
if not matching_queries:
print('Could not find query {}'.format(task.id))
continue
query = matching_queries[0]
update = False
state = get_query_state(task)
if query._State != state:
query._State = state
update = True
if task.state == batchmodels.TaskState.running:
if not hasattr(query, 'StartTime'):
query.StartTime = task.execution_info.start_time
update = True
if task.state == batchmodels.TaskState.completed:
if not hasattr(query, 'EndTime'):
query.EndTime = task.execution_info.end_time
update = True
if update:
updateBatch.update_entity(query)
current_batch_count += 1
if current_batch_count == 99:
table_service.commit_batch('AnalysisQueryEntity', updateBatch)
current_batch_count = 0
updateBatch = TableBatch()
if current_batch_count > 0:
table_service.commit_batch('AnalysisQueryEntity', updateBatch)
all_tasks_complete = not incomplete_tasks
any_failures = len(failed_tasks) > 0
entity.CompletedTasks = len(complete_tasks)
entity._State = get_analysis_state(all_tasks_complete, any_failures)
if not incomplete_tasks:
entity.EndTime = datetime.datetime.utcnow()
table_service.update_entity('AnalysisEntity', entity)
return
else:
table_service.update_entity('AnalysisEntity', entity)
time.sleep(5)
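# Note: query-state updates are committed in Azure Table batches of fewer than 100
# operations (the per-batch service limit), and the Batch job is re-polled every
# 5 seconds until every task has completed.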
if __name__ == '__main__':
storage_account = sys.argv[1]
storage_key = sys.argv[2]
batch_account = sys.argv[3]
batch_key = sys.argv[4]
batch_url = sys.argv[5]
job_id = sys.argv[6]
entity_pk = sys.argv[7]
entity_rk = sys.argv[8]
table_service = TableService(account_name=storage_account,
account_key=storage_key)
blob_service = BlockBlobService(account_name=storage_account,
account_key=storage_key)
credentials = batchauth.SharedKeyCredentials(batch_account, batch_key)
batch_client = batch.BatchServiceClient(credentials, base_url=batch_url)
wait_for_tasks_to_complete(table_service, batch_client, entity_pk, entity_rk, job_id)
| mit | 1,266,419,782,913,377,800 | 35.251969 | 89 | 0.597524 | false |
jelly/calibre | src/calibre/gui2/tweak_book/check.py | 2 | 9562 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
from PyQt5.Qt import (
QIcon, Qt, QSplitter, QListWidget, QTextBrowser, QPalette, QMenu,
QListWidgetItem, pyqtSignal, QApplication, QStyledItemDelegate)
from calibre.ebooks.oeb.polish.check.base import WARN, INFO, DEBUG, ERROR, CRITICAL
from calibre.ebooks.oeb.polish.check.main import run_checks, fix_errors
from calibre.gui2 import NO_URL_FORMATTING
from calibre.gui2.tweak_book import tprefs
from calibre.gui2.tweak_book.widgets import BusyCursor
def icon_for_level(level):
if level > WARN:
icon = 'dialog_error.png'
elif level == WARN:
icon = 'dialog_warning.png'
elif level == INFO:
icon = 'dialog_information.png'
else:
icon = None
return QIcon(I(icon)) if icon else QIcon()
def prefix_for_level(level):
if level > WARN:
text = _('ERROR')
elif level == WARN:
text = _('WARNING')
elif level == INFO:
text = _('INFO')
else:
text = ''
if text:
text += ': '
return text
class Delegate(QStyledItemDelegate):
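    """Renders the currently selected item in the errors list bold, on the alternate-base background."""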
def initStyleOption(self, option, index):
super(Delegate, self).initStyleOption(option, index)
if index.row() == self.parent().currentRow():
option.font.setBold(True)
option.backgroundBrush = self.parent().palette().brush(QPalette.AlternateBase)
class Check(QSplitter):
item_activated = pyqtSignal(object)
check_requested = pyqtSignal()
fix_requested = pyqtSignal(object)
def __init__(self, parent=None):
QSplitter.__init__(self, parent)
self.setChildrenCollapsible(False)
self.items = i = QListWidget(self)
i.setContextMenuPolicy(Qt.CustomContextMenu)
i.customContextMenuRequested.connect(self.context_menu)
self.items.setSpacing(3)
self.items.itemDoubleClicked.connect(self.current_item_activated)
self.items.currentItemChanged.connect(self.current_item_changed)
self.items.setSelectionMode(self.items.NoSelection)
self.delegate = Delegate(self.items)
self.items.setItemDelegate(self.delegate)
self.addWidget(i)
self.help = h = QTextBrowser(self)
h.anchorClicked.connect(self.link_clicked)
h.setOpenLinks(False)
self.addWidget(h)
self.setStretchFactor(0, 100)
self.setStretchFactor(1, 50)
self.clear_at_startup()
state = tprefs.get('check-book-splitter-state', None)
if state is not None:
self.restoreState(state)
def clear_at_startup(self):
self.clear_help(_('Check has not been run'))
self.items.clear()
def context_menu(self, pos):
m = QMenu()
if self.items.count() > 0:
m.addAction(QIcon(I('edit-copy.png')), _('Copy list of errors to clipboard'), self.copy_to_clipboard)
if list(m.actions()):
m.exec_(self.mapToGlobal(pos))
def copy_to_clipboard(self):
items = []
for item in (self.items.item(i) for i in xrange(self.items.count())):
msg = unicode(item.text())
msg = prefix_for_level(item.data(Qt.UserRole).level) + msg
items.append(msg)
if items:
QApplication.clipboard().setText('\n'.join(items))
def save_state(self):
tprefs.set('check-book-splitter-state', bytearray(self.saveState()))
def clear_help(self, msg=None):
if msg is None:
msg = _('No problems found')
self.help.setText('<h2>%s</h2><p><a style="text-decoration:none" title="%s" href="run:check">%s</a></p>' % (
msg, _('Click to run a check on the book'), _('Run check')))
def link_clicked(self, url):
url = unicode(url.toString(NO_URL_FORMATTING))
if url == 'activate:item':
self.current_item_activated()
elif url == 'run:check':
self.check_requested.emit()
elif url == 'fix:errors':
errors = [self.items.item(i).data(Qt.UserRole) for i in xrange(self.items.count())]
self.fix_requested.emit(errors)
elif url.startswith('fix:error,'):
num = int(url.rpartition(',')[-1])
errors = [self.items.item(num).data(Qt.UserRole)]
self.fix_requested.emit(errors)
elif url.startswith('activate:item:'):
index = int(url.rpartition(':')[-1])
self.location_activated(index)
def next_error(self, delta=1):
row = self.items.currentRow()
num = self.items.count()
if num > 0:
row = (row + delta) % num
self.items.setCurrentRow(row)
self.current_item_activated()
def current_item_activated(self, *args):
i = self.items.currentItem()
if i is not None:
err = i.data(Qt.UserRole)
if err.has_multiple_locations:
self.location_activated(0)
else:
self.item_activated.emit(err)
def location_activated(self, index):
i = self.items.currentItem()
if i is not None:
err = i.data(Qt.UserRole)
err.current_location_index = index
self.item_activated.emit(err)
def current_item_changed(self, *args):
i = self.items.currentItem()
self.help.setText('')
def loc_to_string(line, col):
loc = ''
if line is not None:
loc = _('line: %d') % line
if col is not None:
loc += _(' column: %d') % col
if loc:
loc = ' (%s)' % loc
return loc
if i is not None:
err = i.data(Qt.UserRole)
header = {DEBUG:_('Debug'), INFO:_('Information'), WARN:_('Warning'), ERROR:_('Error'), CRITICAL:_('Error')}[err.level]
ifix = ''
loc = loc_to_string(err.line, err.col)
if err.INDIVIDUAL_FIX:
ifix = '<a href="fix:error,%d" title="%s">%s</a><br><br>' % (
self.items.currentRow(), _('Try to fix only this error'), err.INDIVIDUAL_FIX)
open_tt = _('Click to open in editor')
fix_tt = _('Try to fix all fixable errors automatically. Only works for some types of error.')
fix_msg = _('Try to correct all fixable errors automatically')
run_tt, run_msg = _('Re-run the check'), _('Re-run check')
header = '<style>a { text-decoration: none}</style><h2>%s [%d / %d]</h2>' % (
header, self.items.currentRow()+1, self.items.count())
msg = '<p>%s</p>'
footer = '<div>%s<a href="fix:errors" title="%s">%s</a><br><br> <a href="run:check" title="%s">%s</a></div>'
if err.has_multiple_locations:
activate = []
for i, (name, lnum, col) in enumerate(err.all_locations):
activate.append('<a href="activate:item:%d" title="%s">%s %s</a>' % (
i, open_tt, name, loc_to_string(lnum, col)))
many = len(activate) > 2
activate = '<div>%s</div>' % ('<br>'.join(activate))
if many:
activate += '<br>'
template = header + ((msg + activate) if many else (activate + msg)) + footer
else:
activate = '<div><a href="activate:item" title="%s">%s %s</a></div>' % (
open_tt, err.name, loc)
template = header + activate + msg + footer
self.help.setText(
template % (err.HELP, ifix, fix_tt, fix_msg, run_tt, run_msg))
def run_checks(self, container):
with BusyCursor():
self.show_busy()
QApplication.processEvents()
errors = run_checks(container)
self.hide_busy()
for err in sorted(errors, key=lambda e:(100 - e.level, e.name)):
i = QListWidgetItem('%s\xa0\xa0\xa0\xa0[%s]' % (err.msg, err.name), self.items)
i.setData(Qt.UserRole, err)
i.setIcon(icon_for_level(err.level))
if errors:
self.items.setCurrentRow(0)
self.current_item_changed()
self.items.setFocus(Qt.OtherFocusReason)
else:
self.clear_help()
def fix_errors(self, container, errors):
with BusyCursor():
self.show_busy(_('Running fixers, please wait...'))
QApplication.processEvents()
changed = fix_errors(container, errors)
self.run_checks(container)
return changed
def show_busy(self, msg=_('Running checks, please wait...')):
self.help.setText(msg)
self.items.clear()
def hide_busy(self):
self.help.setText('')
self.items.clear()
def keyPressEvent(self, ev):
if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
self.current_item_activated()
return super(Check, self).keyPressEvent(ev)
def clear(self):
self.items.clear()
self.clear_help()
def main():
from calibre.gui2 import Application
from calibre.gui2.tweak_book.boss import get_container
app = Application([]) # noqa
path = sys.argv[-1]
container = get_container(path)
d = Check()
d.run_checks(container)
d.show()
app.exec_()
if __name__ == '__main__':
main()
| gpl-3.0 | 6,725,639,900,550,539,000 | 35.496183 | 131 | 0.566409 | false |
alexliew/learn_python_the_hard_way | ex5.py | 1 | 2837 | my_name = "Alex Liew"
my_age = 25 # this is no lie
my_height = 174 # cm
my_weight = 65 # kg
my_eyes = 'Brown'
my_teeth = 'White'
my_hair = 'Black'
print("Let's talk about {0}.".format(my_name))
print("He's {0} centimeters tall.".format(my_height))
print("He's {0} kilograms heavy.".format(my_weight))
print("Actually that's not that heavy.")
print("He's got {0} eyes and {1} hair.".format(my_eyes, my_hair))
print("His teeth are usually {0} dependong on the coffee.".format(my_teeth))
print("If I add {0}, {1}, and {2} I'd get {3}.".format(my_age, my_height, my_weight, my_age + my_height + my_weight))
print("Without 'my_' in front of the variables.")
name = "Alex Liew"
age = 25 # this is no lie
height = 174 # cm
weight = 65 # kg
eyes = 'Brown'
teeth = 'White'
hair = 'Black'
print("Let's talk about {0}.".format(name))
print("He's {0} centimeters tall.".format(height))
print("He's {0} kilograms heavy.".format(weight))
print("Actually that's not that heavy.")
print("He's got {0} eyes and {1} hair.".format(eyes, hair))
print("His teeth are usually {0} dependong on the coffee.".format(teeth))
print("If I add {0}, {1}, and {2} I'd get {3}.".format(age, height, weight, age + height + weight))
# Additional Study Drills
# convert inches to centimeters
inches = 23
centimeters = inches * 2.54  # 1 inch = 2.54 cm
print("{0} inches is equal to {1} centimeters.".format(inches, centimeters))
# convert pounds to kilograms
pounds = 22
kilograms = pounds / 2.2  # roughly 2.2 pounds per kilogram
print("{0} pounds is equal to {1} kilograms.".format(pounds, kilograms))
# You cannot switch between automatic and manual field numbering.
# print("The number {} in base 10 is equal to the number {0:b} in base 2.".format(5))
# You must include the field number if using a format specification in a string with multiple fields.
# print("The number {} in base 10 is equal to the number {:b} in base 2.".format(5))
num = 23
print("The number {0} in base 10 is equal to the number {0:b} in base 2.".format(num))
print("The number {0} in base 10 is equal to the number {0:o} in base 8.".format(num))
print("The number {0} in base 10 is equal to the number {0:x} in base 16.".format(num))
print("The unicode character represented by the integer {0} is {0:c}.".format(97))
print("The number {0} represented using exponent notation is {0:e}.".format(num))
print("The number {0} represented using fixed point notation is {0:f}.".format(num))
fnum = 123985.12376908
print("{0} is {0:-<+,.5f}".format(fnum))
print("{0} is {0:-<+20,.5f}".format(fnum))
print("{0} is {0:-<20,.5f}".format(fnum))
print("{0} is {0:->20,.5f}".format(fnum))
print("{0} is {0:-=20,.5f}".format(fnum))
print("{0} is {0:-^20,.5f}".format(fnum))
# thing = [1, 2, 3]
thing = 'a sentence'
print("{0} stringified is {0!s}".format(thing))
print("{0} reprified is {0!r}".format(thing))
print("{0} asciified is {0!a}".format(thing))
| mit | 7,507,327,738,741,249,000 | 35.844156 | 117 | 0.667607 | false |
maxwward/SCOPEBak | askbot/views/readers.py | 1 | 27155 | # encoding:utf-8
"""
:synopsis: views "read-only" for main textual content
By main textual content is meant - text of Exercises, Problems and Comments.
The "read-only" requirement here is not 100% strict, as for example "exercise" view does
allow adding new comments via Ajax form post.
"""
import datetime
import logging
import urllib
import operator
from django.shortcuts import get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse, Http404, HttpResponseNotAllowed
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.template import Context
from django.utils import simplejson
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from django.utils import translation
from django.views.decorators import csrf
from django.core.urlresolvers import reverse
from django.core import exceptions as django_exceptions
from django.contrib.humanize.templatetags import humanize
from django.http import QueryDict
from django.conf import settings
import askbot
from askbot import exceptions
from askbot.utils.diff import textDiff as htmldiff
from askbot.forms import ProblemForm, ShowExerciseForm, AnswerForm
from askbot import conf
from askbot import models
from askbot import schedules
from askbot.models.tag import Tag
from askbot import const
from askbot.utils import functions
from askbot.utils.html import sanitize_html
from askbot.utils.decorators import anonymous_forbidden, ajax_only, get_only
from askbot.search.state_manager import SearchState, DummySearchState
from askbot.templatetags import extra_tags
from askbot.conf import settings as askbot_settings
from askbot.skins.loaders import render_into_skin, get_template #jinja2 template loading enviroment
from askbot.views import context
# used in index page
#todo: - take these out of const or settings
from askbot.models import Post, Vote
INDEX_PAGE_SIZE = 30
INDEX_AWARD_SIZE = 15
INDEX_TAGS_SIZE = 25
# used in tags list
DEFAULT_PAGE_SIZE = 60
# used in exercises
# used in problems
#refactor? - we have these
#views that generate a listing of exercises in one way or another:
#index, without_problem, exercises, search, tag
#should we dry them up?
#related topics - information drill-down, search refinement
def index(request):#generates front page - shows listing of exercises sorted in various ways
"""index view mapped to the root url of the Q&A site
"""
return HttpResponseRedirect(reverse('exercises'))
def exercises(request, **kwargs):
"""
    List of Exercises, Tagged exercises, and Exercises without problems
    matching the search query or user selection.
"""
#before = datetime.datetime.now()
if request.method != 'GET':
return HttpResponseNotAllowed(['GET'])
search_state = SearchState(
user_logged_in=request.user.is_authenticated(),
**kwargs
)
page_size = int(askbot_settings.DEFAULT_EXERCISES_PAGE_SIZE)
qs, meta_data = models.Thread.objects.run_advanced_search(
request_user=request.user, search_state=search_state
)
if meta_data['non_existing_tags']:
search_state = search_state.remove_tags(meta_data['non_existing_tags'])
paginator = Paginator(qs, page_size)
if paginator.num_pages < search_state.page:
search_state.page = 1
page = paginator.page(search_state.page)
page.object_list = list(page.object_list) # evaluate the queryset
# INFO: Because for the time being we need exercise posts and thread authors
# down the pipeline, we have to precache them in thread objects
models.Thread.objects.precache_view_data_hack(threads=page.object_list)
related_tags = Tag.objects.get_related_to_search(
threads=page.object_list,
ignored_tag_names=meta_data.get('ignored_tag_names',[])
)
tag_list_type = askbot_settings.TAG_LIST_FORMAT
if tag_list_type == 'cloud': #force cloud to sort by name
related_tags = sorted(related_tags, key = operator.attrgetter('name'))
contributors = list(
models.Thread.objects.get_thread_contributors(
thread_list=page.object_list
).only('id', 'username', 'gravatar')
)
paginator_context = {
'is_paginated' : (paginator.count > page_size),
'pages': paginator.num_pages,
'page': search_state.page,
'has_previous': page.has_previous(),
'has_next': page.has_next(),
'previous': page.previous_page_number(),
'next': page.next_page_number(),
'base_url' : search_state.query_string(),
'page_size' : page_size,
}
# We need to pass the rss feed url based
# on the search state to the template.
# We use QueryDict to get a querystring
# from dicts and arrays. Much cleaner
# than parsing and string formating.
rss_query_dict = QueryDict("").copy()
if search_state.query:
# We have search string in session - pass it to
# the QueryDict
rss_query_dict.update({"q": search_state.query})
if search_state.tags:
# We have tags in session - pass it to the
# QueryDict but as a list - we want tags+
rss_query_dict.setlist("tags", search_state.tags)
context_feed_url = '/%sfeeds/rss/?%s' % (settings.ASKBOT_URL, rss_query_dict.urlencode()) # Format the url with the QueryDict
reset_method_count = len(filter(None, [search_state.query, search_state.tags, meta_data.get('author_name', None)]))
if request.is_ajax():
q_count = paginator.count
exercise_counter = ungettext('%(q_num)s exercise', '%(q_num)s exercises', q_count)
exercise_counter = exercise_counter % {'q_num': humanize.intcomma(q_count),}
if q_count > page_size:
paginator_tpl = get_template('main_page/paginator.html', request)
paginator_html = paginator_tpl.render(Context({
'context': functions.setup_paginator(paginator_context),
'exercises_count': q_count,
'page_size' : page_size,
'search_state': search_state,
}))
else:
paginator_html = ''
exercises_tpl = get_template('main_page/exercises_loop.html', request)
exercises_html = exercises_tpl.render(Context({
'threads': page,
'search_state': search_state,
'reset_method_count': reset_method_count,
'request': request
}))
ajax_data = {
'query_data': {
'tags': search_state.tags,
'sort_order': search_state.sort,
'ask_query_string': search_state.ask_query_string(),
},
'paginator': paginator_html,
'exercise_counter': exercise_counter,
'faces': [],#[extra_tags.gravatar(contributor, 48) for contributor in contributors],
'feed_url': context_feed_url,
'query_string': search_state.query_string(),
'page_size' : page_size,
'exercises': exercises_html.replace('\n',''),
'non_existing_tags': meta_data['non_existing_tags']
}
ajax_data['related_tags'] = [{
'name': escape(tag.name),
'used_count': humanize.intcomma(tag.local_used_count)
} for tag in related_tags]
return HttpResponse(simplejson.dumps(ajax_data), mimetype = 'application/json')
else: # non-AJAX branch
template_data = {
'active_tab': 'exercises',
'author_name' : meta_data.get('author_name',None),
'contributors' : contributors,
'context' : paginator_context,
'is_without_problem' : False,#remove this from template
'interesting_tag_names': meta_data.get('interesting_tag_names', None),
'ignored_tag_names': meta_data.get('ignored_tag_names', None),
'subscribed_tag_names': meta_data.get('subscribed_tag_names', None),
'language_code': translation.get_language(),
'name_of_anonymous_user' : models.get_name_of_anonymous_user(),
'page_class': 'main-page',
'page_size': page_size,
'query': search_state.query,
'threads' : page,
'exercises_count' : paginator.count,
'reset_method_count': reset_method_count,
'scope': search_state.scope,
'show_sort_by_relevance': conf.should_show_sort_by_relevance(),
'search_tags' : search_state.tags,
'sort': search_state.sort,
'tab_id' : search_state.sort,
'tags' : related_tags,
'tag_list_type' : tag_list_type,
'font_size' : extra_tags.get_tag_font_size(related_tags),
'display_tag_filter_strategy_choices': conf.get_tag_display_filter_strategy_choices(),
'email_tag_filter_strategy_choices': const.TAG_EMAIL_FILTER_STRATEGY_CHOICES,
'update_avatar_data': schedules.should_update_avatar_data(request),
'query_string': search_state.query_string(),
'search_state': search_state,
'feed_url': context_feed_url,
}
return render_into_skin('main_page.html', template_data, request)
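# The exercises() view above doubles as the AJAX endpoint for live search: the same
# search state is evaluated either way, and the request.is_ajax() branch returns JSON
# fragments (paginator, rendered exercise list, related tags) instead of a full page.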
def tags(request):#view showing a listing of available tags - plain list
tag_list_type = askbot_settings.TAG_LIST_FORMAT
if tag_list_type == 'list':
stag = ""
is_paginated = True
sortby = request.GET.get('sort', 'used')
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
stag = request.GET.get("query", "").strip()
if stag != '':
objects_list = Paginator(
models.Tag.objects.filter(
deleted=False,
name__icontains=stag
).exclude(
used_count=0
),
DEFAULT_PAGE_SIZE
)
else:
if sortby == "name":
objects_list = Paginator(models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("name"), DEFAULT_PAGE_SIZE)
else:
objects_list = Paginator(models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("-used_count"), DEFAULT_PAGE_SIZE)
try:
tags = objects_list.page(page)
except (EmptyPage, InvalidPage):
tags = objects_list.page(objects_list.num_pages)
paginator_data = {
'is_paginated' : is_paginated,
'pages': objects_list.num_pages,
'page': page,
'has_previous': tags.has_previous(),
'has_next': tags.has_next(),
'previous': tags.previous_page_number(),
'next': tags.next_page_number(),
'base_url' : reverse('tags') + '?sort=%s&' % sortby
}
paginator_context = functions.setup_paginator(paginator_data)
data = {
'active_tab': 'tags',
'page_class': 'tags-page',
'tags' : tags,
'tag_list_type' : tag_list_type,
'stag' : stag,
'tab_id' : sortby,
'keywords' : stag,
'paginator_context' : paginator_context,
}
else:
stag = ""
sortby = request.GET.get('sort', 'name')
if request.method == "GET":
stag = request.GET.get("query", "").strip()
if stag != '':
tags = models.Tag.objects.filter(deleted=False, name__icontains=stag).exclude(used_count=0)
else:
if sortby == "name":
tags = models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("name")
else:
tags = models.Tag.objects.all().filter(deleted=False).exclude(used_count=0).order_by("-used_count")
font_size = extra_tags.get_tag_font_size(tags)
data = {
'active_tab': 'tags',
'page_class': 'tags-page',
'tags' : tags,
'tag_list_type' : tag_list_type,
'font_size' : font_size,
'stag' : stag,
'tab_id' : sortby,
'keywords' : stag,
'search_state': SearchState(*[None for x in range(7)])
}
return render_into_skin('tags.html', data, request)
@csrf.csrf_protect
#@cache_page(60 * 5)
def exercise(request, id):#refactor - long subroutine. display exercise body, problems and comments
"""view that displays body of the exercise and
    all problems attached to it
"""
#process url parameters
#todo: fix inheritance of sort method from exercises
#before = datetime.datetime.now()
default_sort_method = request.session.get('exercises_sort_method', 'votes')
form = ShowExerciseForm(request.GET, default_sort_method)
form.full_clean()#always valid
show_problem = form.cleaned_data['show_problem']
show_comment = form.cleaned_data['show_comment']
show_page = form.cleaned_data['show_page']
problem_sort_method = form.cleaned_data['problem_sort_method']
#load exercise and maybe refuse showing deleted exercise
#if the exercise does not exist - try mapping to old exercises
#and and if it is not found again - then give up
try:
exercise_post = models.Post.objects.filter(
post_type = 'exercise',
id = id
).select_related('thread')[0]
except IndexError:
# Handle URL mapping - from old Q/A/C/ URLs to the new one
try:
exercise_post = models.Post.objects.filter(
post_type='exercise',
old_exercise_id = id
).select_related('thread')[0]
except IndexError:
raise Http404
if show_problem:
try:
old_problem = models.Post.objects.get_problems().get(old_problem_id=show_problem)
return HttpResponseRedirect(old_problem.get_absolute_url())
except models.Post.DoesNotExist:
pass
elif show_comment:
try:
old_comment = models.Post.objects.get_comments().get(old_comment_id=show_comment)
return HttpResponseRedirect(old_comment.get_absolute_url())
except models.Post.DoesNotExist:
pass
try:
exercise_post.assert_is_visible_to(request.user)
except exceptions.ExerciseHidden, error:
request.user.message_set.create(message = unicode(error))
return HttpResponseRedirect(reverse('index'))
#redirect if slug in the url is wrong
if request.path.split('/')[-2] != exercise_post.slug:
logging.debug('no slug match!')
exercise_url = '?'.join((
exercise_post.get_absolute_url(),
urllib.urlencode(request.GET)
))
return HttpResponseRedirect(exercise_url)
#resolve comment and problem permalinks
#they go first because in theory both can be moved to another exercise
#this block "returns" show_post and assigns actual comment and problem
#to show_comment and show_problem variables
#in the case if the permalinked items or their parents are gone - redirect
#redirect also happens if id of the object's origin post != requested id
show_post = None #used for permalinks
if show_comment:
#if url calls for display of a specific comment,
#check that comment exists, that it belongs to
#the current exercise
#if it is an problem comment and the problem is hidden -
#redirect to the default view of the exercise
#if the exercise is hidden - redirect to the main page
#in addition - if url points to a comment and the comment
#is for the problem - we need the problem object
try:
show_comment = models.Post.objects.get_comments().get(id=show_comment)
except models.Post.DoesNotExist:
error_message = _(
'Sorry, the comment you are looking for has been '
'deleted and is no longer accessible'
)
request.user.message_set.create(message = error_message)
return HttpResponseRedirect(exercise_post.thread.get_absolute_url())
if str(show_comment.thread._exercise_post().id) != str(id):
return HttpResponseRedirect(show_comment.get_absolute_url())
show_post = show_comment.parent
try:
show_comment.assert_is_visible_to(request.user)
except exceptions.ProblemHidden, error:
request.user.message_set.create(message = unicode(error))
#use reverse function here because exercise is not yet loaded
return HttpResponseRedirect(reverse('exercise', kwargs = {'id': id}))
except exceptions.ExerciseHidden, error:
request.user.message_set.create(message = unicode(error))
return HttpResponseRedirect(reverse('index'))
elif show_problem:
#if the url calls to view a particular problem to
#exercise - we must check whether the exercise exists
#whether problem is actually corresponding to the current exercise
#and that the visitor is allowed to see it
show_post = get_object_or_404(models.Post, post_type='problem', id=show_problem)
if str(show_post.thread._exercise_post().id) != str(id):
return HttpResponseRedirect(show_post.get_absolute_url())
try:
show_post.assert_is_visible_to(request.user)
except django_exceptions.PermissionDenied, error:
request.user.message_set.create(message = unicode(error))
return HttpResponseRedirect(reverse('exercise', kwargs = {'id': id}))
thread = exercise_post.thread
logging.debug('problem_sort_method=' + unicode(problem_sort_method))
#load problems and post id's->athor_id mapping
#posts are pre-stuffed with the correctly ordered comments
updated_exercise_post, problems, post_to_author, published_problem_ids = thread.get_cached_post_data(
sort_method = problem_sort_method,
user = request.user
)
exercise_post.set_cached_comments(
updated_exercise_post.get_cached_comments()
)
#Post.objects.precache_comments(for_posts=[exercise_post] + problems, visitor=request.user)
user_votes = {}
user_post_id_list = list()
#todo: cache this query set, but again takes only 3ms!
if request.user.is_authenticated():
user_votes = Vote.objects.filter(
user=request.user,
voted_post__id__in = post_to_author.keys()
).values_list('voted_post_id', 'vote')
user_votes = dict(user_votes)
#we can avoid making this query by iterating through
#already loaded posts
user_post_id_list = [
id for id in post_to_author if post_to_author[id] == request.user.id
]
#resolve page number and comment number for permalinks
show_comment_position = None
if show_comment:
show_page = show_comment.get_page_number(problem_posts=problems)
show_comment_position = show_comment.get_order_number()
elif show_problem:
show_page = show_post.get_page_number(problem_posts=problems)
objects_list = Paginator(problems, const.PROBLEMS_PAGE_SIZE)
if show_page > objects_list.num_pages:
return HttpResponseRedirect(exercise_post.get_absolute_url())
page_objects = objects_list.page(show_page)
#count visits
#import ipdb; ipdb.set_trace()
if functions.not_a_robot_request(request):
#todo: split this out into a subroutine
#todo: merge view counts per user and per session
#1) view count per session
update_view_count = False
if 'exercise_view_times' not in request.session:
request.session['exercise_view_times'] = {}
last_seen = request.session['exercise_view_times'].get(exercise_post.id, None)
if thread.last_activity_by_id != request.user.id:
if last_seen:
if last_seen < thread.last_activity_at:
update_view_count = True
else:
update_view_count = True
request.session['exercise_view_times'][exercise_post.id] = \
datetime.datetime.now()
#2) run the slower jobs in a celery task
from askbot import tasks
tasks.record_exercise_visit.delay(
exercise_post = exercise_post,
user = request.user,
update_view_count = update_view_count
)
paginator_data = {
'is_paginated' : (objects_list.count > const.PROBLEMS_PAGE_SIZE),
'pages': objects_list.num_pages,
'page': show_page,
'has_previous': page_objects.has_previous(),
'has_next': page_objects.has_next(),
'previous': page_objects.previous_page_number(),
'next': page_objects.next_page_number(),
'base_url' : request.path + '?sort=%s&' % problem_sort_method,
}
paginator_context = functions.setup_paginator(paginator_data)
#todo: maybe consolidate all activity in the thread
#for the user into just one query?
favorited = thread.has_favorite_by_user(request.user)
is_cacheable = True
if show_page != 1:
is_cacheable = False
elif show_comment_position > askbot_settings.MAX_COMMENTS_TO_SHOW:
is_cacheable = False
initial = {
'wiki': exercise_post.wiki and askbot_settings.WIKI_ON,
'email_notify': thread.is_followed_by(request.user)
}
#maybe load draft
if request.user.is_authenticated():
#todo: refactor into methor on thread
drafts = models.DraftProblem.objects.filter(
author=request.user,
thread=thread
)
if drafts.count() > 0:
initial['text'] = drafts[0].text
problem_form = ProblemForm(initial)
answer_form = AnswerForm(initial)
user_can_post_comment = (
request.user.is_authenticated() and request.user.can_post_comment()
)
user_already_gave_problem = False
previous_problem = None
if request.user.is_authenticated():
if askbot_settings.LIMIT_ONE_PROBLEM_PER_USER:
for problem in problems:
if problem.author == request.user:
user_already_gave_problem = True
previous_problem = problem
break
data = {
'is_cacheable': False,#is_cacheable, #temporary, until invalidation fix
'long_time': const.LONG_TIME,#"forever" caching
'page_class': 'exercise-page',
'active_tab': 'exercises',
'exercise' : exercise_post,
'thread': thread,
'thread_is_moderated': thread.is_moderated(),
'user_is_thread_moderator': thread.has_moderator(request.user),
'published_problem_ids': published_problem_ids,
'problem' : problem_form,
'problems' : page_objects.object_list,
'problem_count': thread.get_problem_count(request.user),
'category_tree_data': askbot_settings.CATEGORY_TREE,
'user_votes': user_votes,
'user_post_id_list': user_post_id_list,
'user_can_post_comment': user_can_post_comment,#in general
'user_already_gave_problem': user_already_gave_problem,
'previous_problem': previous_problem,
'tab_id' : problem_sort_method,
'favorited' : favorited,
'similar_threads' : thread.get_similar_threads(),
'language_code': translation.get_language(),
'paginator_context' : paginator_context,
'show_post': show_post,
'show_comment': show_comment,
'show_comment_position': show_comment_position,
'answer': answer_form,
#'answers': answer_form,
}
#shared with ...
if askbot_settings.GROUPS_ENABLED:
data['sharing_info'] = thread.get_sharing_info()
data.update(context.get_for_tag_editor())
return render_into_skin('exercise.html', data, request)
def revisions(request, id, post_type = None):
assert post_type in ('exercise', 'problem')
post = get_object_or_404(models.Post, post_type=post_type, id=id)
revisions = list(models.PostRevision.objects.filter(post=post))
revisions.reverse()
for i, revision in enumerate(revisions):
if i == 0:
revision.diff = sanitize_html(revisions[i].html)
revision.summary = _('initial version')
else:
revision.diff = htmldiff(
sanitize_html(revisions[i-1].html),
sanitize_html(revision.html)
)
data = {
'page_class':'revisions-page',
'active_tab':'exercises',
'post': post,
'revisions': revisions,
}
return render_into_skin('revisions.html', data, request)
@csrf.csrf_exempt
@ajax_only
@anonymous_forbidden
@get_only
def get_comment(request):
"""returns text of a comment by id
    via an ajax response; requires a GET request
    made via ajax
"""
id = int(request.GET['id'])
comment = models.Post.objects.get(post_type='comment', id=id)
request.user.assert_can_edit_comment(comment)
return {'text': comment.text}
#@decorators.check_authorization_to_post(_('Please log in to post answers'))
#@decorators.check_spam('text')
@csrf.csrf_protect
def new_answer_form(request, mid, pid):
exercise_post = models.Post.objects.filter(
post_type = 'exercise',
id = mid
).select_related('thread')[0]
problem_post = models.Post.objects.filter(
post_type = 'problem',
id = pid
).select_related('thread')[0]
thread = exercise_post.thread
initial = {
'wiki': exercise_post.wiki and askbot_settings.WIKI_ON,
'email_notify': thread.is_followed_by(request.user)
}
answer_form = AnswerForm(initial)
# if exercise doesn't exist, redirect to main page
data = {
'pid': pid,
'mid': mid,
'exercise': exercise_post,
'problem': problem_post,
'thread': thread,
'answer_form': answer_form
}
return render_into_skin('exercise/answer_form.html', data, request)
| gpl-3.0 | -1,405,857,867,789,381,400 | 39.409226 | 153 | 0.604198 | false |
uptimerobot/uptimerobot-cli | uptimerobot/client.py | 1 | 12485 | from __future__ import absolute_import, division, print_function, unicode_literals
import re
import sys
import json
import requests
from . import APIError, HTTPError
from .monitor import Monitor
from .alert_contact import AlertContact
# Ensure that we can test against the appropriate string types.
if sys.version_info < (3, 0):
string = basestring
else:
string = (str, bytes)
class Client(object):
"""An uptimerobot API client"""
URL = "http://api.uptimerobot.com/"
LIST_SEPARATOR = "-"
ID_PATTERN = "^\d+$"
def __init__(self, api_key):
self.api_key = api_key
def get(self, action, **values):
payload = {
"apiKey": self.api_key,
"format": "json",
"noJsonCallback": 1,
}
payload.update(values)
response = requests.get(self.URL + action, params=payload)
# Handle client/server errors with the request.
if response.status_code != requests.codes.ok:
            try:
                # raise_for_status() raises requests.HTTPError for 4xx/5xx responses.
                response.raise_for_status()
            except Exception as ex:
                raise HTTPError(ex)
            else:
                # A non-error, non-OK status (e.g. a redirect) is still a failure here.
                raise HTTPError("Unexpected HTTP status code: %d" % response.status_code)
# Parse the json in the correct response.
data = json.loads(response.text)
# Request went through, but was bad in some way.
if data["stat"] == "fail":
raise APIError(data["message"])
return data
def get_monitors(self, ids=None, show_logs=False,
show_log_alert_contacts=False,
show_alert_contacts=False,
custom_uptime_ratio=False,
show_log_timezone=False):
"""
Args
            ids
                IDs of the monitors to list. If None, then get all monitors. [list<str>]
            show_logs
                Show logs [Boolean]
            show_log_alert_contacts
                Show the alert contacts that triggered each log entry [Boolean]
            show_alert_contacts
                Show the alert contacts assigned to each monitor [Boolean]
            custom_uptime_ratio
                Numbers of days to calculate uptime ratios over [list<int>]
            show_log_timezone
                Show the timezone for the log times [Boolean]
Returns
List of Monitor detail objects.
"""
variables = {}
if ids:
if any(not isinstance(id, string) for id in ids):
raise TypeError("ids must be strings")
if any(not re.match(self.ID_PATTERN, id) for id in ids):
raise ValueError("ids must be numeric")
variables["monitors"] = self.LIST_SEPARATOR.join(ids)
if show_logs:
variables["logs"] = "1"
if show_log_timezone:
variables["showTimezone"] = "1"
if show_log_alert_contacts:
variables["alertContacts"] = "1"
if show_alert_contacts:
variables["showMonitorAlertContacts"] = "1"
if custom_uptime_ratio:
if not all(isinstance(n, int) and n > 0 for n in custom_uptime_ratio):
raise TypeError("custom_uptime_ratio must be a list of positive integers")
variables["customUptimeRatio"] = self.LIST_SEPARATOR.join(str(n) for n in custom_uptime_ratio)
data = self.get("getMonitors", **variables)
monitors = [Monitor(mon, custom_uptime_ratio) for mon in data["monitors"]["monitor"]]
return monitors
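    # Illustrative usage sketch (not part of the original client; the API key below
    # is a placeholder):
    #
    #   client = Client("u123456-0123456789abcdef")
    #   monitors = client.get_monitors(custom_uptime_ratio=[7, 30])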
def new_monitor(self, name, url, type,
subtype=None,
port=None,
keyword_type=None,
keyword=None,
username=None,
password=None,
alert_contacts=None):
"""
Args
name
Human-readable name to assign to the monitor [str].
url
URL [str]
type
Monitor type [int]
subtype
subtype of the monitor [int]
keyword_type
Type of keyword to use (requires keyword be set) [int]
keyword
Keyword to use (requires keyword_type be set)
            username
                Username to use for private site (requires password be set)
            password
                Password to use for private site (requires username be set)
            alert_contacts
                Alert contacts to give the monitor [list<str>]
Returns
ID of monitor created.
"""
if type not in Monitor.TYPES:
raise ValueError("type must be one of %s" % ", ".join(str(m) for m in Monitor.TYPES.keys()))
variables = {
"monitorFriendlyName": name,
"monitorURL": url,
"monitorType": str(type),
}
if subtype is not None:
if subtype not in Monitor.SUBTYPES:
raise ValueError("subtype must be one of %s" % ", ".join(str(m) for m in Monitor.SUBTYPES.keys()))
variables["monitorSubType"] = str(subtype)
if port is not None:
variables["monitorPort"] = str(port)
if keyword_type and keyword:
if keyword_type not in Monitor.KEYWORD_TYPES:
raise ValueError("keyword_type must be one of %s" % ", ".join(str(m) for m in Monitor.KEYWORD_TYPES.keys()))
variables["monitorKeywordType"] = str(keyword_type)
variables["monitorKeywordValue"] = keyword
elif keyword_type is not None or keyword is not None:
raise ValueError("Requires both keyword_type and keyword if either are specified")
if username is not None and password is not None:
variables["monitorHTTPUsername"] = username
variables["monitorHTTPPassword"] = password
elif username is not None or password is not None:
raise ValueError("Requires both username and password if either are specified")
if alert_contacts:
if any(not isinstance(id, string) for id in alert_contacts):
raise TypeError("alert_contacts must be strings")
if any(not re.match(self.ID_PATTERN, id) for id in alert_contacts):
raise ValueError("alert_contacts must be numeric")
variables["monitorAlertContacts"] = self.LIST_SEPARATOR.join(alert_contacts)
data = self.get("newMonitor", **variables)
return data["monitor"]["id"]
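    # Usage sketch (illustrative): `client` stands for a configured instance of
    # this wrapper; the type and alert-contact values are assumptions chosen only
    # to show the argument shapes enforced by the validation above:
    #
    #     monitor_id = client.new_monitor(
    #         name="Example site",
    #         url="http://example.com",
    #         type=1,                      # must be a key of Monitor.TYPES
    #         alert_contacts=["0123456"],  # numeric strings, per ID_PATTERN
    #     )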
def edit_monitor(self, id,
status=None,
name=None,
url=None,
type=None,
subtype=None,
port=None,
keyword_type=None,
keyword=None,
username=None,
password=None,
alert_contacts=None):
"""
Args
id
ID number of the monitor to edit [str]
status
Status to set [int]
name
Human-readable name to assign to the monitor.
url
URL to monitor
type
Monitor type [int]
subtype
subtype of the monitor [int]
keyword_type
Type of keyword to use (requires keyword be set) [int]
keyword
Keyword to use (requires keyword_type be set)
            username
                Username to use for private site (requires password be set)
            password
                Password to use for private site (requires username be set)
            alert_contacts
                Alert contacts to give the monitor [list<str>]
Returns
ID of monitor edited.
"""
if not isinstance(id, string):
raise TypeError("id must be a string")
if not re.match(self.ID_PATTERN, id):
raise ValueError("id must be numeric")
variables = {
"monitorID": id,
}
if status is not None:
if status not in Monitor.STATUSES:
raise ValueError("status must be one of %s" % ", ".join(str(m) for m in Monitor.STATUSES.keys()))
variables["monitorStatus"] = str(status)
if name is not None:
variables["monitorFriendlyName"] = name
if url is not None:
variables["monitorURL"] = url
if type is not None:
if type not in Monitor.TYPES:
raise ValueError("type must be one of %s" % ", ".join(str(m) for m in Monitor.TYPES.keys()))
variables["monitorType"] = str(type)
if subtype is not None:
if subtype not in Monitor.SUBTYPES:
raise ValueError("subtype must be one of %s" % ", ".join(str(m) for m in Monitor.SUBTYPES.keys()))
variables["monitorSubType"] = str(subtype)
if port is not None:
variables["monitorPort"] = str(port)
if keyword_type is not None:
if keyword_type not in Monitor.KEYWORD_TYPES:
raise ValueError("keyword_type must be one of %s" % ", ".join(str(m) for m in Monitor.KEYWORD_TYPES.keys()))
variables["monitorKeywordType"] = str(keyword_type)
if keyword:
variables["monitorKeywordValue"] = keyword
if username:
variables["monitorHTTPUsername"] = username
if password:
variables["monitorHTTPPassword"] = password
if alert_contacts:
if any(not isinstance(id, string) for id in alert_contacts):
raise TypeError("alert_contacts must be strings")
if any(not re.match(self.ID_PATTERN, id) for id in alert_contacts):
raise ValueError("alert_contacts must be numeric")
variables["monitorAlertContacts"] = self.LIST_SEPARATOR.join(alert_contacts)
data = self.get("editMonitor", **variables)
return data["monitor"]["id"]
def delete_monitor(self, id):
"""
Args
id
ID of the monitor to delete [str]
Returns
ID of monitor deleted [str]
"""
if not isinstance(id, string):
raise TypeError("id must be a string")
if not re.match(self.ID_PATTERN, id):
raise ValueError("id must be numeric")
data = self.get("deleteMonitor", monitorID=id)
return data["monitor"]["id"]
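    # Usage sketch (illustrative, placeholder values): editing and deleting take
    # the same numeric-string IDs that the checks above enforce:
    #
    #     client.edit_monitor("123456789", name="Renamed monitor")
    #     client.delete_monitor("123456789")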
def get_alert_contacts(self, ids=None):
"""
Args
ids
                IDs of the alert contacts to list. If None, then get all contacts. [list<str>]
Returns
List of AlertContact detail objects.
"""
variables = {}
if ids is not None:
if any(not isinstance(id, string) for id in ids):
raise TypeError("ids must be strings")
if any(not re.match(self.ID_PATTERN, id) for id in ids):
raise ValueError("ids must be numeric")
variables["alertcontacts"] = self.LIST_SEPARATOR.join(ids)
data = self.get("getAlertContacts", **variables)
alerts = [AlertContact(ac) for ac in data["alertcontacts"]["alertcontact"]]
return alerts
def new_alert_contact(self, type, value):
"""
Args
type
Type of the new alert to create [int]
value
                email address (or other contact value, depending on type) to alert [str]
Returns
ID of alert contact created [str]
"""
if type not in AlertContact.TYPES:
raise ValueError("type must be one of %s" % ", ".join(str(t) for t in AlertContact.TYPES))
if not isinstance(value, string):
raise TypeError("value must be a string")
data = self.get("newAlertContact", alertContactType=str(type), alertContactValue=value)
return data["alertcontact"]["id"]
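    # Usage sketch (illustrative): the type must be a key of AlertContact.TYPES
    # (defined elsewhere in this module); the value shown is a placeholder:
    #
    #     contact_id = client.new_alert_contact(type=2, value="[email protected]")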
def delete_alert_contact(self, id):
"""
Args
id
ID of the alert contact to delete [str]
Returns
ID of alert contact deleted [str]
"""
if not isinstance(id, string):
raise TypeError("id must be a string")
if not re.match(self.ID_PATTERN, id):
raise ValueError("id must be numeric")
data = self.get("deleteAlertContact", alertContactID=id)
return data["alertcontact"]["id"] | gpl-3.0 | -6,106,212,682,719,807,000 | 29.678133 | 124 | 0.546496 | false |
Grumbel/dirtool | dirtools/cmd_desktop.py | 1 | 2895 | # dirtool.py - diff tool for directories
# Copyright (C) 2018 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import sys
from xdg.DesktopEntry import DesktopEntry
from xdg.BaseDirectory import xdg_data_dirs
from dirtools.xdg_desktop import get_desktop_file
# https://standards.freedesktop.org/desktop-entry-spec/latest/
def parse_args(args):
parser = argparse.ArgumentParser(description="Query the systems .desktop files")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("DESKTOP", nargs='?')
group.add_argument('-l', '--list-dirs', action='store_true', default=False,
help="List all directories scanned for .desktop files")
group.add_argument('-L', '--list-files', action='store_true', default=False,
help="List all .desktop files")
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help="Be verbose")
return parser.parse_args(args)
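# Example invocations (illustrative; `dirtool-desktop` stands in for however the
# main_entrypoint() below ends up being installed as a console script):
#
#     dirtool-desktop --list-dirs
#     dirtool-desktop --list-files --verbose
#     dirtool-desktop firefox.desktop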
def main(argv):
args = parse_args(argv[1:])
if args.list_dirs:
for directory in xdg_data_dirs:
print(os.path.join(directory, "applications"))
elif args.list_files:
for directory in xdg_data_dirs:
path = os.path.join(directory, "applications")
try:
for entry in os.listdir(path):
if entry.endswith(".desktop"):
if args.verbose:
filename = os.path.join(path, entry)
desktop = DesktopEntry(filename)
print("{:70} {:40} {:40}".format(filename, desktop.getName(), desktop.getExec()))
else:
print(os.path.join(path, entry))
except FileNotFoundError:
pass
else:
filename = get_desktop_file(args.DESKTOP)
print(filename)
desktop = DesktopEntry(filename)
print("Name: {}".format(desktop.getName()))
print("Exec: {}".format(desktop.getExec()))
print("TryExec: {}".format(desktop.getTryExec()))
print("Mime-Types: {}".format(desktop.getMimeTypes()))
def main_entrypoint():
exit(main(sys.argv))
# EOF #
| gpl-3.0 | 4,080,812,269,677,872,000 | 35.1875 | 111 | 0.629016 | false |
quokkaproject/flask-htmlbuilder | setup.py | 1 | 1061 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name='quokka-flask-htmlbuilder',
version='0.13',
url='http://github.com/quokkaproject/flask-htmlbuilder',
license='MIT',
author='QuokkaProject',
author_email='[email protected]',
description='Fork of Flexible Python-only HTML generation for Flask',
long_description=__doc__,
packages=['flask_htmlbuilder'],
namespace_packages=['flask_htmlbuilder'],
test_suite='nose.collector',
zip_safe=False,
platforms='any',
install_requires=[
'Flask'
],
tests_require=[
'nose'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| bsd-3-clause | -672,378,022,798,663,400 | 28.472222 | 73 | 0.6164 | false |
Rfam/rfam-production | scripts/support/mirnas/validate_family.py | 1 | 3027 | import os
import json
import argparse
import urllib
import logging
# ------------------------------------------------------------------------------------------
search_dirs = ["/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk1_searches",
"/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk2_searches",
"/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch2/searches"]
MEMORY = 8000
CPU = 4
LSF_GROUP = "/family_srch"
REF_STRING = """RN [1]
RM 30423142
RT miRBase: from microRNA sequences to function.
RA Kozomara A, Birgaoanu M, Griffiths-Jones S;
RL Nucleic Acids Res. 2019;47:D155."""
# ------------------------------------------------------------------------------------------
def check_desc_reference_is_valid(desc_loc, ref_string):
fp = open(desc_loc, 'r')
desc_lines = fp.read()
fp.close()
# check if we can find the reference lines in DESC
    if desc_lines.find(ref_string) != -1:
return True
return False
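# Usage sketch (illustrative): given the DESC file of a checked-out family, the
# reference block above can be validated like this (the path is a placeholder):
#
#     ok = check_desc_reference_is_valid("/path/to/family/DESC", REF_STRING)
#     if not ok:
#         print ("DESC is missing the miRBase reference")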
# ------------------------------------------------------------------------------------------
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--mirna-list",
help="A .json file containing all miRNAs to validate", action="store")
parser.add_argument("--desc", help="Only perform DESC validation",
action="store_true", default=False)
parser.add_argument("--svn", help="Check family exists in the SVN repository",
action="store_true", default=False)
parser.add_argument("--log", help="Creates a log file with all validated DESC files",
action="store_true", default=False)
return parser
# ------------------------------------------------------------------------------------------
def get_mirna_directory_location(mirna_id):
    if mirna_id.find("_relabelled") == -1:
        dir_label = mirna_id + "_relabelled"
    else:
        dir_label = mirna_id
for search_dir in search_dirs:
family_dir_loc = os.path.join(search_dir, dir_label)
if os.path.exists(family_dir_loc):
return family_dir_loc
return None
# ------------------------------------------------------------------------------------------
def check_family_exists_in_svn(rfam_acc):
svn_url = "https://xfamsvn.ebi.ac.uk/svn/data_repos/trunk/Families/%s"
status = False
    # Check if the entry exists in the SVN repo; set status=True if it does
return status
# ------------------------------------------------------------------------------------------
if __name__=='__main__':
parser = parse_arguments()
args = parser.parse_args()
fp = open(args.mirna_list, 'r')
mirnas = json.load(fp)
fp.close()
# if args.log is True:
for mirna in mirnas:
mirna_dir_loc = get_mirna_directory_location(mirna)
if mirna_dir_loc is not None:
if args.desc is True:
desc_loc = os.path.join(mirna_dir_loc, "DESC")
if os.path.exists(desc_loc):
check = check_desc_reference_is_valid(desc_loc, REF_STRING)
if check is False:
print (mirna_dir_loc)
| apache-2.0 | 5,354,791,960,162,409,000 | 26.27027 | 106 | 0.553023 | false |
cmheisel/django-jamsession | jamsession/forms/admin.py | 1 | 1149 | from django import forms
from jamsession.forms.fields import SchemaField
from jamsession.models import Schema
class SchemaAdminForm(forms.Form):
error_css_class = 'error'
required_css_class = 'required'
name = forms.CharField(required=True,
widget=forms.TextInput(
attrs={'class': 'vTextField'})
)
schema = SchemaField(widget=forms.Textarea, required=True)
def __init__(self, *args, **kwargs):
if 'instance' in kwargs:
self.instance = kwargs['instance']
del kwargs['instance']
super(SchemaAdminForm, self).__init__(*args, **kwargs)
class _meta(object):
model = Schema
def clean_name(self):
data = self.cleaned_data['name'].strip()
if not data:
raise forms.ValidationError("Name is required.")
if self._meta.model.objects.filter(name=data).count() >= 1:
raise forms.ValidationError("Name must be unique.")
return data
def save(self):
obj = self._meta.model(**self.cleaned_data)
obj.save()
return obj
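# Usage sketch (illustrative): this form is meant to be driven from Django admin
# views, but a minimal standalone use, assuming `schema_text` is valid input for
# SchemaField, would be:
#
#     form = SchemaAdminForm(data={'name': 'people', 'schema': schema_text})
#     if form.is_valid():
#         obj = form.save()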
| mit | -3,212,703,057,285,607,000 | 29.236842 | 67 | 0.585727 | false |
rbiswas4/SNsims | snsims_previous/snsims/tmp/models.py | 1 | 2804 | #!/usr/bin/env python
import sncosmo.models
import numpy
class SEDFileSource(sncosmo.models.TimeSeriesSource):
"""A TimeSeriesSource stored in a 3-column ASCII file format, for PHASE,
LAMBDA, and F_LAMBDA. The hash symbol # is a comment line.
The spectral flux density of this model is given by
.. math::
F(t, \lambda) = A \\times M(t, \lambda)
where _M_ is the flux defined on a grid in phase and wavelength and _A_
(amplitude) is the single free parameter of the model. It should be noted
that while t and \lambda are in the rest frame of the object, the flux
density is defined at redshift zero. This means that for objects with the
same intrinsic luminosity, the amplitude will be smaller for objects at
larger luminosity distances.
Parameters
----------
filename : str
Name of the filename that contains the Time Series
zero_before : bool, optional
If True, flux at phases before minimum phase will be zeroed. The
default is False, in which case the flux at such phases will be equal
to the flux at the minimum phase (``flux[0, :]`` in the input array).
version : str, optional
Version of the model. Default is `None`.
Returns
-------
`~sncosmo.TimeSeriesSource` instance representing the TimeSeriesSource
in file
"""
_param_names = ['amplitude']
param_names_latex = ['A']
def __init__(self, filename, zero_before=False, version=None):
phase, wave, flux = numpy.loadtxt(filename, unpack=True)
# Convert 3 column format to that expected by TimeSeriesSource
phase_u = numpy.unique(phase)
wave_u = numpy.unique(wave)
lenp = len(phase_u)
lenw = len(wave_u)
if lenp * lenw != len(flux):
raise TypeError('File is not a TimeSeriesSource')
i = numpy.zeros(len(flux), dtype='int')
j = numpy.zeros(len(flux), dtype='int')
for index, p in enumerate(phase_u):
i[phase == p] = index
for index, w in enumerate(wave_u):
j[wave == w] = index
flux = flux[i * lenw + j]
flux = numpy.reshape(flux, (lenp, lenw))
        super(SEDFileSource, self).__init__(phase_u, wave_u, flux,
                                            zero_before=zero_before,
                                            name=filename, version=version)
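# Usage sketch (illustrative): the path below is a placeholder for any
# three-column PHASE/LAMBDA/F_LAMBDA file of the form described in the class
# docstring:
#
#     source = SEDFileSource('/path/to/template.SED')
#     model = sncosmo.Model(source=source)
#     flux = model.flux(0., source._wave)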
if __name__ == '__main__':
# filename = '/Users/akim/project/SNDATA_ROOT/snsed/NON1A/SDSS-019323.SED'
# data = SEDFileSource(filename)
sn = sncosmo.Model(source='snana-2007nc')
print sn.param_names
# wefwe
import matplotlib.pyplot as plt
plt.plot(data._wave, data.flux(0, data._wave))
plt.plot(sn.source._wave, sn.flux(0, sn.source._wave) * 0.95)
plt.show()
| mit | -6,002,426,233,363,240,000 | 32.783133 | 78 | 0.615906 | false |
ericmjl/bokeh | sphinx/source/docs/user_guide/examples/categorical_bar_dodged.py | 1 | 1153 | from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from bokeh.transform import dodge
output_file("dodged_bars.html")
fruits = ['Apples', 'Pears', 'Nectarines', 'Plums', 'Grapes', 'Strawberries']
years = ['2015', '2016', '2017']
data = {'fruits' : fruits,
'2015' : [2, 1, 4, 3, 2, 4],
'2016' : [5, 3, 3, 2, 4, 6],
'2017' : [3, 2, 4, 4, 5, 3]}
source = ColumnDataSource(data=data)
p = figure(x_range=fruits, y_range=(0, 10), plot_height=250, title="Fruit Counts by Year",
toolbar_location=None, tools="")
p.vbar(x=dodge('fruits', -0.25, range=p.x_range), top='2015', width=0.2, source=source,
color="#c9d9d3", legend_label="2015")
p.vbar(x=dodge('fruits', 0.0, range=p.x_range), top='2016', width=0.2, source=source,
color="#718dbf", legend_label="2016")
p.vbar(x=dodge('fruits', 0.25, range=p.x_range), top='2017', width=0.2, source=source,
color="#e84d60", legend_label="2017")
p.x_range.range_padding = 0.1
p.xgrid.grid_line_color = None
p.legend.location = "top_left"
p.legend.orientation = "horizontal"
show(p)
| bsd-3-clause | 5,315,051,611,966,367,000 | 31.942857 | 90 | 0.633131 | false |
jendrikseipp/rednotebook-elementary | win/utils.py | 1 | 1582 | import logging
import os
import shutil
import sys
import subprocess
import urllib.request
def ensure_path(path):
if not os.path.exists(path):
os.mkdir(path)
def confirm_overwrite(dir):
if os.path.exists(dir):
answer = input(
'The directory {} exists. Overwrite it? (Y/n): '.format(dir)).strip()
if answer and answer.lower() != 'y':
sys.exit('Aborting')
shutil.rmtree(dir)
def fast_copytree(src_dir, dest_dir):
subprocess.check_call(['cp', '-r', src_dir, dest_dir])
def fetch(url, path):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.mkdir(dirname)
if not os.path.exists(path):
logging.info('Fetch {0} to {1}'.format(url, path))
with urllib.request.urlopen(url) as response, open(path, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
if not os.path.exists(path):
sys.exit('Download unsuccessful.')
def run(*args, **kwargs):
logging.info('Run command: {0} ({1})'.format(args, kwargs))
retcode = subprocess.call(*args, **kwargs)
if retcode != 0:
sys.exit('Command failed.')
def get_output(*args, **kwargs):
return subprocess.check_output(*args, **kwargs).decode().strip()
def install(path, use_wine):
cmd = []
if use_wine:
cmd.append('wine')
if path.lower().endswith('.exe'):
cmd.extend([path])
elif path.lower().endswith('.msi'):
cmd.extend(['msiexec', '/i', path])
else:
sys.exit('Don\'t know how to install {0}'.format(path))
run(cmd)
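# Usage sketch (illustrative; URL and paths are placeholders):
#
#     fetch('https://example.com/some-installer.msi', 'cache/some-installer.msi')
#     install('cache/some-installer.msi', use_wine=True)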
| gpl-2.0 | 8,799,536,457,335,876,000 | 28.296296 | 83 | 0.609355 | false |
shaftoe/home-assistant | homeassistant/const.py | 1 | 12266 | # coding: utf-8
"""Constants used by Home Assistant components."""
MAJOR_VERSION = 0
MINOR_VERSION = 46
PATCH_VERSION = '0.dev0'
__short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
__version__ = '{}.{}'.format(__short_version__, PATCH_VERSION)
REQUIRED_PYTHON_VER = (3, 4, 2)
REQUIRED_PYTHON_VER_WIN = (3, 5, 2)
CONSTRAINT_FILE = 'package_constraints.txt'
PROJECT_NAME = 'Home Assistant'
PROJECT_PACKAGE_NAME = 'homeassistant'
PROJECT_LICENSE = 'Apache License 2.0'
PROJECT_AUTHOR = 'The Home Assistant Authors'
PROJECT_COPYRIGHT = ' 2013, {}'.format(PROJECT_AUTHOR)
PROJECT_URL = 'https://home-assistant.io/'
PROJECT_EMAIL = '[email protected]'
PROJECT_DESCRIPTION = ('Open-source home automation platform '
'running on Python 3.')
PROJECT_LONG_DESCRIPTION = ('Home Assistant is an open-source '
'home automation platform running on Python 3. '
'Track and control all devices at home and '
'automate control. '
'Installation in less than a minute.')
PROJECT_CLASSIFIERS = [
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.4',
'Topic :: Home Automation'
]
PROJECT_GITHUB_USERNAME = 'home-assistant'
PROJECT_GITHUB_REPOSITORY = 'home-assistant'
PYPI_URL = 'https://pypi.python.org/pypi/{}'.format(PROJECT_PACKAGE_NAME)
GITHUB_PATH = '{}/{}'.format(PROJECT_GITHUB_USERNAME,
PROJECT_GITHUB_REPOSITORY)
GITHUB_URL = 'https://github.com/{}'.format(GITHUB_PATH)
PLATFORM_FORMAT = '{}.{}'
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = '*'
# If no name is specified
DEVICE_DEFAULT_NAME = 'Unnamed Device'
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
SUN_EVENT_SUNSET = 'sunset'
SUN_EVENT_SUNRISE = 'sunrise'
# #### CONFIG ####
CONF_ABOVE = 'above'
CONF_ACCESS_TOKEN = 'access_token'
CONF_AFTER = 'after'
CONF_ALIAS = 'alias'
CONF_API_KEY = 'api_key'
CONF_AUTHENTICATION = 'authentication'
CONF_BASE = 'base'
CONF_BEFORE = 'before'
CONF_BELOW = 'below'
CONF_BINARY_SENSORS = 'binary_sensors'
CONF_BLACKLIST = 'blacklist'
CONF_BRIGHTNESS = 'brightness'
CONF_CODE = 'code'
CONF_COLOR_TEMP = 'color_temp'
CONF_COMMAND = 'command'
CONF_COMMAND_CLOSE = 'command_close'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OPEN = 'command_open'
CONF_COMMAND_STATE = 'command_state'
CONF_COMMAND_STOP = 'command_stop'
CONF_CONDITION = 'condition'
CONF_COVERS = 'covers'
CONF_CUSTOMIZE = 'customize'
CONF_CUSTOMIZE_DOMAIN = 'customize_domain'
CONF_CUSTOMIZE_GLOB = 'customize_glob'
CONF_DEVICE = 'device'
CONF_DEVICE_CLASS = 'device_class'
CONF_DEVICES = 'devices'
CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger'
CONF_DISCOVERY = 'discovery'
CONF_DISPLAY_OPTIONS = 'display_options'
CONF_DOMAIN = 'domain'
CONF_DOMAINS = 'domains'
CONF_EFFECT = 'effect'
CONF_ELEVATION = 'elevation'
CONF_EMAIL = 'email'
CONF_ENTITIES = 'entities'
CONF_ENTITY_ID = 'entity_id'
CONF_ENTITY_NAMESPACE = 'entity_namespace'
CONF_EVENT = 'event'
CONF_EXCLUDE = 'exclude'
CONF_FILE_PATH = 'file_path'
CONF_FILENAME = 'filename'
CONF_FRIENDLY_NAME = 'friendly_name'
CONF_HEADERS = 'headers'
CONF_HOST = 'host'
CONF_HOSTS = 'hosts'
CONF_ICON = 'icon'
CONF_INCLUDE = 'include'
CONF_ID = 'id'
CONF_LATITUDE = 'latitude'
CONF_LONGITUDE = 'longitude'
CONF_MAC = 'mac'
CONF_METHOD = 'method'
CONF_MINIMUM = 'minimum'
CONF_MAXIMUM = 'maximum'
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MONITORED_VARIABLES = 'monitored_variables'
CONF_NAME = 'name'
CONF_OFFSET = 'offset'
CONF_OPTIMISTIC = 'optimistic'
CONF_PACKAGES = 'packages'
CONF_PASSWORD = 'password'
CONF_PATH = 'path'
CONF_PAYLOAD = 'payload'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_PAYLOAD_ON = 'payload_on'
CONF_PENDING_TIME = 'pending_time'
CONF_PIN = 'pin'
CONF_PLATFORM = 'platform'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_PROTOCOL = 'protocol'
CONF_PROXY_SSL = 'proxy_ssl'
CONF_QUOTE = 'quote'
CONF_RECIPIENT = 'recipient'
CONF_RESOURCE = 'resource'
CONF_RESOURCES = 'resources'
CONF_RGB = 'rgb'
CONF_SCAN_INTERVAL = 'scan_interval'
CONF_SENDER = 'sender'
CONF_SENSOR_CLASS = 'sensor_class'
CONF_SENSORS = 'sensors'
CONF_SSL = 'ssl'
CONF_STATE = 'state'
CONF_STRUCTURE = 'structure'
CONF_SWITCHES = 'switches'
CONF_TEMPERATURE_UNIT = 'temperature_unit'
CONF_TIME_ZONE = 'time_zone'
CONF_TIMEOUT = 'timeout'
CONF_TOKEN = 'token'
CONF_TRIGGER_TIME = 'trigger_time'
CONF_TYPE = 'type'
CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM = 'unit_system'
CONF_URL = 'url'
CONF_USERNAME = 'username'
CONF_VALUE_TEMPLATE = 'value_template'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_WEEKDAY = 'weekday'
CONF_WHITELIST = 'whitelist'
CONF_WHITE_VALUE = 'white_value'
CONF_XY = 'xy'
CONF_ZONE = 'zone'
# #### EVENTS ####
EVENT_HOMEASSISTANT_START = 'homeassistant_start'
EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop'
EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close'
EVENT_STATE_CHANGED = 'state_changed'
EVENT_TIME_CHANGED = 'time_changed'
EVENT_CALL_SERVICE = 'call_service'
EVENT_SERVICE_EXECUTED = 'service_executed'
EVENT_PLATFORM_DISCOVERED = 'platform_discovered'
EVENT_COMPONENT_LOADED = 'component_loaded'
EVENT_SERVICE_REGISTERED = 'service_registered'
EVENT_SERVICE_REMOVED = 'service_removed'
EVENT_LOGBOOK_ENTRY = 'logbook_entry'
# #### STATES ####
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_HOME = 'home'
STATE_NOT_HOME = 'not_home'
STATE_UNKNOWN = 'unknown'
STATE_OPEN = 'open'
STATE_CLOSED = 'closed'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_IDLE = 'idle'
STATE_STANDBY = 'standby'
STATE_ALARM_DISARMED = 'disarmed'
STATE_ALARM_ARMED_HOME = 'armed_home'
STATE_ALARM_ARMED_AWAY = 'armed_away'
STATE_ALARM_PENDING = 'pending'
STATE_ALARM_TRIGGERED = 'triggered'
STATE_LOCKED = 'locked'
STATE_UNLOCKED = 'unlocked'
STATE_UNAVAILABLE = 'unavailable'
# #### STATE AND EVENT ATTRIBUTES ####
# Attribution
ATTR_ATTRIBUTION = 'attribution'
# Contains current time for a TIME_CHANGED event
ATTR_NOW = 'now'
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = 'domain'
ATTR_SERVICE = 'service'
ATTR_SERVICE_DATA = 'service_data'
# Data for a SERVICE_EXECUTED event
ATTR_SERVICE_CALL_ID = 'service_call_id'
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = 'entity_id'
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = 'friendly_name'
# A picture to represent entity
ATTR_ENTITY_PICTURE = 'entity_picture'
# Icon to use in the frontend
ATTR_ICON = 'icon'
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str
CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str
# Temperature attribute
ATTR_TEMPERATURE = 'temperature'
TEMP_CELSIUS = '°C'
TEMP_FAHRENHEIT = '°F'
# Length units
LENGTH_CENTIMETERS = 'cm' # type: str
LENGTH_METERS = 'm' # type: str
LENGTH_KILOMETERS = 'km' # type: str
LENGTH_INCHES = 'in' # type: str
LENGTH_FEET = 'ft' # type: str
LENGTH_YARD = 'yd' # type: str
LENGTH_MILES = 'mi' # type: str
# Volume units
VOLUME_LITERS = 'L' # type: str
VOLUME_MILLILITERS = 'mL' # type: str
VOLUME_GALLONS = 'gal' # type: str
VOLUME_FLUID_OUNCE = 'fl. oz.' # type: str
# Mass units
MASS_GRAMS = 'g' # type: str
MASS_KILOGRAMS = 'kg' # type: str
MASS_OUNCES = 'oz' # type: str
MASS_POUNDS = 'lb' # type: str
# Contains the information that is discovered
ATTR_DISCOVERED = 'discovered'
# Location of the device/sensor
ATTR_LOCATION = 'location'
ATTR_BATTERY_LEVEL = 'battery_level'
ATTR_WAKEUP = 'wake_up_interval'
# For devices which support a code attribute
ATTR_CODE = 'code'
ATTR_CODE_FORMAT = 'code_format'
# For devices which support an armed state
ATTR_ARMED = 'device_armed'
# For devices which support a locked state
ATTR_LOCKED = 'locked'
# For sensors that support 'tripping', eg. motion and door sensors
ATTR_TRIPPED = 'device_tripped'
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = 'last_tripped_time'
# For all entity's, this hold whether or not it should be hidden
ATTR_HIDDEN = 'hidden'
# Location of the entity
ATTR_LATITUDE = 'latitude'
ATTR_LONGITUDE = 'longitude'
# Accuracy of location in meters
ATTR_GPS_ACCURACY = 'gps_accuracy'
# If state is assumed
ATTR_ASSUMED_STATE = 'assumed_state'
ATTR_STATE = 'state'
ATTR_OPTION = 'option'
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
# Class of device within its domain
ATTR_DEVICE_CLASS = 'device_class'
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = 'stop'
SERVICE_HOMEASSISTANT_RESTART = 'restart'
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_TOGGLE = 'toggle'
SERVICE_RELOAD = 'reload'
SERVICE_VOLUME_UP = 'volume_up'
SERVICE_VOLUME_DOWN = 'volume_down'
SERVICE_VOLUME_MUTE = 'volume_mute'
SERVICE_VOLUME_SET = 'volume_set'
SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause'
SERVICE_MEDIA_PLAY = 'media_play'
SERVICE_MEDIA_PAUSE = 'media_pause'
SERVICE_MEDIA_STOP = 'media_stop'
SERVICE_MEDIA_NEXT_TRACK = 'media_next_track'
SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track'
SERVICE_MEDIA_SEEK = 'media_seek'
SERVICE_SHUFFLE_SET = 'shuffle_set'
SERVICE_ALARM_DISARM = 'alarm_disarm'
SERVICE_ALARM_ARM_HOME = 'alarm_arm_home'
SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away'
SERVICE_ALARM_TRIGGER = 'alarm_trigger'
SERVICE_LOCK = 'lock'
SERVICE_UNLOCK = 'unlock'
SERVICE_OPEN = 'open'
SERVICE_CLOSE = 'close'
SERVICE_CLOSE_COVER = 'close_cover'
SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt'
SERVICE_OPEN_COVER = 'open_cover'
SERVICE_OPEN_COVER_TILT = 'open_cover_tilt'
SERVICE_SET_COVER_POSITION = 'set_cover_position'
SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position'
SERVICE_STOP_COVER = 'stop_cover'
SERVICE_STOP_COVER_TILT = 'stop_cover_tilt'
SERVICE_SELECT_OPTION = 'select_option'
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = '/'
URL_API = '/api/'
URL_API_STREAM = '/api/stream'
URL_API_CONFIG = '/api/config'
URL_API_DISCOVERY_INFO = '/api/discovery_info'
URL_API_STATES = '/api/states'
URL_API_STATES_ENTITY = '/api/states/{}'
URL_API_EVENTS = '/api/events'
URL_API_EVENTS_EVENT = '/api/events/{}'
URL_API_SERVICES = '/api/services'
URL_API_SERVICES_SERVICE = '/api/services/{}/{}'
URL_API_COMPONENTS = '/api/components'
URL_API_ERROR_LOG = '/api/error_log'
URL_API_LOG_OUT = '/api/log_out'
URL_API_TEMPLATE = '/api/template'
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_BASIC_AUTHENTICATION = 'basic'
HTTP_DIGEST_AUTHENTICATION = 'digest'
HTTP_HEADER_HA_AUTH = 'X-HA-access'
HTTP_HEADER_ACCEPT_ENCODING = 'Accept-Encoding'
HTTP_HEADER_CONTENT_TYPE = 'Content-type'
HTTP_HEADER_CONTENT_ENCODING = 'Content-Encoding'
HTTP_HEADER_VARY = 'Vary'
HTTP_HEADER_CONTENT_LENGTH = 'Content-Length'
HTTP_HEADER_CACHE_CONTROL = 'Cache-Control'
HTTP_HEADER_EXPIRES = 'Expires'
HTTP_HEADER_ORIGIN = 'Origin'
HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With'
HTTP_HEADER_ACCEPT = 'Accept'
HTTP_HEADER_ACCESS_CONTROL_ALLOW_ORIGIN = 'Access-Control-Allow-Origin'
HTTP_HEADER_ACCESS_CONTROL_ALLOW_HEADERS = 'Access-Control-Allow-Headers'
ALLOWED_CORS_HEADERS = [HTTP_HEADER_ORIGIN, HTTP_HEADER_ACCEPT,
HTTP_HEADER_X_REQUESTED_WITH, HTTP_HEADER_CONTENT_TYPE,
HTTP_HEADER_HA_AUTH]
CONTENT_TYPE_JSON = 'application/json'
CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}'
CONTENT_TYPE_TEXT_PLAIN = 'text/plain'
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str
LENGTH = 'length' # type: str
MASS = 'mass' # type: str
VOLUME = 'volume' # type: str
TEMPERATURE = 'temperature' # type: str
SPEED_MS = 'speed_ms' # type: str
ILLUMINANCE = 'illuminance' # type: str
| apache-2.0 | -7,999,569,583,938,235,000 | 28.480769 | 79 | 0.708741 | false |
kyle-elsalhi/opencv-examples | CalibrationByChessboard/CalibrateCamera.py | 1 | 5055 | # System information:
# - Linux Mint 18.1 Cinnamon 64-bit
# - Python 2.7 with OpenCV 3.2.0
# Resources:
# - OpenCV-Python tutorial for calibration: http://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_calib3d/py_calibration/py_calibration.html
# - Variable names were changed for clarity
import numpy
import cv2
import pickle
import glob
# Create arrays you'll use to store object points and image points from all images processed
objpoints = [] # 3D point in real world space where chess squares are
imgpoints = [] # 2D point in image plane, determined by CV2
# Chessboard variables
CHESSBOARD_CORNERS_ROWCOUNT = 9
CHESSBOARD_CORNERS_COLCOUNT = 6
# Theoretical object points for the chessboard we're calibrating against,
# These will come out like:
# (0, 0, 0), (1, 0, 0), ...,
# (CHESSBOARD_CORNERS_ROWCOUNT-1, CHESSBOARD_CORNERS_COLCOUNT-1, 0)
# Note that the Z value for all stays at 0, as this is a printed out 2D image
# And also that the max point is -1 of the max because we're zero-indexing
# The following line generates all the tuples needed at (0, 0, 0)
objp = numpy.zeros((CHESSBOARD_CORNERS_ROWCOUNT*CHESSBOARD_CORNERS_COLCOUNT,3), numpy.float32)
# The following line fills the tuples just generated with their values (0, 0, 0), (1, 0, 0), ...
objp[:,:2] = numpy.mgrid[0:CHESSBOARD_CORNERS_ROWCOUNT,0:CHESSBOARD_CORNERS_COLCOUNT].T.reshape(-1, 2)
# Need a set of images or a video taken with the camera you want to calibrate
# I'm using a set of images taken with the camera with the naming convention:
# 'camera-pic-of-chessboard-<NUMBER>.jpg'
images = glob.glob('./camera-pic-of-chessboard-*.jpg')
# All images used should be the same size, which if taken with the same camera shouldn't be a problem
imageSize = None # Determined at runtime
# Loop through images glob'ed
for iname in images:
# Open the image
img = cv2.imread(iname)
# Grayscale the image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find chessboard in the image, setting PatternSize(2nd arg) to a tuple of (#rows, #columns)
board, corners = cv2.findChessboardCorners(gray, (CHESSBOARD_CORNERS_ROWCOUNT,CHESSBOARD_CORNERS_COLCOUNT), None)
# If a chessboard was found, let's collect image/corner points
if board == True:
# Add the points in 3D that we just discovered
objpoints.append(objp)
# Enhance corner accuracy with cornerSubPix
corners_acc = cv2.cornerSubPix(
image=gray,
corners=corners,
winSize=(11, 11),
zeroZone=(-1, -1),
criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)) # Last parameter is about termination critera
imgpoints.append(corners_acc)
# If our image size is unknown, set it now
if not imageSize:
imageSize = gray.shape[::-1]
# Draw the corners to a new image to show whoever is performing the calibration
# that the board was properly detected
img = cv2.drawChessboardCorners(img, (CHESSBOARD_CORNERS_ROWCOUNT, CHESSBOARD_CORNERS_COLCOUNT), corners_acc, board)
# Pause to display each image, waiting for key press
cv2.imshow('Chessboard', img)
cv2.waitKey(0)
else:
print("Not able to detect a chessboard in image: {}".format(iname))
# Destroy any open CV windows
cv2.destroyAllWindows()
# Make sure at least one image was found
if len(images) < 1:
# Calibration failed because there were no images, warn the user
print("Calibration was unsuccessful. No images of chessboards were found. Add images of chessboards and use or alter the naming conventions used in this file.")
# Exit for failure
exit()
# Make sure we were able to calibrate on at least one chessboard by checking
# if we ever determined the image size
if not imageSize:
# Calibration failed because we didn't see any chessboards of the PatternSize used
print("Calibration was unsuccessful. We couldn't detect chessboards in any of the images supplied. Try changing the patternSize passed into findChessboardCorners(), or try different pictures of chessboards.")
# Exit for failure
exit()
# Now that we've seen all of our images, perform the camera calibration
# based on the set of points we've discovered
calibration, cameraMatrix, distCoeffs, rvecs, tvecs = cv2.calibrateCamera(
objectPoints=objpoints,
imagePoints=imgpoints,
imageSize=imageSize,
cameraMatrix=None,
distCoeffs=None)
# Print matrix and distortion coefficient to the console
print(cameraMatrix)
print(distCoeffs)
# Save values to be used where matrix+dist is required, for instance for posture estimation
# I save files in a pickle file, but you can use yaml or whatever works for you
f = open('calibration.pckl', 'wb')
pickle.dump((cameraMatrix, distCoeffs, rvecs, tvecs), f)
f.close()
# Print to console our success
print('Calibration successful. Calibration file used: {}'.format('calibration.pckl'))
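# Usage sketch (illustrative): the saved pickle can later be reloaded to undistort
# images taken with the same camera; 'test.jpg' is a placeholder filename.
#
#     with open('calibration.pckl', 'rb') as f:
#         cameraMatrix, distCoeffs, rvecs, tvecs = pickle.load(f)
#     img = cv2.imread('test.jpg')
#     undistorted = cv2.undistort(img, cameraMatrix, distCoeffs)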
| mit | -1,366,045,274,275,861,500 | 42.956522 | 212 | 0.712364 | false |
Blackclaws/client | src/client/_clientwindow.py | 1 | 52765 | from functools import partial
from PyQt4.QtCore import QUrl
from PyQt4.QtGui import QLabel, QStyle
from PyQt4.QtNetwork import QAbstractSocket
import config
import connectivity
from base import Client
from config import Settings
import chat
from client.player import Player
from client.players import Players
from client.updater import ClientUpdater
import fa
from connectivity.helper import ConnectivityHelper
from fa import GameSession
from fa.factions import Factions
from fa.game_session import GameSessionState
from ui.status_logo import StatusLogo
'''
Created on Dec 1, 2011
@author: thygrrr
'''
from PyQt4 import QtCore, QtGui, QtNetwork, QtWebKit
from types import IntType, FloatType, ListType, DictType
from client import ClientState, LOBBY_HOST, \
LOBBY_PORT, LOCAL_REPLAY_PORT
import logging
logger = logging.getLogger(__name__)
import util
import secondaryServer
import json
import sys
import replays
import time
import random
import notifications as ns
FormClass, BaseClass = util.loadUiType("client/client.ui")
class mousePosition(object):
def __init__(self, parent):
self.parent = parent
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
self.warning_buttons = dict()
self.onEdges = False
def computeMousePosition(self, pos):
self.onLeftEdge = pos.x() < 8
self.onRightEdge = pos.x() > self.parent.size().width() - 8
self.onTopEdge = pos.y() < 8
self.onBottomEdge = pos.y() > self.parent.size().height() - 8
self.onTopLeftEdge = self.onTopEdge and self.onLeftEdge
self.onBottomLeftEdge = self.onBottomEdge and self.onLeftEdge
self.onTopRightEdge = self.onTopEdge and self.onRightEdge
self.onBottomRightEdge = self.onBottomEdge and self.onRightEdge
self.onEdges = self.onLeftEdge or self.onRightEdge or self.onTopEdge or self.onBottomEdge
def resetToFalse(self):
self.onLeftEdge = False
self.onRightEdge = False
self.onTopEdge = False
self.onBottomEdge = False
self.cursorShapeChange = False
def isOnEdge(self):
return self.onEdges
class ClientWindow(FormClass, BaseClass):
'''
This is the main lobby client that manages the FAF-related connection and data,
in particular players, games, ranking, etc.
Its UI also houses all the other UIs for the sub-modules.
'''
topWidget = QtGui.QWidget()
# These signals are emitted when the client is connected or disconnected from FAF
connected = QtCore.pyqtSignal()
authorized = QtCore.pyqtSignal(object)
disconnected = QtCore.pyqtSignal()
state_changed = QtCore.pyqtSignal(object)
    # This signal is emitted when the client is done resizing
doneresize = QtCore.pyqtSignal()
# These signals notify connected modules of game state changes (i.e. reasons why FA is launched)
viewingReplay = QtCore.pyqtSignal(QtCore.QUrl)
# Game state controls
gameEnter = QtCore.pyqtSignal()
gameExit = QtCore.pyqtSignal()
# These signals propagate important client state changes to other modules
statsInfo = QtCore.pyqtSignal(dict)
tourneyTypesInfo = QtCore.pyqtSignal(dict)
tutorialsInfo = QtCore.pyqtSignal(dict)
tourneyInfo = QtCore.pyqtSignal(dict)
modInfo = QtCore.pyqtSignal(dict)
gameInfo = QtCore.pyqtSignal(dict)
modVaultInfo = QtCore.pyqtSignal(dict)
coopInfo = QtCore.pyqtSignal(dict)
avatarList = QtCore.pyqtSignal(list)
playerAvatarList = QtCore.pyqtSignal(dict)
usersUpdated = QtCore.pyqtSignal(list)
localBroadcast = QtCore.pyqtSignal(str, str)
autoJoin = QtCore.pyqtSignal(list)
channelsUpdated = QtCore.pyqtSignal(list)
replayVault = QtCore.pyqtSignal(dict)
coopLeaderBoard = QtCore.pyqtSignal(dict)
# These signals are emitted whenever a certain tab is activated
showReplays = QtCore.pyqtSignal()
showMaps = QtCore.pyqtSignal()
showGames = QtCore.pyqtSignal()
showTourneys = QtCore.pyqtSignal()
showLadder = QtCore.pyqtSignal()
showChat = QtCore.pyqtSignal()
showMods = QtCore.pyqtSignal()
showCoop = QtCore.pyqtSignal()
matchmakerInfo = QtCore.pyqtSignal(dict)
remember = Settings.persisted_property('user/remember', type=bool, default_value=True)
login = Settings.persisted_property('user/login', persist_if=lambda self: self.remember)
password = Settings.persisted_property('user/password', persist_if=lambda self: self.remember)
gamelogs = Settings.persisted_property('game/logs', type=bool, default_value=False)
useUPnP = Settings.persisted_property('game/upnp', type=bool, default_value=True)
gamePort = Settings.persisted_property('game/port', type=int, default_value=6112)
def __init__(self, *args, **kwargs):
BaseClass.__init__(self, *args, **kwargs)
logger.debug("Client instantiating")
# Hook to Qt's application management system
QtGui.QApplication.instance().aboutToQuit.connect(self.cleanup)
# Init and wire the TCP Network socket to communicate with faforever.com
self.socket = QtNetwork.QTcpSocket()
self.socket.readyRead.connect(self.readFromServer)
self.socket.disconnected.connect(self.disconnectedFromServer)
self.socket.error.connect(self.socketError)
self._client_updater = None
self.blockSize = 0
self.uniqueId = None
self.sendFile = False
self.progress = QtGui.QProgressDialog()
self.progress.setMinimum(0)
self.progress.setMaximum(0)
self.warning_buttons = {}
# Tray icon
self.tray = QtGui.QSystemTrayIcon()
self.tray.setIcon(util.icon("client/tray_icon.png"))
self.tray.show()
self._state = ClientState.NONE
self.auth_state = ClientState.NONE # Using ClientState for reasons
self.session = None
self._connection_attempts = 0
# Timer for resize events
self.resizeTimer = QtCore.QTimer(self)
self.resizeTimer.timeout.connect(self.resized)
self.preferedSize = 0
self._receivers = {}
# Process used to run Forged Alliance (managed in module fa)
fa.instance.started.connect(self.startedFA)
fa.instance.finished.connect(self.finishedFA)
fa.instance.error.connect(self.errorFA)
self.gameInfo.connect(fa.instance.processGameInfo)
# Local Replay Server
self.replayServer = fa.replayserver.ReplayServer(self)
# GameSession
self.game_session = None # type: GameSession
# ConnectivityTest
self.connectivity = None # type: ConnectivityHelper
self.localIP = None
# stat server
self.statsServer = secondaryServer.SecondaryServer("Statistic", 11002, self)
# create user interface (main window) and load theme
self.setupUi(self)
self.setStyleSheet(util.readstylesheet("client/client.css"))
self.whatNewsView.setHtml("<body style='background-color: #000;'></body>")
self.setWindowTitle("FA Forever " + util.VERSION_STRING)
# Frameless
self.setWindowFlags(
QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowMinimizeButtonHint)
self.rubberBand = QtGui.QRubberBand(QtGui.QRubberBand.Rectangle)
self.mousePosition = mousePosition(self)
self.installEventFilter(self)
self.minimize = QtGui.QToolButton(self)
self.minimize.setIcon(util.icon("client/minimize-button.png"))
self.maximize = QtGui.QToolButton(self)
self.maximize.setIcon(util.icon("client/maximize-button.png"))
close = QtGui.QToolButton(self)
close.setIcon(util.icon("client/close-button.png"))
self.minimize.setMinimumHeight(10)
close.setMinimumHeight(10)
self.maximize.setMinimumHeight(10)
close.setIconSize(QtCore.QSize(22, 22))
self.minimize.setIconSize(QtCore.QSize(22, 22))
self.maximize.setIconSize(QtCore.QSize(22, 22))
close.setProperty("windowControlBtn", True)
self.maximize.setProperty("windowControlBtn", True)
self.minimize.setProperty("windowControlBtn", True)
self.logo = StatusLogo(self)
self.logo.disconnect_requested.connect(self.disconnect)
self.logo.reconnect_requested.connect(self.reconnect)
self.logo.about_dialog_requested.connect(self.linkAbout)
self.logo.connectivity_dialog_requested.connect(self.connectivityDialog)
self.menu = self.menuBar()
self.topLayout.addWidget(self.logo)
titleLabel = QLabel("FA Forever" if not config.is_beta() else "FA Forever BETA")
titleLabel.setProperty('titleLabel', True)
self.topLayout.addWidget(titleLabel)
self.topLayout.addStretch(500)
self.topLayout.addWidget(self.menu)
self.topLayout.addWidget(self.minimize)
self.topLayout.addWidget(self.maximize)
self.topLayout.addWidget(close)
self.topLayout.setSpacing(0)
self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.maxNormal = False
close.clicked.connect(self.close)
self.minimize.clicked.connect(self.showSmall)
self.maximize.clicked.connect(self.showMaxRestore)
self.moving = False
self.dragging = False
self.draggingHover = False
self.offset = None
self.curSize = None
sizeGrip = QtGui.QSizeGrip(self)
self.mainGridLayout.addWidget(sizeGrip, 2, 2)
# Wire all important signals
self.mainTabs.currentChanged.connect(self.mainTabChanged)
self.topTabs.currentChanged.connect(self.vaultTabChanged)
# Handy reference to the Player object representing the logged-in user.
self.me = None # FIXME: Move it elsewhere
self.players = Players(
self.me) # Players known to the client, contains the player_info messages sent by the server
self.urls = {}
self.power = 0 # current user power
self.id = 0
# Initialize the Menu Bar according to settings etc.
self.initMenus()
# Load the icons for the tabs
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.whatNewTab), util.icon("client/feed.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.chatTab), util.icon("client/chat.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.gamesTab), util.icon("client/games.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.coopTab), util.icon("client/coop.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.vaultsTab), util.icon("client/mods.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.ladderTab), util.icon("client/ladder.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tourneyTab), util.icon("client/tourney.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.livestreamTab), util.icon("client/twitch.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.replaysTab), util.icon("client/replays.png"))
self.mainTabs.setTabIcon(self.mainTabs.indexOf(self.tutorialsTab), util.icon("client/tutorials.png"))
QtWebKit.QWebSettings.globalSettings().setAttribute(QtWebKit.QWebSettings.PluginsEnabled, True)
# for moderator
self.modMenu = None
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
self.state_changed.emit(value)
@QtCore.pyqtSlot(bool)
def on_actionSavegamelogs_toggled(self, value):
self.gamelogs = value
def eventFilter(self, obj, event):
if (event.type() == QtCore.QEvent.HoverMove):
self.draggingHover = self.dragging
if self.dragging:
self.resizeWidget(self.mapToGlobal(event.pos()))
else:
if self.maxNormal == False:
self.mousePosition.computeMousePosition(event.pos())
else:
self.mousePosition.resetToFalse()
self.updateCursorShape(event.pos())
return False
def updateCursorShape(self, pos):
if self.mousePosition.onTopLeftEdge or self.mousePosition.onBottomRightEdge:
self.mousePosition.cursorShapeChange = True
self.setCursor(QtCore.Qt.SizeFDiagCursor)
elif self.mousePosition.onTopRightEdge or self.mousePosition.onBottomLeftEdge:
self.setCursor(QtCore.Qt.SizeBDiagCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onLeftEdge or self.mousePosition.onRightEdge:
self.setCursor(QtCore.Qt.SizeHorCursor)
self.mousePosition.cursorShapeChange = True
elif self.mousePosition.onTopEdge or self.mousePosition.onBottomEdge:
self.setCursor(QtCore.Qt.SizeVerCursor)
self.mousePosition.cursorShapeChange = True
else:
if self.mousePosition.cursorShapeChange == True:
self.unsetCursor()
self.mousePosition.cursorShapeChange = False
def showSmall(self):
self.showMinimized()
def showMaxRestore(self):
if (self.maxNormal):
self.maxNormal = False
if self.curSize:
self.setGeometry(self.curSize)
else:
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
def mouseDoubleClickEvent(self, event):
self.showMaxRestore()
def mouseReleaseEvent(self, event):
self.dragging = False
self.moving = False
if self.rubberBand.isVisible():
self.maxNormal = True
self.curSize = self.geometry()
self.setGeometry(self.rubberBand.geometry())
self.rubberBand.hide()
# self.showMaxRestore()
def mousePressEvent(self, event):
if event.button() == QtCore.Qt.LeftButton:
if self.mousePosition.isOnEdge() and self.maxNormal == False:
self.dragging = True
return
else:
self.dragging = False
self.moving = True
self.offset = event.pos()
def mouseMoveEvent(self, event):
if self.dragging and self.draggingHover == False:
self.resizeWidget(event.globalPos())
elif self.moving and self.offset != None:
desktop = QtGui.QDesktopWidget().availableGeometry(self)
if event.globalPos().y() == 0:
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == 0:
desktop.setRight(desktop.right() / 2.0)
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
elif event.globalPos().x() == desktop.right():
desktop.setRight(desktop.right() / 2.0)
desktop.moveLeft(desktop.right())
self.rubberBand.setGeometry(desktop)
self.rubberBand.show()
else:
self.rubberBand.hide()
if self.maxNormal == True:
self.showMaxRestore()
self.move(event.globalPos() - self.offset)
def resizeWidget(self, globalMousePos):
if globalMousePos.y() == 0:
self.rubberBand.setGeometry(QtGui.QDesktopWidget().availableGeometry(self))
self.rubberBand.show()
else:
self.rubberBand.hide()
origRect = self.frameGeometry()
left, top, right, bottom = origRect.getCoords()
minWidth = self.minimumWidth()
minHeight = self.minimumHeight()
if self.mousePosition.onTopLeftEdge:
left = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomLeftEdge:
left = globalMousePos.x()
bottom = globalMousePos.y()
elif self.mousePosition.onTopRightEdge:
right = globalMousePos.x()
top = globalMousePos.y()
elif self.mousePosition.onBottomRightEdge:
right = globalMousePos.x()
bottom = globalMousePos.y()
elif self.mousePosition.onLeftEdge:
left = globalMousePos.x()
elif self.mousePosition.onRightEdge:
right = globalMousePos.x()
elif self.mousePosition.onTopEdge:
top = globalMousePos.y()
elif self.mousePosition.onBottomEdge:
bottom = globalMousePos.y()
newRect = QtCore.QRect(QtCore.QPoint(left, top), QtCore.QPoint(right, bottom))
if newRect.isValid():
if minWidth > newRect.width():
if left != origRect.left():
newRect.setLeft(origRect.left())
else:
newRect.setRight(origRect.right())
if minHeight > newRect.height():
if top != origRect.top():
newRect.setTop(origRect.top())
else:
newRect.setBottom(origRect.bottom())
self.setGeometry(newRect)
def setup(self):
import chat
import tourneys
import stats
import vault
import games
import tutorials
import downloadManager
import modvault
import coop
from chat._avatarWidget import avatarWidget
# download manager
self.downloader = downloadManager.downloadManager(self)
self.loadSettings()
# Initialize chat
self.chat = chat.Lobby(self)
# Color table used by the following method
# CAVEAT: This will break if the theme is loaded after the client package is imported
chat.CHAT_COLORS = json.loads(util.readfile("client/colors.json"))
# build main window with the now active client
self.ladder = stats.Stats(self)
self.games = games.Games(self)
self.tourneys = tourneys.Tourneys(self)
self.vault = vault.MapVault(self)
self.modvault = modvault.ModVault(self)
self.replays = replays.Replays(self)
self.tutorials = tutorials.Tutorials(self)
self.Coop = coop.Coop(self)
self.notificationSystem = ns.Notifications(self)
# set menu states
self.actionNsEnabled.setChecked(self.notificationSystem.settings.enabled)
# Other windows
self.avatarAdmin = self.avatarSelection = avatarWidget(self, None)
# warning setup
self.warning = QtGui.QHBoxLayout()
# live streams
self.LivestreamWebView.setUrl(QtCore.QUrl("http://www.faforever.com/livestream"))
self.warnPlayer = QtGui.QLabel(self)
self.warnPlayer.setText(
"A player of your skill level is currently searching for a 1v1 game. Click a faction to join them! ")
self.warnPlayer.setAlignment(QtCore.Qt.AlignHCenter)
self.warnPlayer.setAlignment(QtCore.Qt.AlignVCenter)
self.warnPlayer.setProperty("warning", True)
self.warning.addStretch()
self.warning.addWidget(self.warnPlayer)
def add_warning_button(faction):
button = QtGui.QToolButton(self)
button.setMaximumSize(25, 25)
button.setIcon(util.icon("games/automatch/%s.png" % faction.to_name()))
button.clicked.connect(partial(self.games.startSearchRanked, faction))
self.warning.addWidget(button)
return button
self.warning_buttons = {faction: add_warning_button(faction) for faction in Factions}
self.warning.addStretch()
self.mainGridLayout.addLayout(self.warning, 2, 0)
self.warningHide()
def warningHide(self):
'''
hide the warning bar for matchmaker
'''
self.warnPlayer.hide()
for i in self.warning_buttons.values():
i.hide()
def warningShow(self):
'''
show the warning bar for matchmaker
'''
self.warnPlayer.show()
for i in self.warning_buttons.values():
i.show()
def disconnect(self):
self.state = ClientState.DISCONNECTED
self.socket.disconnectFromHost()
self.chat.disconnect()
@QtCore.pyqtSlot()
def cleanup(self):
'''
Perform cleanup before the UI closes
'''
self.state = ClientState.SHUTDOWN
self.progress.setWindowTitle("FAF is shutting down")
self.progress.setMinimum(0)
self.progress.setMaximum(0)
self.progress.setValue(0)
self.progress.setCancelButton(None)
self.progress.show()
# Important: If a game is running, offer to terminate it gently
self.progress.setLabelText("Closing ForgedAllianceForever.exe")
if fa.instance.running():
fa.instance.close()
# Terminate Lobby Server connection
if self.socket.state() == QtNetwork.QTcpSocket.ConnectedState:
self.progress.setLabelText("Closing main connection.")
self.socket.disconnectFromHost()
# Clear UPnP Mappings...
if self.useUPnP:
self.progress.setLabelText("Removing UPnP port mappings")
fa.upnp.removePortMappings()
# Terminate local ReplayServer
if self.replayServer:
self.progress.setLabelText("Terminating local replay server")
self.replayServer.close()
self.replayServer = None
# Clean up Chat
if self.chat:
self.progress.setLabelText("Disconnecting from IRC")
self.chat.disconnect()
self.chat = None
# Get rid of the Tray icon
if self.tray:
self.progress.setLabelText("Removing System Tray icon")
self.tray.deleteLater()
self.tray = None
# Terminate UI
if self.isVisible():
self.progress.setLabelText("Closing main window")
self.close()
self.progress.close()
def closeEvent(self, event):
logger.info("Close Event for Application Main Window")
self.saveWindow()
if fa.instance.running():
if QtGui.QMessageBox.question(self, "Are you sure?",
"Seems like you still have Forged Alliance running!<br/><b>Close anyway?</b>",
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No) == QtGui.QMessageBox.No:
event.ignore()
return
return QtGui.QMainWindow.closeEvent(self, event)
def resizeEvent(self, size):
self.resizeTimer.start(400)
def resized(self):
self.resizeTimer.stop()
self.doneresize.emit()
def initMenus(self):
self.actionLink_account_to_Steam.triggered.connect(partial(self.open_url, Settings.get("STEAMLINK_URL")))
self.actionLinkWebsite.triggered.connect(partial(self.open_url, Settings.get("WEBSITE_URL")))
self.actionLinkWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
self.actionLinkForums.triggered.connect(partial(self.open_url, Settings.get("FORUMS_URL")))
self.actionLinkUnitDB.triggered.connect(partial(self.open_url, Settings.get("UNITDB_URL")))
self.actionNsSettings.triggered.connect(lambda: self.notificationSystem.on_showSettings())
self.actionNsEnabled.triggered.connect(lambda enabled: self.notificationSystem.setNotificationEnabled(enabled))
self.actionWiki.triggered.connect(partial(self.open_url, Settings.get("WIKI_URL")))
self.actionReportBug.triggered.connect(partial(self.open_url, Settings.get("TICKET_URL")))
self.actionShowLogs.triggered.connect(self.linkShowLogs)
self.actionTechSupport.triggered.connect(partial(self.open_url, Settings.get("SUPPORT_URL")))
self.actionAbout.triggered.connect(self.linkAbout)
self.actionClearCache.triggered.connect(self.clearCache)
self.actionClearSettings.triggered.connect(self.clearSettings)
self.actionClearGameFiles.triggered.connect(self.clearGameFiles)
self.actionSetGamePath.triggered.connect(self.switchPath)
self.actionSetGamePort.triggered.connect(self.switchPort)
# Toggle-Options
self.actionSetAutoLogin.triggered.connect(self.updateOptions)
self.actionSetAutoLogin.setChecked(self.remember)
self.actionSetSoundEffects.triggered.connect(self.updateOptions)
self.actionSetOpenGames.triggered.connect(self.updateOptions)
self.actionSetJoinsParts.triggered.connect(self.updateOptions)
self.actionSetLiveReplays.triggered.connect(self.updateOptions)
self.actionSaveGamelogs.toggled.connect(self.on_actionSavegamelogs_toggled)
self.actionSaveGamelogs.setChecked(self.gamelogs)
self.actionColoredNicknames.triggered.connect(self.updateOptions)
# Init themes as actions.
themes = util.listThemes()
for theme in themes:
action = self.menuTheme.addAction(str(theme))
action.triggered.connect(self.switchTheme)
action.theme = theme
action.setCheckable(True)
if util.getTheme() == theme:
action.setChecked(True)
# Nice helper for the developers
self.menuTheme.addSeparator()
self.menuTheme.addAction("Reload Stylesheet",
lambda: self.setStyleSheet(util.readstylesheet("client/client.css")))
@QtCore.pyqtSlot()
def updateOptions(self):
self.remember = self.actionSetAutoLogin.isChecked()
self.soundeffects = self.actionSetSoundEffects.isChecked()
self.opengames = self.actionSetOpenGames.isChecked()
self.joinsparts = self.actionSetJoinsParts.isChecked()
self.livereplays = self.actionSetLiveReplays.isChecked()
self.gamelogs = self.actionSaveGamelogs.isChecked()
self.players.coloredNicknames = self.actionColoredNicknames.isChecked()
self.saveChat()
@QtCore.pyqtSlot()
def switchTheme(self):
util.setTheme(self.sender().theme, True)
@QtCore.pyqtSlot()
def switchPath(self):
fa.wizards.Wizard(self).exec_()
@QtCore.pyqtSlot()
def switchPort(self):
import loginwizards
loginwizards.gameSettingsWizard(self).exec_()
@QtCore.pyqtSlot()
def clearSettings(self):
result = QtGui.QMessageBox.question(None, "Clear Settings",
"Are you sure you wish to clear all settings, login info, etc. used by this program?",
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if (result == QtGui.QMessageBox.Yes):
util.settings.clear()
util.settings.sync()
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot()
def clearGameFiles(self):
util.clearDirectory(util.BIN_DIR)
util.clearDirectory(util.GAMEDATA_DIR)
@QtCore.pyqtSlot()
def clearCache(self):
changed = util.clearDirectory(util.CACHE_DIR)
if changed:
QtGui.QMessageBox.information(None, "Restart Needed", "FAF will quit now.")
QtGui.QApplication.quit()
@QtCore.pyqtSlot(str)
def open_url(self, url):
QtGui.QDesktopServices.openUrl(QUrl(url))
@QtCore.pyqtSlot()
def linkShowLogs(self):
util.showInExplorer(util.LOG_DIR)
@QtCore.pyqtSlot()
def connectivityDialog(self):
dialog = connectivity.ConnectivityDialog(self.connectivity)
dialog.exec_()
@QtCore.pyqtSlot()
def linkAbout(self):
dialog = util.loadUi("client/about.ui")
dialog.version_label.setText("Version: {}".format(util.VERSION_STRING))
dialog.exec_()
def saveWindow(self):
util.settings.beginGroup("window")
util.settings.setValue("geometry", self.saveGeometry())
util.settings.endGroup()
def saveChat(self):
util.settings.beginGroup("chat")
util.settings.setValue("soundeffects", self.soundeffects)
util.settings.setValue("livereplays", self.livereplays)
util.settings.setValue("opengames", self.opengames)
util.settings.setValue("joinsparts", self.joinsparts)
util.settings.setValue("coloredNicknames", self.players.coloredNicknames)
util.settings.endGroup()
def loadSettings(self):
self.loadChat()
# Load settings
util.settings.beginGroup("window")
geometry = util.settings.value("geometry", None)
if geometry:
self.restoreGeometry(geometry)
util.settings.endGroup()
util.settings.beginGroup("ForgedAlliance")
util.settings.endGroup()
def loadChat(self):
try:
util.settings.beginGroup("chat")
self.soundeffects = (util.settings.value("soundeffects", "true") == "true")
self.opengames = (util.settings.value("opengames", "true") == "true")
self.joinsparts = (util.settings.value("joinsparts", "false") == "true")
self.livereplays = (util.settings.value("livereplays", "true") == "true")
self.players.coloredNicknames = (util.settings.value("coloredNicknames", "false") == "true")
util.settings.endGroup()
self.actionColoredNicknames.setChecked(self.players.coloredNicknames)
self.actionSetSoundEffects.setChecked(self.soundeffects)
self.actionSetLiveReplays.setChecked(self.livereplays)
self.actionSetOpenGames.setChecked(self.opengames)
self.actionSetJoinsParts.setChecked(self.joinsparts)
except:
pass
def doConnect(self):
if not self.replayServer.doListen(LOCAL_REPLAY_PORT):
return False
# Begin connecting.
self.socket.connected.connect(self.on_connected)
self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
return True
def reconnect(self):
"""
Reconnect to the server
:return:
"""
self._connection_attempts += 1
self.state = ClientState.RECONNECTING
self.socket.setSocketOption(QtNetwork.QTcpSocket.KeepAliveOption, 1)
self.socket.connectToHost(LOBBY_HOST, LOBBY_PORT)
@QtCore.pyqtSlot()
def on_connected(self):
self.state = ClientState.ACCEPTED
self.localIP = self.socket.localAddress()
self.send(dict(command="ask_session",
version=config.VERSION,
user_agent="faf-client"))
self.connected.emit()
@property
def can_login(self):
return self.remember and self.password and self.login
def show_login_wizard(self):
from loginwizards import LoginWizard
wizard = LoginWizard(self)
wizard.accepted.connect(self.perform_login)
wizard.exec_()
def doLogin(self):
self.state = ClientState.NONE
if not self.can_login:
self.show_login_wizard()
def getColor(self, name):
return chat.get_color(name)
@QtCore.pyqtSlot()
def startedFA(self):
'''
Slot hooked up to fa.instance when the process has launched.
It will notify other modules through the signal gameEnter().
'''
logger.info("FA has launched in an attached process.")
self.gameEnter.emit()
@QtCore.pyqtSlot(int)
def finishedFA(self, exit_code):
'''
Slot hooked up to fa.instance when the process has ended.
It will notify other modules through the signal gameExit().
'''
if not exit_code:
logger.info("FA has finished with exit code: " + str(exit_code))
else:
logger.warn("FA has finished with exit code: " + str(exit_code))
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def errorFA(self, error_code):
'''
Slot hooked up to fa.instance when the process has failed to start.
'''
if error_code == 0:
logger.error("FA has failed to start")
QtGui.QMessageBox.critical(self, "Error from FA", "FA has failed to start.")
elif error_code == 1:
logger.error("FA has crashed or killed after starting")
else:
text = "FA has failed to start with error code: " + str(error_code)
logger.error(text)
QtGui.QMessageBox.critical(self, "Error from FA", text)
self.gameExit.emit()
@QtCore.pyqtSlot(int)
def mainTabChanged(self, index):
'''
The main visible tab (module) of the client's UI has changed.
In this case, other modules may want to load some data or cease
particularly CPU-intensive interactive functionality.
LATER: This can be rewritten as a simple Signal that each module can then individually connect to.
'''
new_tab = self.mainTabs.widget(index)
if new_tab is self.gamesTab:
self.showGames.emit()
if new_tab is self.chatTab:
self.showChat.emit()
if new_tab is self.replaysTab:
self.showReplays.emit()
if new_tab is self.ladderTab:
self.showLadder.emit()
if new_tab is self.tourneyTab:
self.showTourneys.emit()
if new_tab is self.coopTab:
self.showCoop.emit()
@QtCore.pyqtSlot(int)
def vaultTabChanged(self, index):
new_tab = self.topTabs.widget(index)
if new_tab is self.mapsTab:
self.showMaps.emit()
if new_tab is self.modsTab:
self.showMods.emit()
@QtCore.pyqtSlot()
def joinGameFromURL(self, url):
'''
Tries to join the game at the given URL
'''
logger.debug("joinGameFromURL: " + url.toString())
if fa.instance.available():
add_mods = []
try:
modstr = url.queryItemValue("mods")
add_mods = json.loads(modstr) # should be a list
except:
logger.info("Couldn't load urlquery value 'mods'")
if fa.check.game(self):
uid, mod, map = url.queryItemValue('uid'), url.queryItemValue('mod'), url.queryItemValue('map')
if fa.check.check(mod,
map,
sim_mods=add_mods):
self.join_game(uid)
def writeToServer(self, action, *args, **kw):
'''
Writes data to the deprecated stream API. Do not use.
'''
logger.debug("Client: " + action)
block = QtCore.QByteArray()
out = QtCore.QDataStream(block, QtCore.QIODevice.ReadWrite)
out.setVersion(QtCore.QDataStream.Qt_4_2)
out.writeUInt32(2 * len(action) + 4)
out.writeQString(action)
self.socket.write(block)
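    # Illustrative note (not part of the original client): the legacy stream
    # frames each message as a 4-byte big-endian length followed by a QString,
    # which QDataStream encodes as UTF-16 plus its own 4-byte length prefix --
    # hence the 2 * len(action) + 4 above. A rough equivalent with plain
    # structs (hypothetical, for clarity only):
    #
    #   payload = action.encode('utf-16-be')
    #   frame = struct.pack('>I', len(payload) + 4) + struct.pack('>I', len(payload)) + payload
    #
    # readFromServer() below undoes this by first reading the outer length and
    # then the QString.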
@QtCore.pyqtSlot()
def readFromServer(self):
ins = QtCore.QDataStream(self.socket)
ins.setVersion(QtCore.QDataStream.Qt_4_2)
        while not ins.atEnd():
if self.blockSize == 0:
if self.socket.bytesAvailable() < 4:
return
self.blockSize = ins.readUInt32()
if self.socket.bytesAvailable() < self.blockSize:
return
action = ins.readQString()
logger.debug("Server: '%s'" % action)
if action == "PING":
self.writeToServer("PONG")
self.blockSize = 0
return
try:
self.dispatch(json.loads(action))
except:
logger.error("Error dispatching JSON: " + action, exc_info=sys.exc_info())
self.blockSize = 0
@QtCore.pyqtSlot()
def disconnectedFromServer(self):
logger.warn("Disconnected from lobby server.")
if self.state == ClientState.ACCEPTED:
# Clear the online users lists
oldplayers = self.players.keys()
self.players = Players(self.me)
self.urls = {}
self.usersUpdated.emit(oldplayers)
if self.state != ClientState.DISCONNECTED:
self.state = ClientState.DROPPED
if self._connection_attempts < 2:
logger.info("Reconnecting immediately")
self.reconnect()
else:
timer = QtCore.QTimer(self)
timer.setSingleShot(True)
timer.timeout.connect(self.reconnect)
t = self._connection_attempts * 10000
timer.start(t)
logger.info("Scheduling reconnect in {}".format(t / 1000))
self.disconnected.emit()
@QtCore.pyqtSlot(QtNetwork.QAbstractSocket.SocketError)
def socketError(self, error):
if (error == QAbstractSocket.SocketTimeoutError
or error == QAbstractSocket.NetworkError
or error == QAbstractSocket.ConnectionRefusedError
or error == QAbstractSocket.RemoteHostClosedError):
logger.info("Timeout/network error: {}".format(self.socket.errorString()))
self.disconnectedFromServer()
else:
self.state = ClientState.DISCONNECTED
logger.error("Fatal TCP Socket Error: " + self.socket.errorString())
@QtCore.pyqtSlot()
def forwardLocalBroadcast(self, source, message):
self.localBroadcast.emit(source, message)
def manage_power(self):
        '''Update the interface according to the power (permission level) of the user.'''
if self.power >= 1:
            if self.modMenu is None:
self.modMenu = self.menu.addMenu("Administration")
actionAvatar = QtGui.QAction("Avatar manager", self.modMenu)
actionAvatar.triggered.connect(self.avatarManager)
self.modMenu.addAction(actionAvatar)
def requestAvatars(self, personal):
if personal:
self.send(dict(command="avatar", action="list_avatar"))
else:
self.send(dict(command="admin", action="requestavatars"))
def joinChannel(self, username, channel):
'''Join users to a channel'''
self.send(dict(command="admin", action="join_channel", user_ids=[self.players[username].id], channel=channel))
def closeFA(self, username):
        '''Close FA remotely'''
self.send(dict(command="admin", action="closeFA", user_id=self.players[username].id))
def closeLobby(self, username):
        '''Close the lobby remotely'''
self.send(dict(command="admin", action="closelobby", user_id=self.players[username].id))
def addFriend(self, friend_id):
if friend_id in self.players:
self.players.friends.add(friend_id)
self.send(dict(command="social_add", friend=friend_id))
self.usersUpdated.emit([friend_id])
def addFoe(self, foe_id):
if foe_id in self.players:
self.players.foes.add(foe_id)
self.send(dict(command="social_add", foe=foe_id))
self.usersUpdated.emit([foe_id])
def remFriend(self, friend_id):
if friend_id in self.players:
self.players.friends.remove(friend_id)
self.send(dict(command="social_remove", friend=friend_id))
self.usersUpdated.emit([friend_id])
def remFoe(self, foe_id):
if foe_id in self.players:
self.players.foes.remove(foe_id)
self.send(dict(command="social_remove", foe=foe_id))
self.usersUpdated.emit([foe_id])
def send(self, message):
data = json.dumps(message)
if message.get('command') == 'hello':
logger.info('Logging in with {}'.format({
k: v for k, v in message.items() if k != 'password'
}))
else:
logger.info("Outgoing JSON Message: " + data)
self.writeToServer(data)
def subscribe_to(self, target, receiver):
self._receivers[target] = receiver
def unsubscribe(self, target, receiver):
del self._receivers[target]
def dispatch(self, message):
if "command" in message:
cmd = "handle_" + message['command']
if "target" in message:
receiver = self._receivers.get(message['target'])
if hasattr(receiver, cmd):
getattr(receiver, cmd)(message)
elif hasattr(receiver, 'handle_message'):
receiver.handle_message(message)
else:
logger.warn("No receiver for message {}".format(message))
else:
if hasattr(self, cmd):
getattr(self, cmd)(message)
else:
logger.error("Unknown JSON command: %s" % message['command'])
raise ValueError
else:
logger.debug("No command in message.")
def handle_session(self, message):
self.session = str(message['session'])
if self.remember and self.login and self.password:
self.perform_login()
@QtCore.pyqtSlot()
def perform_login(self):
self.uniqueId = util.uniqueID(self.login, self.session)
self.send(dict(command="hello",
login=self.login,
password=self.password,
unique_id=self.uniqueId,
session=self.session))
return True
def handle_invalid(self, message):
self.state = ClientState.DISCONNECTED
raise Exception(message)
def handle_stats(self, message):
self.statsInfo.emit(message)
def handle_update(self, message):
        # Remove geometry settings prior to updating,
        # as they could be incompatible with an updated client.
Settings.remove('window/geometry')
logger.warn("Server says we need an update")
self.progress.close()
self.state = ClientState.DISCONNECTED
self._client_updater = ClientUpdater(message['update'])
self._client_updater.exec_()
def handle_welcome(self, message):
self._connection_attempts = 0
self.id = message["id"]
self.login = message["login"]
self.me = Player(id=self.id, login=self.login)
self.players[self.me.id] = self.me # FIXME
self.players.me = self.me # FIXME
self.players.login = self.login
logger.debug("Login success")
self.state = ClientState.ACCEPTED
util.crash.CRASH_REPORT_USER = self.login
if self.useUPnP:
fa.upnp.createPortMapping(self.socket.localAddress().toString(), self.gamePort, "UDP")
# update what's new page
self.whatNewsView.setUrl(QtCore.QUrl(
"http://www.faforever.com/?page_id=114&username={user}&pwdhash={pwdhash}".format(user=self.login,
pwdhash=self.password)))
self.updateOptions()
self.state = ClientState.ONLINE
self.authorized.emit(self.me)
# Run an initial connectivity test and initialize a gamesession object
# when done
self.connectivity = ConnectivityHelper(self, self.gamePort)
self.connectivity.connectivity_status_established.connect(self.initialize_game_session)
self.connectivity.start_test()
def initialize_game_session(self):
self.game_session = GameSession(self, self.connectivity)
def handle_registration_response(self, message):
if message["result"] == "SUCCESS":
self.auth_state = ClientState.CREATED
return
self.auth_state = ClientState.REJECTED
self.handle_notice({"style": "notice", "text": message["error"]})
def search_ranked(self, faction):
def request_launch():
msg = {
'command': 'game_matchmaking',
'mod': 'ladder1v1',
'state': 'start',
'gameport': self.gamePort,
'faction': faction
}
if self.connectivity.state == 'STUN':
msg['relay_address'] = self.connectivity.relay_address
self.send(msg)
self.game_session.ready.disconnect(request_launch)
if self.game_session:
self.game_session.ready.connect(request_launch)
self.game_session.listen()
def host_game(self, title, mod, visibility, mapname, password):
def request_launch():
msg = {
'command': 'game_host',
'title': title,
'mod': mod,
'visibility': visibility,
'mapname': mapname,
'password': password,
}
if self.connectivity.state == 'STUN':
msg['relay_address'] = self.connectivity.relay_address
self.send(msg)
self.game_session.ready.disconnect(request_launch)
if self.game_session:
self.game_session.ready.connect(request_launch)
self.game_session.listen()
def join_game(self, uid, password=None):
def request_launch():
msg = {
'command': 'game_join',
'uid': uid,
'gameport': self.gamePort
}
if password:
msg['password'] = password
if self.connectivity.state == "STUN":
msg['relay_address'] = self.connectivity.relay_address
self.send(msg)
self.game_session.ready.disconnect(request_launch)
if self.game_session:
self.game_session.ready.connect(request_launch)
self.game_session.listen()
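    # Illustrative note (not part of the original client): host_game(),
    # join_game() and search_ranked() all follow the same deferred-launch
    # pattern -- the server message is built in a local request_launch()
    # closure, connected to game_session.ready, and only sent once the local
    # GameSession is listening. A rough usage sketch (uid and password are
    # hypothetical):
    #
    #   client.join_game(12345, password='secret')
    #   # -> when game_session.ready fires, the 'game_join' command is sent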
def handle_game_launch(self, message):
if not self.game_session or not self.connectivity.is_ready:
logger.error("Not ready for game launch")
logger.info("Handling game_launch via JSON " + str(message))
silent = False
# Do some special things depending of the reason of the game launch.
rank = False
# HACK: Ideally, this comes from the server, too. LATER: search_ranked message
arguments = []
if message["mod"] == "ladder1v1":
arguments.append('/' + Factions.to_name(self.games.race))
# Player 1v1 rating
arguments.append('/mean')
arguments.append(str(self.me.ladder_rating_mean))
arguments.append('/deviation')
arguments.append(str(self.me.ladder_rating_deviation))
arguments.append('/players 2') # Always 2 players in 1v1 ladder
arguments.append('/team 1') # Always FFA team
# Launch the auto lobby
self.game_session.init_mode = 1
else:
# Player global rating
arguments.append('/mean')
arguments.append(str(self.me.rating_mean))
arguments.append('/deviation')
arguments.append(str(self.me.rating_deviation))
if self.me.country is not None:
arguments.append('/country ')
arguments.append(self.me.country)
# Launch the normal lobby
self.game_session.init_mode = 0
if self.me.clan is not None:
arguments.append('/clan')
arguments.append(self.me.clan)
# Ensure we have the map
if "mapname" in message:
fa.check.map(message['mapname'], force=True, silent=silent)
if "sim_mods" in message:
fa.mods.checkMods(message['sim_mods'])
# UPnP Mapper - mappings are removed on app exit
if self.useUPnP:
fa.upnp.createPortMapping(self.socket.localAddress().toString(), self.gamePort, "UDP")
info = dict(uid=message['uid'], recorder=self.login, featured_mod=message['mod'], launched_at=time.time())
fa.run(info, self.game_session.relay_port, arguments)
def handle_coop_info(self, message):
self.coopInfo.emit(message)
def handle_tournament_types_info(self, message):
self.tourneyTypesInfo.emit(message)
def handle_tournament_info(self, message):
self.tourneyInfo.emit(message)
def handle_tutorials_info(self, message):
self.tutorialsInfo.emit(message)
def handle_mod_info(self, message):
self.modInfo.emit(message)
def handle_game_info(self, message):
if 'games' in message:
for game in message['games']:
self.gameInfo.emit(game)
else:
self.gameInfo.emit(message)
def handle_modvault_list_info(self, message):
modList = message["modList"]
for mod in modList:
self.handle_modvault_info(mod)
def handle_modvault_info(self, message):
self.modVaultInfo.emit(message)
def handle_replay_vault(self, message):
self.replayVault.emit(message)
def handle_coop_leaderboard(self, message):
self.coopLeaderBoard.emit(message)
def handle_matchmaker_info(self, message):
if "action" in message:
self.matchmakerInfo.emit(message)
elif "potential" in message:
if message["potential"]:
self.warningShow()
else:
self.warningHide()
def handle_avatar(self, message):
if "avatarlist" in message:
self.avatarList.emit(message["avatarlist"])
def handle_admin(self, message):
if "avatarlist" in message:
self.avatarList.emit(message["avatarlist"])
elif "player_avatar_list" in message:
self.playerAvatarList.emit(message)
def handle_social(self, message):
if "friends" in message:
self.players.friends = set(message["friends"])
self.usersUpdated.emit(self.players.keys())
if "foes" in message:
self.players.foes = set(message["foes"])
self.usersUpdated.emit(self.players.keys())
if "channels" in message:
# Add a delay to the notification system (insane cargo cult)
self.notificationSystem.disabledStartup = False
self.channelsUpdated.emit(message["channels"])
if "autojoin" in message:
self.autoJoin.emit(message["autojoin"])
if "power" in message:
self.power = message["power"]
self.manage_power()
def handle_player_info(self, message):
players = message["players"]
# Firstly, find yourself. Things get easier once "me" is assigned.
for player in players:
if player["id"] == self.id:
self.me = Player(**player)
for player in players:
id = player["id"]
new_player = Player(**player)
self.players[id] = new_player
self.usersUpdated.emit([player['login']])
if self.me.clan is not None and new_player.clan == self.me.clan:
self.players.clanlist.add(player['login'])
def avatarManager(self):
self.requestAvatars(0)
self.avatarSelection.show()
def handle_authentication_failed(self, message):
QtGui.QMessageBox.warning(self, "Authentication failed", message["text"])
self.state = ClientState.DISCONNECTED
self.show_login_wizard()
def handle_notice(self, message):
if "text" in message:
style = message.get('style', None)
if style == "error":
QtGui.QMessageBox.critical(self, "Error from Server", message["text"])
elif style == "warning":
QtGui.QMessageBox.warning(self, "Warning from Server", message["text"])
elif style == "scores":
self.tray.showMessage("Scores", message["text"], QtGui.QSystemTrayIcon.Information, 3500)
self.localBroadcast.emit("Scores", message["text"])
else:
QtGui.QMessageBox.information(self, "Notice from Server", message["text"])
if message["style"] == "kill":
logger.info("Server has killed your Forged Alliance Process.")
fa.instance.kill()
if message["style"] == "kick":
logger.info("Server has kicked you from the Lobby.")
Client.register(ClientWindow)
| gpl-3.0 | 6,156,876,616,512,699,000 | 36.02807 | 130 | 0.622458 | false |
DigitalSlideArchive/cdsa_ipython_helpers | dsa_mongo_common_functions.py | 1 | 38290 | """ This contains various helper functions for the Digital Slide Archive"""
import re, csv, os, sys, optparse
import collections
import MySQLdb
import MySQLdb.cursors
from PIL import Image
import openslide
from openslide.lowlevel import OpenSlideError
import hashlib
import subprocess
import shutil,glob
import random
from functools import partial
def md5sum(filename):
with open(filename, mode='rb') as f:
d = hashlib.md5()
for buf in iter(partial(f.read, 128), b''):
d.update(buf)
return d.hexdigest()
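# Illustrative usage sketch (not part of the original module; the path is
# hypothetical). md5sum() streams the file in 128-byte chunks, so arbitrarily
# large slides can be hashed without loading them into memory:
#
#   checksum = md5sum('/NDPI_VAULT/ADRC/example_slide.ndpi')
#   print checksum  # 32-character hex digest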
"""Default Directories """
DEFAULT_WSI_DIR = '/NDPI_VAULT/ADRC/'
DEFAULT_PYRAMID_DIR = '/bigdata3/PYRAMIDS/ADRC/'
DEFAULT_DATABASE = 'adrc_slide_database'
DEFAULT_IIP_SERVER_ADDRESS = "http://node15.cci.emory.edu/cgi-bin/iipsrv.fcgi?Zoomify=";
"""CDSA SPECIFIC VARIABLES AND PATHS """
tcga_tumor_types = [ 'acc','blca','blnp','blp','brca','cesc','cntl','coad','dlbc','esca','gbm','hnsc','kich','kirc','kirp','laml','lcll','lcml','lgg','lihc','luad',\
'lusc','meso','ov','paad','pcpg','prad','read','sarc','skcm','stad','tgct','thca','ucec','ucs','uvm']
PATH_REPORT_ROOT_DIRS = ['/bcr/intgen.org/pathology_reports/reports/','/bcr/nationwidechildrens.org/pathology_reports/reports/']
CLIN_REPORT_ROOT = '/bcr/biotab/clin/'
CLIN_REPORT_ROOT_DIRS = ['/bcr/biotab/clin/']
dl_dir = "/SYNOLOGY_TCGA_MIRROR/TCGA_LOCAL_MIRROR/"
TCGA_LOCAL_ROOT_DIR = dl_dir + 'tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous/tumor/'
TCGA_HTTP_ROOT_URL = 'https://tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous/tumor/'
"""PARAMETERS AND VARIABLE INITIALIZATION """
verbose = 0
default_level = ',0 ' ### default layer to use for ndpi2tiff
ndpi_count = 0
_verbose = 0
_verbose = 1
script_id_num = 3800 ### going to increment from some number...maybe ill make this random later
class LinePrinter():
"""
Print things to stdout on one line dynamically
"""
def __init__(self,data):
sys.stdout.write("\r\x1b[K"+data.__str__())
sys.stdout.flush()
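# Illustrative usage sketch (not part of the original module): constructing a
# LinePrinter overwrites the current terminal line in place, which is how the
# crawl loops below report progress:
#
#   for i in range(5):
#       LinePrinter("Processed: %d files" % i)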
"""
REGULAR EXPRESSION
"""
parse_tcga_tissue_and_stain_type = re.compile(r'org_(..*)\.(diagnostic|tissue)_images',re.IGNORECASE)
parse_TCGA_SUBJECT_ID = re.compile(r'(TCGA-..-....)')
parse_full_TCGA_ID = re.compile(r'(TCGA-..-....)-(\d\d)(.)-([^-]*)',re.IGNORECASE)
adrc_pat_one = re.compile(r'(ADRC\d\d-\d+)_(...?)_(.*)\.ndpi$', re.IGNORECASE)
adrc_pat_two = re.compile(r'(OS\d\d-\d+)_(\d+)_(.+)_(.*)\.ndpi$|(OS\d\d-\d+)_([^_]*)_(.*)\.ndpi$',re.IGNORECASE)
adrc_pat_three = re.compile(r'(E\d\d-\d+)_(\d+)_([^_]+)_(.*)\.ndpi$',re.IGNORECASE)
adrc_dzi_pat_one = re.compile(r'(ADRC\d\d-\d+)_(...?)_(.+)\.ndpi\.dzi\.tif$', re.IGNORECASE)
adrc_dzi_pat_two = re.compile(r'(OS\d\d-\d+)_(\d+)_(.+)_(.*)\.ndpi\.dzi\.tif$|(OS\d\d-\d+)_([^_]*)_(.*)\.ndpi\.dzi\.tif',re.IGNORECASE)
adrc_dzi_pat_three = re.compile(r'(E\d\d-\d+)_(\d?)_(.+)_(.*)\.ndpi\.dzi\.tif$',re.IGNORECASE)
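# Illustrative examples (not part of the original module; the filenames are
# hypothetical) of what the patterns above are meant to capture:
#
#   parse_full_TCGA_ID.match('TCGA-02-0001-01Z-00-DX1.svs')
#       -> groups ('TCGA-02-0001', '01', 'Z', '00')
#   adrc_pat_one.match('ADRC12-34_10_Tau.ndpi')
#       -> groups ('ADRC12-34', '10', 'Tau')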
"""
Output files and other logs
"""
f_out = open('corrupt_svs_files.txt','a+')
def connect_to_db( host, user, passwd, db):
"""I will return two cursors to make my life easier """
try:
db_dict = MySQLdb.connect(host, user, passwd, db, cursorclass=MySQLdb.cursors.DictCursor )
db_dict_cursor = db_dict.cursor()
update_cursor = db_dict.cursor()
return( db_dict_cursor, update_cursor)
except:
print "Could not connect to the database!!!",host,user,passwd,db
sys.exit()
return (None,None)
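# Illustrative usage sketch (not part of the original module); host and
# credentials are placeholders. Both returned cursors are DictCursors, so rows
# come back as column-name keyed dicts:
#
#   (read_cursor, write_cursor) = connect_to_db('localhost', 'user', 'passwd', DEFAULT_DATABASE)
#   read_cursor.execute("select count(*) as count from svs_slide_info")
#   print read_cursor.fetchone()['count']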
def openslide_test_file(full_file_path,file_type,db_cursor):
"""This will use the openslide bindings to get the width, height and filesize for an image or return an Error otherwise"""
width=height=filesize=orig_resolution=slide_title=md5 = None
try:
im = openslide.open_slide(full_file_path)
(width, height) = im.dimensions
base_file_name = os.path.basename(full_file_path)
filesize = os.path.getsize(full_file_path)
if(file_type== 'svs'):
orig_resolution = im.properties['aperio.AppMag']
#md5 = md5Checksum(full_file_path)
slide_name = os.path.basename(full_file_path)
return(True,width,height,filesize,orig_resolution,slide_name,md5)
except OpenSlideError, e:
print "Openslide returned an error",full_file_path
print >>sys.stderr, "Verify failed with:", repr(e.args)
print "Openslide returned an error",full_file_path
f_out.write(full_file_path+';\n')
insert_corrupt_batch_stmt = "insert into `corrupt_or_unreadable_%s_files` (full_file_name,filesize) Values ('%s',%d) "
print insert_corrupt_batch_stmt % (file_type,full_file_path,os.path.getsize(full_file_path) )
#update_cursor.execute( insert_corrupt_batch_stmt % (full_file_path,os.path.getsize(full_file_path) ))
return(False,None,None,None,None,None,None)
except StandardError, e:
#file name likely not valid
print >>sys.stderr, "Verify failed with:", repr(e.args)
print "Openslide returned an error",full_file_path
f_out.write(full_file_path+';\n')
insert_corrupt_batch_stmt = "insert into `corrupt_or_unreadable_%s_files` (full_file_name,filesize) Values ('%s',%d) "
print insert_corrupt_batch_stmt % (file_type,full_file_path,os.path.getsize(full_file_path) )
#update_cursor.execute( insert_corrupt_batch_stmt % (full_file_path,os.path.getsize(full_file_path) ))
return(False,None,None,None,None,None,None)
except:
print "failed even earlier on",full_file_path
"""will log this to a file"""
return(False,width,height,filesize,orig_resolution,slide_title,md5)
return(False,width,height,filesize,orig_resolution,slide_title,md5)
def check_image_status_in_db(full_file_path,filetype,db_cursor):
""" this will do a lookup in the thumb database and see if the image is already there...
     if it is... I don't bother doing any additional file lookups
some of the metadata extraction can take a bit of time as I need to parse the PNG headers
filetype can be svs, bigtiff image, ndpi, pyramid image
"""
v = _verbose >= 1; vv = _verbose >= 2
if filetype == 'svs':
sql_lookup = "select count(*) as count from `svs_slide_info` where full_file_path='%s'" % (full_file_path)
db_cursor.execute(sql_lookup)
data = db_cursor.fetchone()
if data['count'] == 0:
if vv: print "Need to update entry"
(valid_image,width,height,filesize,orig_resolution,base_file_name,md5) = openslide_test_file(full_file_path,'svs',db_cursor)
if valid_image:
slide_folder = str(full_file_path.split('/')[-2])
sql = "insert into `svs_slide_info` ( slide_filename, image_width,image_height, resolution, full_file_path, slide_folder, filesize ,md5sum ) "
sql += " Values ('%s',%s,%s,%s,'%s', '%s',%d,'%s' ) " % ( base_file_name, width, height, orig_resolution, full_file_path, slide_folder, filesize ,md5)
db_cursor.execute(sql)
elif filetype == 'pyramid':
sql_lookup = "select count(*) as count from `dzi_pyramid_info` where full_file_path like ('"+full_file_path+"')"
db_cursor.execute(sql_lookup)
data = db_cursor.fetchone()
if data['count'] == 0:
if vv: print "Need to update entry"
(valid_image,width,height,filesize,orig_resolution,pyramid_file_name,md5) = openslide_test_file(full_file_path,'pyramid',db_cursor)
if valid_image:
slide_folder = str(full_file_path.split('/')[-2])
insert_sql = "insert into `dzi_pyramid_info` ( pyramid_filename, image_width, image_height, full_file_path, file_basename, filesize ,pyramid_folder) "\
+ " Values ('%s',%d,%d,'%s','%s', %d, '%s' ) " % ( pyramid_file_name, width, height, full_file_path , slide_folder, filesize , slide_folder)
print insert_sql
db_cursor.execute(insert_sql)
def set_active_archive_status(metadata_dict_cursor):
"""This will update and/or set the flag for a slide being an active archive from the TCGA data set"""
select_stmt = " select * from `latest_archive_info`"
print select_stmt
metadata_dict_cursor.execute(select_stmt)
result = metadata_dict_cursor.fetchall()
active_slide_archive = []
for row in result:
archive_name = row['ARCHIVE_NAME']
if 'slide' in archive_name or 'diagnostic' in archive_name or 'tissue' in archive_name:
# print archive_name
active_slide_archive.append(archive_name)
print "I have found",len(active_slide_archive),"active slid archives"
    ## I should probably set all archives to null first..
    #### first set the entire thing to inactive
update_stmt = "update svs_slide_info set active_tcga_slide='0'"
print update_stmt
metadata_dict_cursor.execute(update_stmt)
for cur_archive in active_slide_archive:
update_stmt = "update svs_slide_info set active_tcga_slide='1' where slide_folder='%s'" % cur_archive
print update_stmt
metadata_dict_cursor.execute(update_stmt)
"""Now need to check if file is on the filesystem
result = metadata_dict_cursor.fetchall()
null_rows = 0
for row in result:
full_file_path = row['full_file_path']
patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
"""
def validate_slide_pyramid_linkage(db_cursor,db_cursor_two):
select_stmt = " select * from `svs_slide_info`"
db_cursor.execute(select_stmt)
"""Now need to check if file is on the filesystem"""
result = db_cursor.fetchall()
invalid_pyramid_link = 0
print len(result),"rows to process"
for row in result:
#print row
invalid_row = False
pyramid = (row['pyramid_filename'])
if not os.path.isfile(pyramid):
print "Pyramid is missing...",pyramid
invalid_row = True
svs = (row['full_file_path'])
if not os.path.isfile(svs):
print "SVS is missing",svs
invalid_row = True
if os.path.basename(pyramid).split('.')[0] != os.path.basename(svs).split('.')[0]:
print svs,pyramid,"DONT SEEM TO MATCH"
print os.path.basename(pyramid),os.path.basename(svs)
invalid_row = True
if invalid_row:
del_sql = "delete from svs_slide_info where slide_id='%d'" % row['slide_id']
db_cursor_two.execute(del_sql)
##pyramid_file_name and full_file_path
def generate_slide_pyramid_linkage(db_cursor,db_cursor_two):
""" This will update the slide database and link the pyramids associated with the image.... will scan multiple
tables """
v = _verbose >= 1; vv = _verbose >= 2
v= True
vv = True
"""pyramid filenames match on slide_filename in the svs_slide_info table and slide_folder... there are the two
main keys"""
""" other fields of import include stain_type and main_project_name... this needs to be duplictable at some point
since a slide can be in more than one project.... other key field is tissue_type and patient_id
I may want to have this field iterate multiple fields one by one....
"""
## in the dzi_pyramid_info I have two fields that need to be dupdated...parent_slide_title and parent_slide_id
## probably only need one of these... other field thats relevant is pyramid_folder
select_stmt = " select * from `svs_slide_info` where pyramid_generated is NULL"
db_cursor.execute(select_stmt)
"""Now need to check if file is on the filesystem"""
result = db_cursor.fetchall()
null_rows = 0
matched_pyramids_found = 0
for row in result:
null_rows += 1
matched_pyramid_file = row['full_file_path'].replace('/bigdata/RAW_SLIDE_LINKS/CDSA/','/bigdata2/PYRAMIDS/CDSA/')+'.dzi.tif'
# print matched_pyramid_file
if(os.path.isfile(matched_pyramid_file)):
update_sql = "update svs_slide_info set pyramid_filename='%s',pyramid_generated='%d' where slide_id='%d'" % (matched_pyramid_file,True,row['slide_id'])
db_cursor.execute(update_sql)
matched_pyramids_found += 1
else:
pass
#//there should be a matching pyramid
#patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
# print patient_id
# if not patient_id[0] == None:
# else:
# print "Found no patient id...",full_file_path
print "there were",null_rows,"empty rows and",matched_pyramids_found,"matched pyramids"
select_stmt = " select * from `svs_slide_info` where patient_id is NULL"
db_cursor.execute(select_stmt)
"""Now need to check if file is on the filesystem"""
result = db_cursor.fetchall()
null_rows = 0
for row in result:
full_file_path = row['full_file_path']
patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
# print patient_id
null_rows += 1
if not patient_id[0] == None:
update_sql = "update svs_slide_info set patient_id='%s' where slide_id='%d'" % (patient_id[0],row['slide_id'])
db_cursor.execute(update_sql)
else:
print "Found no patient id...",full_file_path
print "there were",null_rows,"empty rows"
select_stmt = " select * from `svs_slide_info` where stain_type is NULL and tissue_type is NULL"
db_cursor.execute(select_stmt)
"""Now need to check if file is on the filesystem"""
result = db_cursor.fetchall()
null_rows = 0
for row in result:
full_file_path = row['full_file_path']
(stain_type,tissue_type) = get_tcga_stain_type(full_file_path )
"""I originally AND 'ed the sql statement and it caused it to crash.... i guess that's the logical operator"""
null_rows += 1
if not stain_type == None and not tissue_type == None:
update_sql = "update svs_slide_info set stain_type='%s', tissue_type='%s' where slide_id=%d" %\
(stain_type,tissue_type,row['slide_id'])
db_cursor.execute(update_sql)
else:
print "Found no matching group type ...",full_file_path
print "there were",null_rows,"empty rows"
select_stmt = " select * from `dzi_pyramid_info` where parent_slide_id is NULL"
db_cursor.execute(select_stmt)
"""Now need to check if file is on the filesystem"""
result = db_cursor.fetchall()
null_rows = 0
for row in result:
full_file_path = row['full_file_path']
pyramid_folder = row['pyramid_folder']
pyramid_filename = row['pyramid_filename'] ### of note it is quite likely the pyramid filename does NOT match the
## origin slide filename but has extra crap at the end...
## and also this can be a one to many relationship.. i.e. i may have pyramidized a file
## multiple times
pyramid_id = row['pyramid_id']
slide_filename = pyramid_filename.replace('.dzi.tif','')
        ### = row['pyramid_filename'] ### of note: it is quite likely the pyramid filename does NOT match the slide filename; the trailing .dzi.tif is the issue
pyramid_to_orig_slide_match = "select * from svs_slide_info where slide_folder='%s' and slide_filename like '%s'" %(pyramid_folder,slide_filename)
db_cursor_two.execute(pyramid_to_orig_slide_match)
slide_match_result = db_cursor_two.fetchall()
if slide_match_result:
for slide_row in slide_match_result:
print slide_row
slide_id = slide_row['slide_id']
"""so now that I found a match I need to reverse the lookup and get the pyramid id.."""
# set_slide_match_sql = "update svs_slide_info select * from svs_slide_info where slide_folder='%s' and slide_filename like '%s'" %(pyramid_folder,slide_filename)
set_pyramid_match_sql = "update dzi_pyramid_info set parent_slide_id='%d' where pyramid_id='%d'" %(slide_id,pyramid_id)
db_cursor_two.execute( set_pyramid_match_sql)
else:
# print "No match for",slide_filename,"so found a null file set",pyramid_folder
pass
""" null_rows += 1
if not stain_type == None and not tissue_type == None:
update_sql = "update svs_slide_info set stain_type='%s', tissue_type='%s' where slide_id=%d" %\
(stain_type,tissue_type,row['slide_id'])
metadata_cursor.execute(update_sql)
else:
print "Found no matching group type ...",full_file_path
print "there were",null_rows,"empty rows"
"""
def get_file_metadata ( input_file, file_type):
"""this function wil scan a system file and try axtract certain metadata about the file..
this will vary based on the root file type i.e. ndpi, svs, big tff, etc"""
print input_file, file_type
def find_clin_reports ( tumor_type ):
"""also grab all the clinical data....."""
clin_data = []
clin_data_struct = {}
""" it seems like the clinical data reports are the cleanest with nationwidechildrens """
for clin_rpt_dir in CLIN_REPORT_ROOT_DIRS:
path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+clin_rpt_dir
#print path_base_dir
for dpath, dnames, fnames in os.walk( path_base_dir, followlinks=True):
for file in fnames:
if '.txt' in file:
filebase = file.rstrip('.txt')
full_file_path = dpath+'/'+filebase
#full_file_path = 'temp'
web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'')
clin_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path }
#Making the full file path a relative web path
#pdf_path_reports.append(path_data_struct)
return clin_data_struct
def find_path_reports ( tumor_type ):
"""this will walk the directories and find pdf files that are path reports """
pdf_path_reports = []
path_data_struct = {}
"""Path reports seem to be in more than one base directory depending on if intgen or nationwides curated them"""
for PATH_REPORT_ROOT in PATH_REPORT_ROOT_DIRS:
path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT
#print path_base_dir
for dpath, dnames, fnames in os.walk( TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT, followlinks=True):
for file in fnames:
if '.pdf' in file:
filebase = file.rstrip('.pdf')
full_file_path = dpath+'/'+filebase
#full_file_path = 'temp'
web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'')
path_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path }
#Making the full file path a relative web path
#pdf_path_reports.append(path_data_struct)
return path_data_struct
def find_tcga_clinical_files ( tumor_type ):
"""this will walk the directories and find pdf files that are path reports """
pdf_path_reports = []
path_data_struct = {}
path_base_dir = TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT
#print path_base_dir
for dpath, dnames, fnames in os.walk( TCGA_LOCAL_ROOT_DIR+tumor_type+PATH_REPORT_ROOT, followlinks=True):
for file in fnames:
if '.pdf' in file:
filebase = file.rstrip('.pdf')
#full_file_path = dpath+'/'+filebase
full_file_path = 'temp'
web_path = full_file_path.replace(TCGA_LOCAL_ROOT_DIR,'')
path_data_struct[filebase] = { 'web_path':web_path, 'full_file_path':full_file_path }
#Making the full file path a relative web path
#pdf_path_reports.append(path_data_struct)
return path_data_struct
def find_ndpi_image_list( ndpi_root_path ):
"""project_name is passed along with the potentially more than one root image path for ndpi files"""
found_ndpi_files = []
ndpi_root_path = ndpi_root_path.rstrip('/')
for dpath, dnames, fnames in os.walk( ndpi_root_path, followlinks=True):
for file in fnames:
if '.ndpi' in file:
#filebase = file.rstrip('.ndpi')
#print dpath
found_ndpi_files.append(dpath +'/'+file)
print len(found_ndpi_files),"NDPI files were located"
return found_ndpi_files
def find_svs_image_list( project_name, svs_root_path_list ):
"""project_name is passed along with the potentially more than one root image path for ndpi files"""
found_svs_files = []
svs_files_found = 0
for svs_root_path in svs_root_path_list:
print svs_root_path
for dpath, dnames, fnames in os.walk( svs_root_path+project_name, followlinks=True):
for file in fnames:
if '.svs' in file:
filebase = file.rstrip('.svs')
full_filename = dpath+'/'+file
#check_image_status_in_db(full_filename,'svs') # change this to add corrupt files and bytes file found
# found_svs_files.append(filebase)
found_svs_files.append(full_filename)
svs_files_found += 1
output = "Processed: %d svsfiles " % \
(svs_files_found )
#corrupt_svs_count, total_gigapixels, total_bytes, old_batch_svs)
LinePrinter(output)
return(found_svs_files)
def find_pyramid_images( project_name, pyramid_root_dirs):
## first find the available resolutions...
pyramid_images = []
pyramids_found = 0
### I am going to add or scan for a 20X, 5X or 40X instead... and use that
for pyramid_root in pyramid_root_dirs:
if os.path.isdir(pyramid_root+project_name):
for dpath, dnames, fnames in os.walk( pyramid_root+project_name, followlinks=True):
for file in fnames:
if '.dzi.tif' in file.lower():
full_filename = dpath+'/'+file
pyramids_found += 1
if verbose: print file,dpath
#check_image_status_in_db(full_filename,'pyramid') # change this to add corrupt files and bytes file found
output = "Processed: %d pyramids" % pyramids_found
LinePrinter(output)
pyramid_images.append(full_filename)
return(pyramid_images)
def get_tcga_stain_type( string_to_check):
""" this function pulls out the stain and tissue type from the TCGA path file names """
m = parse_tcga_tissue_and_stain_type.search(string_to_check)
if m:
return (m.group(1),m.group(2) )
else:
return (None,None)
class Table:
def __init__(self, db, name):
self.db = db
self.name = name
self.dbc = self.db.cursor()
def __getitem__(self, item):
self.dbc.execute("select * from %s limit %s, 1" %(self.name, item))
return self.dbc.fetchone()
def __len__(self):
self.dbc.execute("select count(*) as count from %s" % (self.name))
count_info = self.dbc.fetchone()
l = int( count_info['count'] )
return l
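# Illustrative usage sketch (not part of the original module): Table wraps a
# MySQLdb connection so a table can be indexed and measured like a sequence.
# The connection below is hypothetical and assumed to use DictCursor.
#
#   slides = Table(db_connection, 'svs_slide_info')
#   print len(slides)   # row count
#   print slides[0]     # first row as a dict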
"""
Acronyms and abbreviations used as well as syntax info
wsi = whole slide image
-8 specifies bigtiff output and the -c sets the compression
pick the level to get, which should be 0 -- i.e. which layer am I trying to convert
"""
def check_for_valid_ADRC_ID( string_to_check):
"""a file should start with ideally ADRC##-#### or OS or osmething similar
Valid filename should be ADRCXX-XXXX_<Section>_<STAIN>_<NOtes> """
m = adrc_pat_one.match(string_to_check)
m_second_pat = adrc_pat_two.match(string_to_check)
m_third_pat = adrc_pat_three.match(string_to_check)
if m:
patient_id = m.group(1)
section_id = m.group(2)
stain = m.group(3)
# print patient_id,section_id,stain
return(True)
elif m_second_pat:
patient_id = m_second_pat.group(1)
section_id = m_second_pat.group(2)
stain = m_second_pat.group(3)
# print patient_id,section_id,stain
return(True)
elif m_third_pat:
patient_id = m_third_pat.group(1)
section_id = m_third_pat.group(2)
stain = m_third_pat.group(3)
else:
print "no match",string_to_check
return(False)
def parse_slide_info_for_ADRC_ID( string_to_check):
"""a file should start with ideally ADRC##-#### or OS or osmething similar
Valid filename should be ADRCXX-XXXX_<Section>_<STAIN>_<NOtes> """
stain_tag_normalization_dict = { "AB" : "Abeta", "ABETA" : "ABeta", "US_tau": "Tau", "US_pTDP" : "pTDP",
"TAU" : "Tau" , "TAU" : "tau", "US_AB" : "ABeta", "US_aSYN-4B12" : "aSyn-4B12",
"BIEL" : "Biel"}
m = adrc_dzi_pat_one.match(string_to_check)
m_second_pat = adrc_dzi_pat_two.match(string_to_check)
m_third_pat = adrc_dzi_pat_three.match(string_to_check)
if m:
patient_id = m.group(1)
section_id = m.group(2)
stain = m.group(3)
if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain]
print patient_id,section_id,stain
return(True,patient_id,section_id,stain)
elif m_second_pat:
patient_id = m_second_pat.group(1)
section_id = m_second_pat.group(2)
stain = m_second_pat.group(3)
if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain]
print patient_id,section_id,stain
return(True,patient_id,section_id,stain)
elif m_third_pat:
patient_id = m_third_pat.group(1)
section_id = m_third_pat.group(2)
stain = m_third_pat.group(3)
if stain in stain_tag_normalization_dict.keys(): stain = stain_tag_normalization_dict[stain]
print patient_id,section_id,stain
return(True,patient_id,section_id,stain)
else:
print "no match",string_to_check
return(False,None,None,None)
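# Illustrative example (not part of the original module; the filename is
# hypothetical) of the pyramid-name parse plus stain normalization above:
#
#   parse_slide_info_for_ADRC_ID('ADRC12-34_10_AB.ndpi.dzi.tif')
#       -> (True, 'ADRC12-34', '10', 'Abeta')   # raw 'AB' tag normalized via stain_tag_normalization_dict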
def get_tcga_id( string_to_check , get_full_tcga_id):
""" will either return the TCGA-12-3456 or the entire TCGA sample ID which is much much longer... TCGA-12-3456-12-23-232-32"""
if(get_full_tcga_id):
m = parse_full_TCGA_ID.match(string_to_check)
if m:
TCGA_FULL_ID = m.group(1)+'-'+m.group(2)+m.group(3)+'-'+m.group(4)
return (m.group(1),TCGA_FULL_ID)
else:
return None,None
m = parse_TCGA_SUBJECT_ID.match(string_to_check)
if m:
return (m.group(0),'None')
else:
return (None,None)
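# Illustrative usage sketch (not part of the original module; the barcode is
# hypothetical): the second argument selects between the short subject ID and
# the longer sample-level ID assembled from the regex groups above.
#
#   get_tcga_id('TCGA-02-0001-01Z-00-DX1.svs', False)  -> ('TCGA-02-0001', 'None')
#   get_tcga_id('TCGA-02-0001-01Z-00-DX1.svs', True)   -> ('TCGA-02-0001', 'TCGA-02-0001-01Z-00')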
def set_database_slide_metadata(database,table):
"""this will iterate and update various project related attributes that may not be set on initial parse
such as stain type, tissue_type , etc... """
## update stain_Type first
sql_lookup = "select * from `"+ database + "`.`dzi_pyramid_info` where stain_type is NULL "
metadata_dict_cursor.execute(sql_lookup)
data = metadata_dict_cursor.fetchall()
for row in data:
# print row
(found_tags, patient_id, section_id, stain) = parse_slide_info_for_ADRC_ID( row['pyramid_filename'])
if found_tags:
update_sql = "update `" + database + "`.`"+"dzi_pyramid_info` set stain_type='%s' where pyramid_id='%d'" % ( stain, row['pyramid_id'])
print update_sql
update_cursor.execute(update_sql)
update_annotation_sql = "select * from `" + database + "`.`dzi_pyramid_info` where has_annotation is Null"
metadata_dict_cursor.execute(update_annotation_sql)
data = metadata_dict_cursor.fetchall()
for row in data:
print row
def update_annotations(database):
"""will find xml annotation files and update the database """
base_path = '/var/www/adrc_js/xml_annotation_files/'
# crawl looking for svs files
for dirpath, dirnames, filenames in os.walk(base_path, followlinks=True, onerror=_listdir_error):
for fname in filenames:
# NDPI (slide) file?
if 'xml' in fname:
file_with_path = os.path.join(dirpath, fname)
print file_with_path,dirpath,dirnames,filenames
base_filename = os.path.basename(fname)
base_filename = base_filename.replace('.xml','')
print base_filename
find_slide_sql = "select * from dzi_pyramid_info where pyramid_filename like '%s%%'" % (base_filename)
print find_slide_sql
metadata_dict_cursor.execute( find_slide_sql)
data = metadata_dict_cursor.fetchall()
for row in data:
print data
update_sql = "update dzi_pyramid_info set has_annotation='1' where pyramid_id='%d'" % (row['pyramid_id'])
print update_sql
update_cursor.execute(update_sql)
def gen_ndpi_pyramid(input_file,pyramid_file):
""" this is a new method that will convert an NDPI to a tiff without necessitating tiling"""
v = _verbose >= 1; vv = _verbose >= 2
ndpi2tiff_command = "/bigdata3/BIG_TIFF_IMAGES/ndpi2tiff -8 -t -c lzw:2 "
script_file_base_path = '/fastdata/tmp/SGE_SCRIPTS/'
SSD_TEMP_SPACE = '/fastdata/tmp/'
global script_id_num ### going to increment from some number...maybe ill make this random later
    current_command_list = '#!/bin/bash \n' ### set this to null... I'll only open a script file if I actually run a command
delete_bigtiff_image = True ## determines if I should cleanup/delete the bigtiff i generate
## this is an intermediate file before pyramid generation
print input_file,pyramid_file
if not os.path.isfile(pyramid_file):
### for speed I am going to copy the input file to /fastdata/tmp..
### I am copying the input_file from its home to a cache dir of SSD goodness
ssd_cached_file = SSD_TEMP_SPACE + os.path.basename(input_file)
if v: print ssd_cached_file,"cached file name"
if not os.path.isfile(ssd_cached_file):
current_command_list += "sleep "+str(random.randint(1,180) ) + ' \n'
current_command_list += "cp "+input_file+' '+SSD_TEMP_SPACE+'\n'
## after deliberation copying from the script versus via ssh helps throttle disk copy from
## the long term image store which is slower..
## I decided to add a random sleep time of 0 - 180 seconds in each job
ndpi2tiff_command = ndpi2tiff_command + ssd_cached_file + default_level
if v: print ndpi2tiff_command
output_file = ssd_cached_file+',0.tif'
if not os.path.isfile(output_file):
current_command_list += ndpi2tiff_command +'\n'
pyramid_output_dir = os.path.dirname(pyramid_file)
if not os.path.isdir(pyramid_output_dir):
os.makedirs(pyramid_output_dir)
#vips_pyramid_output = cur_file.replace(input_dir,pyramid_directory) +'.dzi.tif'
vips_command = 'vips im_vips2tiff -v '+output_file+' '+pyramid_file+':jpeg:90,tile:256x256,pyramid,,,,8 '
print vips_command
current_command_list += vips_command
if v: print current_command_list
### now writing the script
current_bash_script = script_file_base_path+'ndpi2tiff-'+str(script_id_num)+'.sh'
f_out = open(current_bash_script,'w')
f_out.write(current_command_list)
if delete_bigtiff_image:
f_out.write('\n rm -rf \''+output_file+'\' \n')
f_out.write('rm -rf '+ssd_cached_file+' \n')
## this may be better to just not put part of the command script
script_id_num += 1
f_out.close()
sge_submit_cmd = "qsub -q slide_convert.q "+current_bash_script
print sge_submit_cmd
output = subprocess.check_output (sge_submit_cmd,stderr=subprocess.STDOUT, shell=True)
print output
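# Illustrative sketch (not part of the original module) of the kind of SGE job
# script gen_ndpi_pyramid() writes out; the paths and sleep value are
# hypothetical:
#
#   #!/bin/bash
#   sleep 42
#   cp /NDPI_VAULT/ADRC/example.ndpi /fastdata/tmp/
#   /bigdata3/BIG_TIFF_IMAGES/ndpi2tiff -8 -t -c lzw:2 /fastdata/tmp/example.ndpi,0
#   vips im_vips2tiff -v /fastdata/tmp/example.ndpi,0.tif /bigdata3/PYRAMIDS/ADRC/example.ndpi.dzi.tif:jpeg:90,tile:256x256,pyramid,,,,8
#   rm -rf '/fastdata/tmp/example.ndpi,0.tif'
#   rm -rf /fastdata/tmp/example.ndpi
#
# The script is then submitted with: qsub -q slide_convert.q <script>.sh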
def _listdir_error(error):
print >>sys.stderr, "Could not traverse/list:", error.filename
def check_files(wsi_dir=DEFAULT_WSI_DIR):
"""Checks for NDPI and SVS images
can probably be deleted...
Arguments:
wsi_dir -- The base directory to (recursively) search for .ndpi images.
    Returns: count of found NDPI images
"""
print "Parsing",wsi_dir
# sanity checks
if not os.path.isdir(wsi_dir):
raise IOError('SVS or NDPI base path is not a directory or is unreadable: ' + str(wsi_dir))
# get rid of any trailing slashes
wsi_dir = wsi_dir.rstrip('/')
global ndpi_count
# arg handling
v = _verbose >= 1; vv = _verbose >= 2
wsi_prefix_len = len(wsi_dir) + 1 # plus 1 for leading '/'
ndpi_pat = re.compile(r'.*\.ndpi$', re.IGNORECASE)
# crawl looking for svs files
for dirpath, dirnames, filenames in os.walk(wsi_dir, followlinks=True, onerror=_listdir_error):
for fname in filenames:
# NDPI (slide) file?
if ndpi_pat.match(fname):
ndpi_count +=1
file_with_path = os.path.join(dirpath, fname)
if v: print >>sys.stderr, "Slide: ", file_with_path
path_suffix = dirpath[wsi_prefix_len:]
path = fname.split('/')
file = path[len(path)-1]
### first check if the ndpi file is registered in our database...
check_image_status_in_db(file_with_path,'ndpi','adrc_slide_database','ndpi_slide_info')
if check_for_valid_ADRC_ID( file) or True :
input_file = os.path.join(dirpath)+'/'+file
pyramid_file = input_file.replace(DEFAULT_WSI_DIR,DEFAULT_PYRAMID_DIR)+'.dzi.tif'
if not os.path.isfile(pyramid_file):
print "Generate pyramid for",file
gen_ndpi_pyramid(input_file,pyramid_file)
else:
check_image_status_in_db(pyramid_file,'pyramid','adrc_slide_database','dzi_pyramid_info')
return ( ndpi_count)
def create_ADRC_schemas():
create_adrc_pyramid_schema = """
CREATE TABLE `dzi_pyramid_info` (
`pyramid_filename` varchar(200) DEFAULT NULL,
`image_width` int(10) unsigned DEFAULT NULL,
`image_height` int(10) unsigned DEFAULT NULL,
`resolution` int(11) DEFAULT '40',
`full_file_path` varchar(255) DEFAULT NULL,
`file_basename` varchar(100) DEFAULT NULL,
`filesize` int(10) unsigned DEFAULT NULL,
`parent_slide_filename` varchar(50) DEFAULT NULL,
`parent_slide_id` int(10) unsigned DEFAULT NULL,
`pyramid_folder` varchar(80) DEFAULT NULL,
`main_project_name` varchar(20) DEFAULT NULL,
`stain_type` varchar(30) DEFAULT NULL,
`tissue_type` varchar(30) DEFAULT NULL,
`pyramid_id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`pyramid_id`),
KEY `full_file_name` (`full_file_path`),
KEY `full_file_path` (`full_file_path`)
) ENGINE=MyISAM ;
CREATE TABLE `corrupt_or_unreadable_pyramid_files` (
`full_file_name` text,
`filesize` int(10) unsigned DEFAULT NULL,
`active_archive` tinyint(4) DEFAULT NULL,
`pyramid_id` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`pyramid_id`)
)
"""
print create_adrc_pyramid_schema
"""def main(args=None):
if args is None: args = sys.argv[1:]
global _verbose; _verbose = opts.verbose
currentdir = DEFAULT_WSI_DIR
# for currentdir in DIRS_WITH_IMAGES:
#check_files(wsi_dir=opts.wsi_dir)
# (ndpi_count) = check_files(currentdir+'ADRC61-128/') ## is running on node16
(ndpi_count) = check_files(currentdir)
# create_ADRC_schemas()
#et_database_slide_metadata('adrc_slide_database','dzi_pyramid_info')
# update_annotations('adrc_slide_databse')
print "NDPI slides:", ndpi_count
"""
def update_md5_values(database,table_to_crawl,primary_key,db_cursor, update_cursor):
#sql_lookup = "select * from `%s`.`%s` where md5sum is NULL and pyramid_folder like '%%BRCA%%' " % (database,table_to_crawl)
sql_lookup = "select * from `%s`.`%s` where md5sum is NULL " % (database,table_to_crawl)
db_cursor.execute(sql_lookup)
data = db_cursor.fetchall()
print len(data),"rows to process"
for row in data:
if os.path.isfile(row['full_file_path']):
print row
update_stmt = "update `%s`.`%s` set md5sum='%s' where %s='%s'" % (database,table_to_crawl,md5sum(row['full_file_path']),primary_key,row[primary_key])
print update_stmt
update_cursor.execute(update_stmt)
else:
print "missing",row
update_stmt = "delete from `%s`.`%s` where %s='%s'" % (database,table_to_crawl,primary_key,row[primary_key])
print update_stmt
#update_cursor.execute(update_stmt)
def locate_md5_collissions(database,table_to_crawl,db_cursor, update_cursor):
sql_lookup = "select md5sum, count(*) as count from `%s`.`%s` group by md5sum having count>1" % (database,table_to_crawl)
print sql_lookup
db_cursor.execute(sql_lookup)
data = db_cursor.fetchall()
print len(data),"rows to process"
md5_collision_list = []
for row in data:
#print row
md5_collision_list.append(row['md5sum'])
#print md5_collision_list
print len(md5_collision_list),"entries with 2 or more matching md5 values"
for md5 in md5_collision_list:
if md5 is not None:
dup_sql = "select * from `%s`.`%s` where md5sum='%s'" % (database,table_to_crawl,md5)
#print dup_sql
db_cursor.execute(dup_sql)
data = db_cursor.fetchall()
#print data[0]
print "------------NEXT ENTRY has %d---------------" % len(data)
#print data
filename = os.path.basename(data[0]['full_file_path'])
#print svs_filename
for row in data:
print row['pyramid_filename']
if filename not in row['full_file_path']:
base_tcga_id = filename.split('.')[0]
if base_tcga_id not in row['full_file_path']:
print "shit",filename,row['full_file_path'],base_tcga_id
print row
# print data[0]
#print update_stmt
#update_cursor.execute(update_stmt)
#pyramid_filename': '/bigdata2/PYRAMIDS/CDSA/BRCA_Diagnostic/nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0/TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs.dzi.tif', 'active_tcga_slide': 0, 'resolution': 40L, 'md5sum': None, 'image_width': 113288L, 'pyramid_generated': 1, 'patient_id': 'TCGA-E2-A14Y', 'stain_type': 'BRCA', 'image_height': 84037L, 'filesize': 1971660649L, 'slide_folder': 'nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0', 'slide_filename': 'TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs', 'main_project_name': None, 'slide_id': 29602L,
# 'full_file_path': '/bigdata/RAW_SLIDE_LINKS/CDSA-LOCAL/BRCA_Diagnostic/nationwidechildrens.org_BRCA.diagnostic_images.Level_1.93.0.0/TCGA-E2-A14Y-01Z-00-DX1.804A22A3-FD8D-4C8A-A766-48D28434DE22.svs',
# 'tissue_type': 'diagnostic'}
### find collisions across pyramid_filenames as well..
def find_rogue_pyramid_filenames(database,db_cursor,con_two):
"""so this will check and see if the full file path and the pyramid_filename are... the same file... im wondering if I screwed up at some point
and made the associations wrong"""
rogue_sql = "select * from `%s`.`svs_slide_info`" % (database)
print rogue_sql
db_cursor.execute(rogue_sql)
data = db_cursor.fetchall()
for row in data:
pyr = os.path.basename( row['pyramid_filename'])
svs = os.path.basename( row['full_file_path'] )
if svs not in pyr and pyr is not '':
print "SHIT, pyr=%s,svs=%s" % ( pyr,svs)
print row
def find_unlinked_files( db_cursor):
"""this will look for archive directories that do not have a corresponding link in the RAW_SLIDE_LINK
dir"""
select_stmt = " select * from `latest_archive_info`"
print select_stmt
db_cursor.execute(select_stmt)
result = db_cursor.fetchall()
active_slide_archive = []
for row in result:
archive_name = row['ARCHIVE_NAME']
if 'slide' in archive_name or 'diagnostic' in archive_name or 'tissue' in archive_name:
# print archive_name
active_slide_archive.append(archive_name)
print "I have found",len(active_slide_archive),"active slid archives"
link_path = '/bigdata/RAW_SLIDE_LINKS/CDSA/*/'
all_linked_dirs = glob.glob( link_path+'*')
currently_linked_dirs = [ os.path.basename(dir) for dir in all_linked_dirs]
for active_dir in active_slide_archive:
if active_dir not in currently_linked_dirs:
print "need to link",active_dir
return(active_slide_archive)
#(cur_one, cur_two) = dsa.connect_to_db('localhost','root','cancersuckz!','cdsa_js_prod')
#import dsa_common_functions as dsa
#(cur_one, cur_two) = dsa.connect_to_db('localhost','root','cancersuckz!','cdsa_js_prod')
#active_archive_list = dsa.find_unlinked_files(cur_one)
#active_archive_list
#history
"""Now need to check if file is on the filesystem
result = metadata_dict_cursor.fetchall()
null_rows = 0
for row in result:
full_file_path = row['full_file_path']
patient_id = get_tcga_id( os.path.basename(full_file_path) ,False)
"""
"""
"""
if __name__ == '__main__':
print "Nothing to do..."
    (con_one,con_two) = connect_to_db('localhost', 'root', 'cancersuckz!', 'cdsa_js_prod')
find_unlinked_files(con_one)
#update_md5_values('cdsa_js_prod','svs_slide_info','slide_id',con_one,con_two)
#locate_md5_collissions('cdsa_js_prod','svs_slide_info',con_one,con_two)
#locate_md5_collissions('cdsa_js_prod','dzi_pyramid_info',con_one,con_two)
validate_slide_pyramid_linkage(con_one,con_two)
#find_rogue_pyramid_filenames('cdsa_js_prod',con_one,con_two)
#update_md5_values('cdsa_js_prod','dzi_pyramid_info','pyramid_id',con_one,con_two)
generate_slide_pyramid_linkage(con_one,con_two)
| apache-2.0 | 2,299,405,208,769,935,400 | 39.347734 | 619 | 0.688587 | false |
mosra/m.css | plugins/m/dox.py | 1 | 8330 | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
from docutils.parsers.rst.states import Inliner
from docutils import nodes, utils
from docutils.parsers import rst
from docutils.parsers.rst.roles import set_classes
import xml.etree.ElementTree as ET
import os
import re
import logging
logger = logging.getLogger(__name__)
# Modified from abbr / gh / gl / ... to add support for queries and hashes
link_regexp = re.compile(r'(?P<title>.*) <(?P<link>[^?#]+)(?P<hash>[?#].+)?>')
def parse_link(text):
link = utils.unescape(text)
m = link_regexp.match(link)
if m:
title, link, hash = m.group('title', 'link', 'hash')
if not hash: hash = '' # it's None otherwise
else:
title, hash = '', ''
return title, link, hash
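# Illustrative sketch (assumed inputs, not verified against a real project) of how
# parse_link() splits the ``title <link#hash>`` syntax accepted by the :dox: role:
#
#   parse_link('std::vector')                      -> ('', 'std::vector', '')
#   parse_link('Containers <Corrade::Containers>') -> ('Containers', 'Corrade::Containers', '')
#   parse_link('Math docs <math#special>')         -> ('Math docs', 'math', '#special')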
def init(tagfiles, input):
global symbol_mapping, symbol_prefixes, tagfile_basenames
# Pre-round to populate subclasses. Clear everything in case we init'd
# before already.
tagfile_basenames = []
symbol_mapping = {}
symbol_prefixes = ['']
for f in tagfiles:
tagfile, path = f[:2]
prefixes = f[2] if len(f) > 2 else []
css_classes = f[3] if len(f) > 3 else []
tagfile_basenames += [(os.path.splitext(os.path.basename(tagfile))[0], path, css_classes)]
symbol_prefixes += prefixes
tree = ET.parse(os.path.join(input, tagfile))
root = tree.getroot()
for child in root:
if child.tag == 'compound' and 'kind' in child.attrib:
# Linking to pages
if child.attrib['kind'] == 'page':
link = path + child.find('filename').text + '.html'
symbol_mapping[child.find('name').text] = (child.find('title').text, link, css_classes)
# Linking to files
if child.attrib['kind'] == 'file':
file_path = child.find('path')
link = path + child.find('filename').text + ".html"
symbol_mapping[(file_path.text if file_path is not None else '') + child.find('name').text] = (None, link, css_classes)
for member in child.findall('member'):
if not 'kind' in member.attrib: continue
# Preprocessor defines and macros
if member.attrib['kind'] == 'define':
symbol_mapping[member.find('name').text + ('()' if member.find('arglist').text else '')] = (None, link + '#' + member.find('anchor').text, css_classes)
# Linking to namespaces, structs and classes
if child.attrib['kind'] in ['class', 'struct', 'namespace']:
name = child.find('name').text
link = path + child.findtext('filename') # <filename> can be empty (cppreference tag file)
symbol_mapping[name] = (None, link, css_classes)
for member in child.findall('member'):
if not 'kind' in member.attrib: continue
# Typedefs, constants
if member.attrib['kind'] == 'typedef' or member.attrib['kind'] == 'enumvalue':
symbol_mapping[name + '::' + member.find('name').text] = (None, link + '#' + member.find('anchor').text, css_classes)
# Functions
if member.attrib['kind'] == 'function':
# <filename> can be empty (cppreference tag file)
symbol_mapping[name + '::' + member.find('name').text + "()"] = (None, link + '#' + member.findtext('anchor'), css_classes)
# Enums with values
if member.attrib['kind'] == 'enumeration':
enumeration = name + '::' + member.find('name').text
symbol_mapping[enumeration] = (None, link + '#' + member.find('anchor').text, css_classes)
for value in member.findall('enumvalue'):
symbol_mapping[enumeration + '::' + value.text] = (None, link + '#' + value.attrib['anchor'], css_classes)
# Sections
for section in child.findall('docanchor'):
symbol_mapping[section.text] = (section.attrib.get('title', ''), link + '#' + section.text, css_classes)
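# A minimal sketch of the Doxygen tag file structure init() expects; the compound
# name, filename and anchor below are assumptions for illustration only:
#
#   <tagfile>
#     <compound kind="class">
#       <name>Foo::Bar</name>
#       <filename>classFoo_1_1Bar.html</filename>
#       <member kind="function">
#         <name>baz</name>
#         <anchor>a1b2c3</anchor>
#         <arglist>()</arglist>
#       </member>
#     </compound>
#   </tagfile>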
def dox(name, rawtext, text, lineno, inliner: Inliner, options={}, content=[]):
title, target, hash = parse_link(text)
# Otherwise adding classes to the options behaves globally (uh?)
_options = dict(options)
set_classes(_options)
# Avoid assert on adding to undefined member later
if 'classes' not in _options: _options['classes'] = []
# Try linking to the whole docs first
for basename, url, css_classes in tagfile_basenames:
if basename == target:
if not title:
# TODO: extract title from index page in the tagfile
logger.warning("Link to main page `{}` requires a title".format(target))
title = target
_options['classes'] += css_classes
node = nodes.reference(rawtext, title, refuri=url + hash, **_options)
return [node], []
for prefix in symbol_prefixes:
if prefix + target in symbol_mapping:
link_title, url, css_classes = symbol_mapping[prefix + target]
if title:
use_title = title
elif link_title:
use_title = link_title
else:
if link_title is not None:
logger.warning("Doxygen anchor `{}` has no title, using its ID as link title".format(target))
use_title = target
_options['classes'] += css_classes
node = nodes.reference(rawtext, use_title, refuri=url + hash, **_options)
return [node], []
# TODO: print file and line
#msg = inliner.reporter.warning(
#'Doxygen symbol %s not found' % target, line=lineno)
#prb = inliner.problematic(rawtext, rawtext, msg)
if title:
logger.warning("Doxygen symbol `{}` not found, rendering just link title".format(target))
node = nodes.inline(rawtext, title, **_options)
else:
logger.warning("Doxygen symbol `{}` not found, rendering as monospace".format(target))
node = nodes.literal(rawtext, target, **_options)
return [node], []
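# Hedged usage sketch for the role above; the symbols and titles are assumed and
# depend entirely on what the configured tag files actually contain:
#
#   :dox:`MyNamespace::MyClass`                       symbol used as the link title
#   :dox:`frobnicate() <MyNamespace::frobnicate()>`   custom title for a function
#   :dox:`the intro page <intro-page-id>`             page/anchor linked by its ID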
def register_mcss(mcss_settings, **kwargs):
rst.roles.register_local_role('dox', dox)
init(input=mcss_settings['INPUT'],
tagfiles=mcss_settings.get('M_DOX_TAGFILES', []))
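# A hedged configuration sketch (file names, URLs and prefixes are assumptions, not
# taken from any real project): each entry is (tag file, base URL, optional list of
# namespace prefixes tried when resolving symbols, optional list of extra CSS
# classes), mirroring how init() unpacks the tuples above.
#
#   M_DOX_TAGFILES = [
#       ('doxygen/mylib.tag', 'https://docs.example.com/mylib/',
#        ['MyLib::'], ['m-doc-external'])
#   ]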
# Below is only Pelican-specific functionality. If Pelican is not found, these
# do nothing.
def _pelican_configure(pelicanobj):
settings = {
# For backwards compatibility, the input directory is pelican's CWD
'INPUT': os.getcwd(),
}
for key in ['M_DOX_TAGFILES']:
if key in pelicanobj.settings: settings[key] = pelicanobj.settings[key]
register_mcss(mcss_settings=settings)
def register(): # for Pelican
from pelican import signals
signals.initialized.connect(_pelican_configure)
| mit | -2,459,522,413,668,937,700 | 42.369792 | 179 | 0.59169 | false |
lliss/tr-55 | tr55/water_quality.py | 1 | 1621 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from tr55.tablelookup import lookup_load, lookup_nlcd
def get_volume_of_runoff(runoff, cell_count, cell_resolution):
"""
Calculate the volume of runoff over the entire modeled area
Args:
runoff (number): Q from TR55, averaged amount of runoff over a number
of cells.
cell_count (integer): The number of cells included in the area
cell_resolution (number): The size in square meters that a cell
represents
Returns:
        The volume of runoff in liters over the total area of interest
"""
    # Runoff is in inches, so convert it to meters, which is the unit used for the
    # cell area, and compute the meter-cells in the group. Multiply by the cell
    # resolution to get the runoff volume in cubic meters.
inch_to_meter = 0.0254
runoff_m = runoff * inch_to_meter
meter_cells = runoff_m * cell_count
volume_cubic_meters = meter_cells * cell_resolution
liters = volume_cubic_meters * 1000
return liters
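# Worked example (assumed numbers, shown only to illustrate the unit conversion):
# runoff of 1.2 inches over 100 cells at 900 m^2 per cell (a 30 m grid) gives
#   1.2 * 0.0254 = 0.03048 m of runoff
#   0.03048 * 100 * 900 = 2743.2 m^3
#   2743.2 * 1000 = 2,743,200 liters
#
#   get_volume_of_runoff(1.2, 100, 900)  # -> roughly 2743200.0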
def get_pollutant_load(use_type, pollutant, runoff_liters):
"""
Calculate the pollutant load over a particular land use type given an
amount of runoff generated on that area and an event mean concentration
of the pollutant. Returns the pollutant load in lbs.
"""
mg_per_kg = 1000000
lbs_per_kg = 2.205
nlcd = lookup_nlcd(use_type)
emc = lookup_load(nlcd, pollutant)
load_mg_l = emc * runoff_liters
return (load_mg_l / mg_per_kg) * lbs_per_kg
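# Worked example (the event mean concentration is assumed for illustration; real
# values come from lookup_load() and the underlying tables): with an EMC of
# 2.0 mg/L and 1,000,000 liters of runoff,
#   load = 2.0 * 1,000,000 mg = 2,000,000 mg = 2 kg = 2 * 2.205 = 4.41 lbs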
| apache-2.0 | -5,661,412,742,735,621,000 | 29.018519 | 79 | 0.679827 | false |
examachine/pisi | pisi/data/component.py | 1 | 7983 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
# Author: Eray Ozkural <eray at pardus.org.tr>
import gettext
__trans = gettext.translation('pisi', fallback=True)
_ = __trans.ugettext
import pisi
import pisi.context as ctx
import pisi.exml.xmlfile as xmlfile
import pisi.exml.autoxml as autoxml
from pisi.db.itembyrepo import ItemByRepoDB
class Error(pisi.Error):
pass
__metaclass__ = autoxml.autoxml
class Distribution(xmlfile.XmlFile):
__metaclass__ = autoxml.autoxml
tag = "PISI"
t_SourceName = [autoxml.Text, autoxml.mandatory] # name of distribution (source)
t_Description = [autoxml.LocalText, autoxml.mandatory]
t_Version = [autoxml.Text, autoxml.optional]
t_Type = [autoxml.Text, autoxml.mandatory]
t_Dependencies = [ [autoxml.Text], autoxml.optional, "Dependencies/Distribution"]
t_BinaryName = [autoxml.Text, autoxml.optional] # name of repository (binary distro)
t_Architecture = [autoxml.Text, autoxml.optional] # architecture identifier
class Component(xmlfile.XmlFile):
"representation for component declarations"
__metaclass__ = autoxml.autoxml
tag = "PISI"
t_Name = [autoxml.String, autoxml.mandatory] # fully qualified name
# component name in other languages, for instance in Turkish
# LocalName for system.base could be sistem.taban or "Taban Sistem",
# this could be useful for GUIs
t_LocalName = [autoxml.LocalText, autoxml.mandatory]
# Information about the component
t_Summary = [autoxml.LocalText, autoxml.mandatory]
t_Description = [autoxml.LocalText, autoxml.mandatory]
#t_Icon = [autoxml.Binary, autoxml.mandatory]
# Dependencies to other components
t_Dependencies = [ [autoxml.String], autoxml.optional, "Dependencies/Component"]
# the parts of this component.
# to be filled by the component database, thus it is optional.
t_Packages = [ [autoxml.String], autoxml.optional, "Parts/Package"]
t_Sources = [ [autoxml.String], autoxml.optional, "Parts/Source"]
# TODO: this is probably not necessary since we use fully qualified
# module names (like in Java)
#t_PartOf = [autoxml.Text, autoxml.mandatory]
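    # A hedged sketch of the component XML this class maps to (element names follow
    # the t_* declarations above; the concrete values are assumptions):
    #
    #   <PISI>
    #       <Name>system.base</Name>
    #       <LocalName xml:lang="tr">Taban Sistem</LocalName>
    #       <Summary>Minimal base system</Summary>
    #       <Description>Packages required by every PiSi system.</Description>
    #   </PISI>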
#FIXME: recursive declarations do not work!
#class ComponentTree(xmlfile.XmlFile):
# "index representation for the component structure"
#
# __metaclass__ = autoxml.autoxml
#
# tag = "Component"
#
# t_Name = [autoxml.Text, autoxml.mandatory] # fully qualified name
# #t_Icon = [autoxml.Binary, autoxml.mandatory]
# t_Dependencies = [ [autoxml.Text], autoxml.optional, "Component"]
# #t_Parts = [ [pisi.component.ComponentTree], autoxml.optional, "Component"]
class ComponentDB(object):
"""a database of components"""
#FIXME: we might need a database per repo in the future
def __init__(self):
self.d = ItemByRepoDB('component')
def close(self):
self.d.close()
def destroy(self):
self.d.destroy()
def has_component(self, name, repo = pisi.db.itembyrepo.repos, txn = None):
#name = shelve.LockedDBShelf.encodekey(name)
name = str(name)
return self.d.has_key(name, repo, txn)
def get_component(self, name, repo=None, txn = None):
try:
return self.d.get_item(name, repo, txn=txn)
except pisi.db.itembyrepo.NotfoundError, e:
raise Error(_('Component %s not found') % name)
def get_component_repo(self, name, repo=None, txn = None):
#name = shelve.LockedDBShelf.encodekey(name)
try:
return self.d.get_item_repo(name, repo, txn=txn)
except pisi.db.itembyrepo.NotfoundError, e:
raise Error(_('Component %s not found') % name)
def get_union_comp(self, name, txn = None, repo = pisi.db.itembyrepo.repos ):
"""get a union of all repository components packages, not just the first repo in order.
get only basic repo info from the first repo"""
def proc(txn):
s = self.d.d.get(name, txn=txn)
pkgs = set()
srcs = set()
for repostr in self.d.order(repo = repo):
if s.has_key(repostr):
pkgs |= set(s[repostr].packages)
srcs |= set(s[repostr].sources)
comp = self.get_component(name)
comp.packages = list(pkgs)
comp.sources = list(srcs)
return comp
return self.d.txn_proc(proc, txn)
def list_components(self, repo=None):
return self.d.list(repo)
def update_component(self, component, repo, txn = None):
def proc(txn):
if self.has_component(component.name, repo, txn):
# preserve the list of packages
component.packages = self.d.get_item(component.name, repo, txn).packages
self.d.add_item(component.name, component, repo, txn)
self.d.txn_proc(proc, txn)
def add_package(self, component_name, package, repo, txn = None):
def proc(txn):
assert component_name
if self.has_component(component_name, repo, txn):
component = self.get_component(component_name, repo, txn)
else:
component = Component( name = component_name )
if not package in component.packages:
component.packages.append(package)
self.d.add_item(component_name, component, repo, txn) # update
self.d.txn_proc(proc, txn)
def remove_package(self, component_name, package, repo = None, txn = None):
def proc(txn, repo):
if not self.has_component(component_name, repo, txn):
raise Error(_('Information for component %s not available') % component_name)
if not repo:
repo = self.d.which_repo(component_name, txn=txn) # get default repo then
component = self.get_component(component_name, repo, txn)
if package in component.packages:
component.packages.remove(package)
self.d.add_item(component_name, component, repo, txn) # update
        ctx.txn_proc(lambda x: proc(x, repo), txn)
def add_spec(self, component_name, spec, repo, txn = None):
def proc(txn):
assert component_name
if self.has_component(component_name, repo, txn):
component = self.get_component(component_name, repo, txn)
else:
component = Component( name = component_name )
if not spec in component.sources:
component.sources.append(spec)
self.d.add_item(component_name, component, repo, txn) # update
self.d.txn_proc(proc, txn)
def remove_spec(self, component_name, spec, repo = None, txn = None):
def proc(txn, repo):
if not self.has_component(component_name, repo, txn):
raise Error(_('Information for component %s not available') % component_name)
if not repo:
repo = self.d.which_repo(component_name, txn=txn) # get default repo then
component = self.get_component(component_name, repo, txn)
if spec in component.sources:
component.sources.remove(spec)
self.d.add_item(component_name, component, repo, txn) # update
        ctx.txn_proc(lambda x: proc(x, repo), txn)
def clear(self, txn = None):
self.d.clear(txn)
def remove_component(self, name, repo = None, txn = None):
name = str(name)
self.d.remove_item(name, repo, txn)
def remove_repo(self, repo, txn = None):
self.d.remove_repo(repo, txn=txn)
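# Illustrative usage sketch (repository, component and package names are assumed;
# real code normally obtains the database through the pisi context instead):
#
#   cdb = ComponentDB()
#   cdb.add_package('system.base', 'bash', 'pardus-2007')
#   comp = cdb.get_union_comp('system.base')
#   print comp.packages
#   cdb.close()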
| gpl-3.0 | -2,851,246,268,599,992,300 | 37.196172 | 95 | 0.629463 | false |