code | repo_name | path | language | license | size |
---|---|---|---|---|---|
"""
"""
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.CFG import CFG
from DIRAC.Core.Utilities import List
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Utilities.JDL import loadJDLAsCFG, dumpCFGAsJDL
from DIRAC.WorkloadManagementSystem.Agent.SiteDirector import getSubmitPools
class JobManifest(object):
def __init__(self, manifest=""):
self.__manifest = CFG()
self.__dirty = False
self.__ops = False
if manifest:
result = self.load(manifest)
if not result['OK']:
raise Exception(result['Message'])
def isDirty(self):
return self.__dirty
def setDirty(self):
self.__dirty = True
def clearDirty(self):
self.__dirty = False
def load(self, dataString):
"""
Auto discover format type based on [ .. ] of JDL
"""
dataString = dataString.strip()
if dataString[0] == "[" and dataString[-1] == "]":
return self.loadJDL(dataString)
else:
return self.loadCFG(dataString)
def loadJDL(self, jdlString):
"""
Load job manifest from JDL format
"""
result = loadJDLAsCFG(jdlString.strip())
if not result['OK']:
self.__manifest = CFG()
return result
self.__manifest = result['Value'][0]
return S_OK()
def loadCFG(self, cfgString):
"""
Load job manifest from CFG format
"""
try:
self.__manifest.loadFromBuffer(cfgString)
except Exception as e:
return S_ERROR("Can't load manifest from cfg: %s" % str(e))
return S_OK()
def dumpAsCFG(self):
return str(self.__manifest)
def getAsCFG(self):
return self.__manifest.clone()
def dumpAsJDL(self):
return dumpCFGAsJDL(self.__manifest)
def __getCSValue(self, varName, defaultVal=None):
if not self.__ops:
self.__ops = Operations(group=self.__manifest['OwnerGroup'], setup=self.__manifest['DIRACSetup'])
if varName[0] != "/":
varName = "JobDescription/%s" % varName
return self.__ops.getValue(varName, defaultVal)
def __checkNumericalVar(self, varName, defaultVal, minVal, maxVal):
"""
Check a numerical var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue("Default%s" % varName, defaultVal)
else:
varValue = self.__manifest[varName]
initialVal = varValue
try:
varValue = long(varValue)
except BaseException:
return S_ERROR("%s must be a number" % varName)
minVal = self.__getCSValue("Min%s" % varName, minVal)
maxVal = self.__getCSValue("Max%s" % varName, maxVal)
varValue = max(minVal, min(varValue, maxVal))
if initialVal != varValue:
self.__manifest.setOption(varName, varValue)
return S_OK(varValue)
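  # e.g. with the default CPUTime limits used in check() below (Min 100,
  # Max 500000): a requested value of 10 is raised to 100 and 10000000 is
  # capped at 500000 by the max(min(...)) clamp, and the manifest is rewritten.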
def __checkChoiceVar(self, varName, defaultVal, choices):
"""
Check a choice var
"""
initialVal = False
if varName not in self.__manifest:
varValue = self.__getCSValue("Default%s" % varName, defaultVal)
else:
varValue = self.__manifest[varName]
initialVal = varValue
if varValue not in self.__getCSValue("Choices%s" % varName, choices):
return S_ERROR("%s is not a valid value for %s" % (varValue, varName))
if initialVal != varValue:
self.__manifest.setOption(varName, varValue)
return S_OK(varValue)
def __checkMultiChoice(self, varName, choices):
"""
Check a multi choice var
"""
initialVal = False
if varName not in self.__manifest:
return S_OK()
else:
varValue = self.__manifest[varName]
initialVal = varValue
choices = self.__getCSValue("Choices%s" % varName, choices)
for v in List.fromChar(varValue):
if v not in choices:
return S_ERROR("%s is not a valid value for %s" % (v, varName))
if initialVal != varValue:
self.__manifest.setOption(varName, varValue)
return S_OK(varValue)
def __checkMaxInputData(self, maxNumber):
"""
Check Maximum Number of Input Data files allowed
"""
varName = "InputData"
if varName not in self.__manifest:
return S_OK()
varValue = self.__manifest[varName]
if len(List.fromChar(varValue)) > maxNumber:
return S_ERROR('Number of Input Data Files (%s) greater than current limit: %s' %
(len(List.fromChar(varValue)), maxNumber))
return S_OK()
def __contains__(self, key):
""" Check if the manifest has the required key
"""
return key in self.__manifest
def setOptionsFromDict(self, varDict):
for k in sorted(varDict):
self.setOption(k, varDict[k])
def check(self):
"""
Check that the manifest is OK
"""
for k in ['OwnerName', 'OwnerDN', 'OwnerGroup', 'DIRACSetup']:
if k not in self.__manifest:
return S_ERROR("Missing var %s in manifest" % k)
# Check CPUTime
result = self.__checkNumericalVar("CPUTime", 86400, 100, 500000)
if not result['OK']:
return result
result = self.__checkNumericalVar("Priority", 1, 0, 10)
if not result['OK']:
return result
allowedSubmitPools = getSubmitPools(self.__manifest['OwnerGroup'])
result = self.__checkMultiChoice("SubmitPools", list(set(allowedSubmitPools)))
if not result['OK']:
return result
result = self.__checkMultiChoice("PilotTypes", ['private'])
if not result['OK']:
return result
maxInputData = Operations().getValue("JobDescription/MaxInputData", 500)
result = self.__checkMaxInputData(maxInputData)
if not result['OK']:
return result
operation = Operations(group=self.__manifest['OwnerGroup'])
allowedJobTypes = operation.getValue("JobDescription/AllowedJobTypes", ['User', 'Test', 'Hospital'])
transformationTypes = operation.getValue("Transformations/DataProcessing", [])
result = self.__checkMultiChoice("JobType", allowedJobTypes + transformationTypes)
if not result['OK']:
return result
return S_OK()
def createSection(self, secName, contents=False):
if secName not in self.__manifest:
if contents and not isinstance(contents, CFG):
return S_ERROR("Contents for section %s is not a cfg object" % secName)
self.__dirty = True
return S_OK(self.__manifest.createNewSection(secName, contents=contents))
return S_ERROR("Section %s already exists" % secName)
def getSection(self, secName):
self.__dirty = True
if secName not in self.__manifest:
return S_ERROR("%s does not exist" % secName)
sec = self.__manifest[secName]
if not sec:
return S_ERROR("%s section empty" % secName)
return S_OK(sec)
def setSectionContents(self, secName, contents):
if contents and not isinstance(contents, CFG):
return S_ERROR("Contents for section %s is not a cfg object" % secName)
self.__dirty = True
if secName in self.__manifest:
self.__manifest[secName].reset()
self.__manifest[secName].mergeWith(contents)
else:
self.__manifest.createNewSection(secName, contents=contents)
def setOption(self, varName, varValue):
"""
Set a var in job manifest
"""
self.__dirty = True
levels = List.fromChar(varName, "/")
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
cfg.createNewSection(l)
cfg = cfg[l]
cfg.setOption(levels[-1], varValue)
def remove(self, opName):
levels = List.fromChar(opName, "/")
cfg = self.__manifest
for l in levels[:-1]:
if l not in cfg:
return S_ERROR("%s does not exist" % opName)
cfg = cfg[l]
if cfg.deleteKey(levels[-1]):
self.__dirty = True
return S_OK()
return S_ERROR("%s does not exist" % opName)
def getOption(self, varName, defaultValue=None):
"""
Get a variable from the job manifest
"""
cfg = self.__manifest
return cfg.getOption(varName, defaultValue)
def getOptionList(self, section=""):
"""
Get a list of variables in a section of the job manifest
"""
cfg = self.__manifest.getRecursive(section)
if not cfg or 'value' not in cfg:
return []
cfg = cfg['value']
return cfg.listOptions()
def isOption(self, opName):
"""
Check if it is a valid option
"""
return self.__manifest.isOption(opName)
def getSectionList(self, section=""):
"""
Get a list of sections in the job manifest
"""
cfg = self.__manifest.getRecursive(section)
if not cfg or 'value' not in cfg:
return []
cfg = cfg['value']
return cfg.listSections()
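# A minimal usage sketch (hypothetical values, not part of the DIRAC sources):
# build a manifest, validate it, then serialize it back to JDL.
#
#   manifest = JobManifest()
#   manifest.setOptionsFromDict({'OwnerName': 'jdoe',
#                                'OwnerDN': '/DC=org/CN=jdoe',
#                                'OwnerGroup': 'dirac_user',
#                                'DIRACSetup': 'Production',
#                                'CPUTime': 3600})
#   result = manifest.check()        # validates and clamps CPUTime, Priority, ...
#   if result['OK']:
#       jdl = manifest.dumpAsJDL()   # round-trip back to JDL text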
| andresailer/DIRAC | WorkloadManagementSystem/Client/JobState/JobManifest.py | Python | gpl-3.0 | 8,502 |
import os
import argparse
from logger import HoneyHornetLogger
from threading import BoundedSemaphore
import threading
import logging
from datetime import date, datetime
from termcolor import colored
import http.client
import re
import time
class ViewChecker(HoneyHornetLogger):
def __init__(self, config=None):
HoneyHornetLogger.__init__(self)
self.config = config
self.verbose = False
self.banner = False
MAX_CONNECTIONS = 20 # max threads that can be created
self.CONNECTION_LOCK = BoundedSemaphore(value=MAX_CONNECTIONS)
self.TIMER_DELAY = 3 # timer delay used for Telnet testing
self.default_filepath = os.path.dirname(os.getcwd())
log_name = str(date.today()) + "_DEBUG.log"
log_name = os.path.join(self.default_filepath, "logs", log_name)
logging.basicConfig(filename=log_name, format='%(asctime)s %(levelname)s: %(message)s',
level=logging.DEBUG)
def determine_camera_model(self, vulnerable_host, https=False, retry=False):
""" simple banner grab with http.client """
        ports = ""  # optional fallback: comma-separated port list for plain-string hosts
self.CONNECTION_LOCK.acquire()
service = "DETERMINE-CAMERA-MODEL"
        if retry is False:
            try:
                host = vulnerable_host.ip
                ports_to_check = set(vulnerable_host.ports)
            except AttributeError:
                # a plain string was passed instead of a host object
                host = str(vulnerable_host)
                ports_to_check = set(p.strip() for p in ports.split(',') if p)
        elif retry is True:
            host = vulnerable_host
            # only the host string survives a retry; fall back to the common web
            # ports (an assumption -- the original left ports_to_check undefined here)
            ports_to_check = {80, 443}
if self.verbose:
print("[*] Checking camera make & model of {0}".format(host))
logging.info('{0} set for {1} service'.format(host, service))
try:
for port in ports_to_check:
if https is True:
conn = http.client.HTTPSConnection(host, port)
else:
conn = http.client.HTTPConnection(host, port)
conn.request("GET", "/")
http_r1 = conn.getresponse()
camera_check = http_r1.read(1024)
headers = http_r1.getheaders()
if self.verbose:
print(http_r1.status, http_r1.reason)
results = re.findall(r"<title>(?P<camera_title>.*)</title>", str(camera_check))
if results:
print(results)
else:
print("No match for <Title> tag found.")
# puts banner into the class instance of the host
# vulnerable_host.put_banner(port, banner_txt, http_r1.status, http_r1.reason, headers)
# banner_grab_filename = str(date.today()) + "_banner_grabs.log"
# banner_grab_filename = os.path.join(self.default_filepath, "logs", banner_grab_filename)
# with open(banner_grab_filename, 'a') as banner_log:
# banner_to_log = "host={0}, http_port={1},\nheaders={2},\nbanner={3}\n".format(host, port,
# headers, banner_txt)
# banner_log.write(banner_to_log)
except http.client.HTTPException:
try:
self.determine_camera_model(host, https=True, retry=True)
except Exception as error:
logging.exception("{0}\t{1}\t{2}\t{3}".format(host, port, service, error))
except Exception as error:
            if getattr(error, 'errno', None) == 104:  # ECONNRESET; error[0] was Python 2 style
self.determine_camera_model(host, https=True, retry=True)
logging.exception("{0}\t{1}\t{2}\t{3}".format(host, port, service, error))
except KeyboardInterrupt:
exit(0)
self.CONNECTION_LOCK.release()
def run_view_checker(self, hosts_to_check):
"""
Function tests hosts for default credentials on open 'admin' ports
Utilizes threading to greatly speed up the scanning
"""
service = "building_threads"
logging.info("Building threads.")
logging.info("Verbosity set to {0}".format(self.verbose))
threads = []
print("[*] Testing vulnerable host ip addresses...")
try:
for vulnerable_host in hosts_to_check:
if self.verbose:
print('[*] checking >> {0}'.format(vulnerable_host.ip))
if set(vulnerable_host.ports):
t0 = threading.Thread(target=self.determine_camera_model, args=(vulnerable_host, ))
threads.append(t0)
logging.info("Starting {0} threads.".format(len(threads)))
for thread in threads:
thread.start()
for thread in threads:
thread.join(120)
except KeyboardInterrupt:
exit(0)
except threading.ThreadError as error:
logging.exception("{0}\t{1}".format(service, error))
except Exception as e:
logging.exception(e)
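# A minimal driver sketch (assumes a hosts list produced by the wider
# honey-hornet config; the host objects and their ports are hypothetical):
#
#   checker = ViewChecker()
#   checker.verbose = True
#   checker.run_view_checker(hosts_to_check)   # each host needs .ip and .ports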
| ajackal/honey-hornet | honeyhornet/viewchecker.py | Python | gpl-3.0 | 5,097 |
# Copyright (C) 2016 The BET Development Team
# -*- coding: utf-8 -*-
# This demonstrates how to use BET in serial to sample a serial external model.
# run by calling "python serial_serial.py"
import os
import subprocess
import scipy.io as sio
import bet.sampling.basicSampling as bsam
def lb_model(input_data):
io_file_name = "io_file"
io_mdat = dict()
io_mdat['input'] = input_data
# save the input to file
sio.savemat(io_file_name, io_mdat)
# run the model
subprocess.call(['python', 'serial_model.py', io_file_name])
# read the output from file
io_mdat = sio.loadmat(io_file_name)
output_data = io_mdat['output']
return output_data
my_sampler = bsam.sampler(lb_model)
my_discretization = my_sampler.create_random_discretization(sample_type='r',
input_obj=4, savefile="serial_serial_example", num_samples=100)
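# For reference, a minimal 'serial_model.py' compatible with lb_model() above
# could look like this (a sketch under assumed conventions, not the file
# shipped with the BET examples):
#
#   import sys
#   import scipy.io as sio
#   mdat = sio.loadmat(sys.argv[1])          # read 'input' written by lb_model
#   mdat['output'] = mdat['input'].sum(1)    # hypothetical QoI: row sums
#   sio.savemat(sys.argv[1], mdat)           # hand 'output' back via the same file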
| smattis/BET-1 | examples/parallel_and_serial_sampling/serial_serial.py | Python | gpl-3.0 | 927 |
#
# Copyright (c) 2009, 2011, 2012 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
# $Revision: 1046 $
# $Author: tkeffer $
# $Date: 2013-02-21 06:38:26 -0800 (Thu, 21 Feb 2013) $
#
"""Almanac data
This module can optionally use PyEphem, which offers high quality
astronomical calculations. See http://rhodesmill.org/pyephem. """
import time
import sys
import math
import weeutil.Moon
import weewx.units
# If the user has installed ephem, use it. Otherwise, fall back to the weeutil algorithms:
try:
import ephem
except ImportError:
import weeutil.Sun
# NB: In order to avoid an 'autocall' bug in Cheetah versions before 2.1,
# this class must not be a "new-style" class.
class Almanac():
"""Almanac data.
ATTRIBUTES.
As a minimum, the following attributes are available:
sunrise: Time (local) upper limb of the sun rises above the horizon, formatted using the format 'timeformat'.
sunset: Time (local) upper limb of the sun sinks below the horizon, formatted using the format 'timeformat'.
    moon_phase: A description of the moon phase (e.g. "new moon", "waxing crescent", etc.)
moon_fullness: Percent fullness of the moon (0=new moon, 100=full moon)
    If the module 'ephem' is used, then many other attributes are available.
Here are a few examples:
sun.rise: Time upper limb of sun will rise above the horizon today in unix epoch time
sun.transit: Time of transit today (sun over meridian) in unix epoch time
sun.previous_sunrise: Time of last sunrise in unix epoch time
sun.az: Azimuth (in degrees) of sun
sun.alt: Altitude (in degrees) of sun
mars.rise: Time when upper limb of mars will rise above horizon today in unix epoch time
mars.ra: Right ascension of mars
etc.
EXAMPLES (note that these will only work in the Pacific Time Zone)
>>> t = 1238180400
>>> print timestamp_to_string(t)
2009-03-27 12:00:00 PDT (1238180400)
>>> almanac = Almanac(t, 46.0, -122.0)
Test backwards compatibility with attribute 'moon_fullness':
>>> print "Fullness of the moon (rounded) is %.2f%% [%s]" % (almanac.moon_fullness, almanac.moon_phase)
Fullness of the moon (rounded) is 2.00% [new (totally dark)]
Now get a more precise result for fullness of the moon:
>>> print "Fullness of the moon (more precise) is %.2f%%" % almanac.moon.moon_phase
Fullness of the moon (more precise) is 1.70%
Test backwards compatibility with attributes 'sunrise' and 'sunset'
>>> print "Sunrise, sunset:", almanac.sunrise, almanac.sunset
Sunrise, sunset: 06:56 19:30
Get sunrise, sun transit, and sunset using the new 'ephem' syntax:
>>> print "Sunrise, sun transit, sunset:", almanac.sun.rise, almanac.sun.transit, almanac.sun.set
Sunrise, sun transit, sunset: 06:56 13:13 19:30
Do the same with the moon:
>>> print "Moon rise, transit, set:", almanac.moon.rise, almanac.moon.transit, almanac.moon.set
Moon rise, transit, set: 06:59 14:01 21:20
Exercise equinox, solstice routines
>>> print almanac.next_vernal_equinox
20-Mar-2010 10:32
>>> print almanac.next_autumnal_equinox
22-Sep-2009 14:18
>>> print almanac.next_summer_solstice
20-Jun-2009 22:45
>>> print almanac.previous_winter_solstice
21-Dec-2008 04:03
>>> print almanac.next_winter_solstice
21-Dec-2009 09:46
Exercise moon state routines
>>> print almanac.next_full_moon
09-Apr-2009 07:55
>>> print almanac.next_new_moon
24-Apr-2009 20:22
>>> print almanac.next_first_quarter_moon
02-Apr-2009 07:33
Now location of the sun and moon
>>> print "Solar azimuth, altitude = (%.2f, %.2f)" % (almanac.sun.az, almanac.sun.alt)
Solar azimuth, altitude = (154.14, 44.02)
>>> print "Moon azimuth, altitude = (%.2f, %.2f)" % (almanac.moon.az, almanac.moon.alt)
Moon azimuth, altitude = (133.55, 47.89)
Try the pyephem "Naval Observatory" example.
>>> t = 1252252800
>>> print timestamp_to_gmtime(t)
2009-09-06 16:00:00 UTC (1252252800)
>>> atlanta = Almanac(t, 33.8, -84.4, pressure=0, horizon=-34.0/60.0)
>>> # Print it in GMT, so it can easily be compared to the example:
>>> print timestamp_to_gmtime(atlanta.sun.previous_rising.raw)
2009-09-06 11:14:56 UTC (1252235696)
>>> print timestamp_to_gmtime(atlanta.moon.next_setting.raw)
2009-09-07 14:05:29 UTC (1252332329)
Now try the civil twilight examples:
>>> print timestamp_to_gmtime(atlanta(horizon=-6).sun(use_center=1).previous_rising.raw)
2009-09-06 10:49:40 UTC (1252234180)
>>> print timestamp_to_gmtime(atlanta(horizon=-6).sun(use_center=1).next_setting.raw)
2009-09-07 00:21:22 UTC (1252282882)
"""
def __init__(self, time_ts, lat, lon,
altitude=None, # Use 'None' in case a bad value is passed in
temperature=None, # "
pressure=None, # "
horizon=None, # "
moon_phases=weeutil.Moon.moon_phases,
formatter=weewx.units.Formatter()):
"""Initialize an instance of Almanac
time_ts: A unix epoch timestamp with the time of the almanac. If None, the
present time will be used.
lat, lon: Observer's location
altitude: Observer's elevation in **meters**. [Optional. Default is 0 (sea level)]
temperature: Observer's temperature in **degrees Celsius**. [Optional. Default is 15.0]
pressure: Observer's atmospheric pressure in **mBars**. [Optional. Default is 1010]
horizon: Angle of the horizon in degrees [Optional. Default is zero]
moon_phases: An array of 8 strings with descriptions of the moon
phase. [optional. If not given, then weeutil.Moon.moon_phases will be used]
formatter: An instance of weewx.units.Formatter() with the formatting information
to be used.
"""
self.time_ts = time_ts if time_ts else time.time()
self.time_djd = timestamp_to_djd(self.time_ts)
self.lat = lat
self.lon = lon
self.altitude = altitude if altitude is not None else 0.0
self.temperature = temperature if temperature is not None else 15.0
self.pressure = pressure if pressure is not None else 1010.0
self.horizon = horizon if horizon is not None else 0.0
self.moon_phases = moon_phases
self.formatter = formatter
(y,m,d) = time.localtime(self.time_ts)[0:3]
(self.moon_index, self._moon_fullness) = weeutil.Moon.moon_phase(y, m, d)
self.moon_phase = self.moon_phases[self.moon_index]
# Check to see whether the user has module 'ephem'.
if 'ephem' in sys.modules:
self.hasExtras = True
else:
# No ephem package. Use the weeutil algorithms, which supply a minimum of functionality
(sunrise_utc, sunset_utc) = weeutil.Sun.sunRiseSet(y, m, d, self.lon, self.lat)
# The above function returns its results in UTC hours. Convert
# to a local time tuple
sunrise_tt = weeutil.weeutil.utc_to_local_tt(y, m, d, sunrise_utc)
sunset_tt = weeutil.weeutil.utc_to_local_tt(y, m, d, sunset_utc)
self._sunrise = time.strftime("%H:%M", sunrise_tt)
self._sunset = time.strftime("%H:%M", sunset_tt)
self.hasExtras = False
# Shortcuts, used for backwards compatibility
@property
def sunrise(self):
return self.sun.rise if self.hasExtras else self._sunrise
@property
def sunset(self):
return self.sun.set if self.hasExtras else self._sunset
@property
def moon_fullness(self):
return int(self.moon.moon_phase+0.5) if self.hasExtras else self._moon_fullness
# What follows is a bit of Python wizardry to allow syntax such as:
# almanac(horizon=-0.5).sun.rise
def __call__(self, **kwargs):
"""Call an almanac object as a functor. This allows overriding the values
used when the Almanac instance was initialized.
Named arguments:
Any named arguments will be passed on to the initializer of the ObserverBinder,
overriding any default values. These are all optional:
almanac_time: The observer's time in unix epoch time.
lat: The observer's latitude in degrees
lon: The observer's longitude in degrees
altitude: The observer's altitude in meters
horizon: The horizon angle in degrees
temperature: The observer's temperature (used to calculate refraction)
pressure: The observer's pressure (used to calculate refraction)
"""
# Using an encapsulated class allows easy access to the default values
class ObserverBinder(object):
# Use the default values provided by the outer class (Almanac):
def __init__(self, almanac_time=self.time_ts, lat=self.lat, lon=self.lon,
altitude=self.altitude, horizon=self.horizon, temperature=self.temperature,
pressure=self.pressure, formatter=self.formatter):
# Build an ephem Observer object
self.observer = ephem.Observer()
self.observer.date = timestamp_to_djd(almanac_time)
self.observer.lat = math.radians(lat)
self.observer.long = math.radians(lon)
self.observer.elev = altitude
self.observer.horizon = math.radians(horizon)
self.observer.temp = temperature
self.observer.pressure= pressure
self.formatter = formatter
def __getattr__(self, body):
"""Return a BodyWrapper that binds the observer to a heavenly body.
If there is no such body an exception of type AttributeError will
be raised.
body: A heavenly body. Examples, 'sun', 'moon', 'jupiter'
Returns:
An instance of a BodyWrapper. It will bind together the heavenly
body (an instance of something like ephem.Jupiter) and the observer
(an instance of ephem.Observer)
"""
# Find the module used by pyephem. For example, the module used for
# 'mars' is 'ephem.Mars'. If there is no such module, an exception
# of type AttributeError will get thrown.
ephem_module = getattr(ephem, body.capitalize())
# Now, together with the observer object, return an
# appropriate BodyWrapper
return BodyWrapper(ephem_module, self.observer, self.formatter)
# This will override the default values with any explicit parameters in kwargs:
return ObserverBinder(**kwargs)
def __getattr__(self, attr):
if not self.hasExtras:
# If the Almanac does not have extended capabilities, we can't
# do any of the following. Raise an exception.
raise AttributeError, "Unknown attribute %s" % attr
# We do have extended capability. Check to see if the attribute is a calendar event:
elif attr in ['previous_equinox', 'next_equinox',
'previous_solstice', 'next_solstice',
'previous_autumnal_equinox', 'next_autumnal_equinox',
'previous_vernal_equinox', 'next_vernal_equinox',
'previous_winter_solstice', 'next_winter_solstice',
'previous_summer_solstice', 'next_summer_solstice',
'previous_new_moon', 'next_new_moon',
'previous_first_quarter_moon', 'next_first_quarter_moon',
'previous_full_moon', 'next_full_moon',
'previous_last_quarter_moon', 'next_last_quarter_moon']:
# This is how you call a function on an instance when all you have
# is the function's name as a string
djd = getattr(ephem, attr)(self.time_djd)
return weewx.units.ValueHelper((djd, "dublin_jd", "group_time"),
context="ephem_year", formatter=self.formatter)
else:
# It's not a calendar event. The attribute must be a heavenly body
# (such as 'sun', or 'jupiter'). Create an instance of
# ObserverBinder by calling the __call__ function in Almanac, but
# with no parameters
binder = self()
# Now try getting the body as an attribute. If successful, an
# instance of BodyWrapper will be returned. If not, an exception of
# type AttributeError will be raised.
return getattr(binder, attr)
fn_map = {'rise' : 'next_rising',
'set' : 'next_setting',
'transit' : 'next_transit'}
class BodyWrapper(object):
"""This class wraps a celestial body. It returns results in degrees (instead of radians)
and percent (instead of fractions). For times, it returns the results as a ValueHelper.
It also deals with the unfortunate design decision in pyephem to change
the state of the celestial body when using it as an argument in certain functions."""
def __init__(self, body_factory, observer, formatter):
"""Initialize a wrapper
body_factory: A function that returns an instance of the body
to be wrapped. Example would be ephem.Sun
observer: An instance of ephem.Observer, containing the observer's lat, lon, time, etc.
formatter: An instance of weewx.units.Formatter(), containing the formatting
to be used for times.
"""
self.body_factory = body_factory
self.observer = observer
self.formatter = formatter
self.body = body_factory(observer)
self.use_center = False
# Calculate and store the start-of-day in Dublin Julian Days:
(y,m,d) = time.localtime(djd_to_timestamp(observer.date))[0:3]
self.sod_djd = timestamp_to_djd(time.mktime((y,m,d,0,0,0,0,0,-1)))
def __call__(self, use_center=False):
self.use_center = use_center
return self
def __getattr__(self, attr):
if attr in ['az', 'alt', 'a_ra', 'a_dec', 'g_ra', 'ra', 'g_dec', 'dec',
'elong', 'radius', 'hlong', 'hlat', 'sublat', 'sublong']:
# Return the results in degrees rather than radians
return math.degrees(getattr(self.body, attr))
elif attr=='moon_phase':
# Return the result in percent
return 100.0 * self.body.moon_phase
elif attr in ['next_rising', 'next_setting', 'next_transit', 'next_antitransit',
'previous_rising', 'previous_setting', 'previous_transit', 'previous_antitransit']:
# These functions have the unfortunate side effect of changing the state of the body
# being examined. So, create a temporary body and then throw it away
temp_body = self.body_factory()
time_djd = getattr(self.observer, attr)(temp_body, use_center=self.use_center)
return weewx.units.ValueHelper((time_djd, "dublin_jd", "group_time"), context="ephem_day", formatter=self.formatter)
elif attr in fn_map:
# These attribute names have to be mapped to a different function name. Like the
# attributes above, they also have the side effect of changing the state of the body.
# Finally, they return the time of the event anywhere in the day (not just the next
# event), so they take a second argument in the function call.
temp_body = self.body_factory(self.observer)
# Look up the function to be called for this attribute (eg, call 'next_rising' for 'rise')
fn = fn_map[attr]
# Call the function, with a second argument giving the start-of-day
time_djd = getattr(self.observer, fn)(temp_body, self.sod_djd)
return weewx.units.ValueHelper((time_djd, "dublin_jd", "group_time"), context="ephem_day", formatter=self.formatter)
else:
# Just return the result unchanged.
return getattr(self.body, attr)
def timestamp_to_djd(time_ts):
"""Convert from a unix time stamp to the number of days since 12/31/1899 12:00 UTC
(aka "Dublin Julian Days")"""
# The number 25567.5 is the start of the Unix epoch (1/1/1970). Just add on the
# number of days since then
return 25567.5 + time_ts/86400.0
def djd_to_timestamp(djd):
"""Convert from number of days since 12/31/1899 12:00 UTC ("Dublin Julian Days") to unix time stamp"""
return (djd-25567.5) * 86400.0
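# Worked examples (these follow directly from the 25567.5-day offset):
#   timestamp_to_djd(0)       -> 25567.5   (the Unix epoch, 1/1/1970 00:00 UTC)
#   timestamp_to_djd(86400)   -> 25568.5   (one day later)
#   djd_to_timestamp(25567.5) -> 0.0       (the inverse mapping)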
if __name__ == '__main__':
import doctest
from weeutil.weeutil import timestamp_to_string, timestamp_to_gmtime #@UnusedImport
doctest.testmod()
| hoevenvd/weewx_poller | bin/weewx/almanac.py | Python | gpl-3.0 | 17,450 |
#lib/ontologies/com/usefulinc/ns
| h4ck3rm1k3/gcc-ontology | lib/ontologies/com/usefulinc/ns/__init__.py | Python | gpl-3.0 | 32 |
# -*- coding: UTF-8 -*-
#author mengskysama
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.httpclient
import lcs
from urllib import quote
from urllib import unquote
from tornado import gen
import ttlrcdump
listen_port = 38439
def ChooiseItem(xml, artist):
#print '==============================================='
#print xml.decode('utf-8').encode('gbk')
n = xml.find('<?xml')
if n == -1:
return False
artist = ttlrcdump.FilterSearchStr(artist)
#remove item if artist != artist
n = 0
pos = 0
t = xml.count('id=')
for n in range(0, t):
begin = xml.find('artist="', pos)
end = xml.find('" title', begin)
_artist = ttlrcdump.FilterSearchStr(xml[begin+8:end])
pos = end
n += 1
arith = lcs.arithmetic()
samelen = len(arith.lcs(_artist,artist))
#print samelen
if samelen < 5 and samelen < len(artist)/3 :
begin = xml.rfind('<lrc',0 ,pos)
end = xml.find('lrc>', pos)
xml = xml[:begin] + xml[end + 4:]
pos = begin
n -= 1
t -= 1
#print xml.decode('utf-8').encode('gbk')
#print '==============================================='
n = xml.find('id=')
if n == -1:
return False
#remove item if artist != artist
n = 0
begin = xml.find('artist="', n)
end = xml.find('" title', n)
n = end
    _artist = ttlrcdump.FilterSearchStr(xml[begin+8:end])  # 'artist="' is 8 chars, matching the slice above
strs = ('动新','動新','动基','对照','對照','中日','中英','修正','假名')
for _str in strs:
n = xml.find(_str)
if n != -1:
break
if n == -1:
n = xml.find('<lrc')
else:
n = xml.rfind('<lrc', 0, n)
if n > -1:
begin = xml.find('id="', n) + 4
end = xml.find('"', begin)
#print xml[begin:end]
id = xml[begin:end]
begin = xml.find('artist="', n) + 8
end = xml.find('"', begin )
#print quote(xml[begin:end])
        artist = xml[begin:end].replace('&amp;','&').replace('&apos;',"'").replace('&quot;','"').replace('&lt;','<').replace('&gt;','>')
begin = xml.find('title="', n) + 7
end = xml.find('"', begin)
#print quote(xml[begin + 7:end])
        title = xml[begin:end].replace('&amp;','&').replace('&apos;',"'").replace('&quot;','"').replace('&lt;','<').replace('&gt;','>')
#ret = "id=%s&artist=%s&title=%s" % (id, quote(artist), quote(title))
#print ret
data = {'id':id, 'artist':artist, 'title':title}
return data
return False
def get_arg(req, arg):
begin = req.find('%s=' % arg)
if begin != -1:
begin += len(arg) + 1
end = req.find('&', begin)
if end != -1:
return req[begin:end]
else:
return req[begin:]
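# e.g. get_arg('id=42&artist=foo', 'id')     -> '42'
#      get_arg('id=42&artist=foo', 'artist') -> 'foo'   (no trailing '&' needed)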
@gen.coroutine
def handle_request(request):
if request.uri.startswith('/lrc'):
try:
id = get_arg(request.uri, 'id')
artist = unquote(get_arg(request.uri, 'artist'))
title = unquote(get_arg(request.uri, 'title'))
ttservernum = int(get_arg(request.uri, 'ttservernum'))
#print id.decode('utf-8').encode('gbk')
#print artist.decode('utf-8').encode('gbk')
#print title.decode('utf-8').encode('gbk')
print str(ttservernum)
http_client = tornado.httpclient.AsyncHTTPClient()
#print ttlrcdump.GetDownloadLrcReq(id, artist, title)
req = tornado.httpclient.HTTPRequest(ttlrcdump.GetDownloadLrcReq(ttservernum, id, artist, title))
res = yield http_client.fetch(req)
lrc = res.body.replace('>', '】')
lrc = lrc.replace('<', '【')
lrc = lrc.replace('\r\n', '<br />')
lrc = lrc.replace('\n', '<br />')
lrc = lrc.replace('\r', '<br />')
context = '<script type="text/javascript" src="/templates/ddjs/lrc_content_inner_1.js"></script></div>%s</li>'
context = context.replace('%s',lrc, 1)
#print context
request.write('HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s' % (len(context), context))
except tornado.httpclient.HTTPError, code:
print 'HTTPError except Code' + str(code)
except Exception,e:
print e
finally:
request.finish()
elif (request.uri.find('/?keyword=') != -1):
uri = request.uri.decode('gbk').replace('%20',' ')
if uri.find('&') != -1:
keyword = uri[10:uri.find('&')]
else:keyword = uri[10:]
#print repr(keyword)
keyword = keyword.encode('gbk')
#print repr(keyword)
keyword = keyword.decode('utf-8')
#print repr(keyword)
keyword = eval(repr(keyword)[1:])
#print repr(keyword)
keyword = keyword.decode('gbk').encode('utf-8')
#print keyword.decode('utf-8').encode('gbk')
#print repr(keyword)
try:
if keyword.count(' ') == 0:
keyword += ' '
n = 0
ttservernum = 0
cnt = keyword.count(' ')
for i in range(0, cnt):
#try to prase art and title
n = keyword.find(' ', n) + 1
artist = keyword[0:n-1]
title = keyword[n:]
#print 'title %s' % title
if title.startswith( '(') and i < cnt - 1:
                    # song titles generally do not start with an opening parenthesis
continue
#print 'guess art=%s' % artist.decode('utf-8').encode('gbk')
#print 'guess tit=%s' % title.decode('utf-8').encode('gbk')
trycnt = 0
if artist.find('and') == -1 and title.find('and') == -1:
trycnt = 1
while True:
reqartist = ''
reqtitle = ''
if trycnt == 0:
reqartist = artist.replace('and', '')
reqtitle = title.replace('and', '')
elif trycnt == 1:
reqartist = artist
reqtitle = title
http_client = tornado.httpclient.AsyncHTTPClient()
#print ttlrcdump.GetSearchLrcReq(ttservernum, artist, title)
ttservernum = ttlrcdump.GetServerNum()
req = tornado.httpclient.HTTPRequest(ttlrcdump.GetSearchLrcReq(ttservernum, reqartist, reqtitle))
res = yield http_client.fetch(req)
ret = ChooiseItem(res.body, artist)
if ret != False or trycnt > 0:
break
trycnt += 1
if ret != False:
break
if ret != False:
context = '<div class="newscont mb15" style="line-height:160%;margin-top:10px">' \
'歌手:<a class="mr">%s</a><br>' \
'专辑:<a class="mr"></a>' \
'歌曲:<a class="mr ">%s<span class="highlighter">a</span></a><br>' \
'查看:<a class="mr"href="%s" target="_blank">LRC' \
'<div style="clear:both;"></div>' \
'<div class="page wid f14">'
context = context.replace('%s', artist, 1)
uni_title = title.decode('utf-8')
strrep = ''
for i in range(0, len(uni_title)):
strrep += '<span class="highlighter">%s</span>' % uni_title[i].encode('utf-8')
context = context.replace('%s', strrep, 1)
context = context.replace('%s', "/lrc/?id=%s&artist=%s&title=%s&ttservernum=%s" % (str(ret['id']), quote(str(ret['artist'])), quote(str(ret['title'])), str(ttservernum)))
#print context.decode('utf-8').encode('gbk')
else:
context = 'Lrc Not Found'
request.write('HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s' % (len(context), context))
except tornado.httpclient.HTTPError, code:
print 'HTTPError except Code' + str(code)
except Exception,e:
print e
finally:
request.finish()
else:
#print 'Unknow Request:%s' % request.uri
context = '<head><meta http-equiv="refresh" content="0;url=http://foottlrc.mengsky.net/"></head>'
request.write('HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s' % (len(context), context))
request.finish()
def main():
http_server = tornado.httpserver.HTTPServer(handle_request)
http_server.listen(listen_port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    main()
| mengskysama/FoobarTTLyric | lrcserv.py | Python | gpl-3.0 | 8,866 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from edu.umd.rhsmith.diads.tools.sentiment import ISentimentAnalyzer
import pickle
import re
import os
import sys
import time
import traceback
import nltk
from nltk.corpus import stopwords
class SentimentAnalyzerP(ISentimentAnalyzer, object):
''' Sentiment Analyzer Utility '''
def __init__(self, classifierFilename, featuresFilename):
##### CODE FOR FEATURE EXTRACTION FROM TWEET TEXT
self.punc_reducer = re.compile(r'(\W)\1+')
self.repeat_reducer = re.compile(r'(\w)\1{2,}')
self.punc_breaker_1 = re.compile(r'(\w{2,})(\W\s)')
self.punc_breaker_2 = re.compile(r'(\s\W)(\w{2,})')
self.punc_breaker_3 = re.compile(r'(\w{3,})(\W{2}\s)')
self.punc_breaker_4 = re.compile(r'(\s\W{2})(\w{3,})')
        self.quote_replacer = re.compile(r'&quot;')
        self.amp_replacer = re.compile(r'&amp;')
        self.gt_replacer = re.compile(r'&gt;')
        self.lt_replacer = re.compile(r'&lt;')
self.mention_replacer = re.compile(r'@\w+')
self.link_replacer = re.compile(r'\b(([\w-]+://?|www[.])[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|/)))')
#self.link_replacer = re.compile(r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))')
self.caps_finder = re.compile(r'(\b[A-Z]{4,})\b')
self.lol_reducer = re.compile(r'\b[aeo]*h[aeo]+(h+[aeo]*)*\b|\bl(o+l+)+s*z*(e?d)?\b|\brofls*z*(e?d)?\b|\blu+l+s*z*(e?d)?\b|\blmf+a+o+\b')
nltk.data.path.append("%s/py/nltk_data" % os.getcwd())
self.stopwords_dict = [(x, True) for x in stopwords.words()]
##### IMPORT THE SENTIMENT CLASSIFIER #####
try:
print "Trying to import sentiment classifier; could take a couple minutes..."
sys.stdout.flush()
            f = open(classifierFilename, 'rb')  # binary mode for pickle
self.classifier = pickle.load(f)
f.close()
            f = open(featuresFilename, 'rb')  # binary mode for pickle
self.masterfeats = pickle.load(f)
f.close()
print "Sentiment classifier import succeeded!"
sys.stdout.flush()
except Exception:
print "Sentiment classifier import failed!"
print traceback.format_exc()
sys.exit(1)
def featurify(self, text, master = None):
ext_tokens = []
        # replace "&quot;" with a double quote
text = self.quote_replacer.sub(r'"', text)
text = self.amp_replacer.sub(r'&', text)
text = self.gt_replacer.sub(r'>', text)
text = self.lt_replacer.sub(r'<', text)
#print text
# replace mentions with a dummy string
(text, num) = self.mention_replacer.subn(r'', text)
if num > 0:
ext_tokens.append("<MENTION>")
# replace links with a dummy string
(text, num) = self.link_replacer.subn(r'', text)
if num > 0:
ext_tokens.append("<LINK>")
# find words in all caps and add a dummy string to note that
(text, num) = self.caps_finder.subn(r'\1', text)
if num > 0:
ext_tokens.append("<CAPS>")
# find laughter and replace with a dummy string
(text, num) = self.lol_reducer.subn(r'', text)
if num > 0:
ext_tokens.append("<LULZ>")
# lowercase everything
text = text.lower()
# isolates and reduces long spans of repeated punctuation to a single item (like "...." / " !!!! " / "????")
text = self.punc_reducer.sub(r' \1 ', text)
# shorten long spans of repeated word chars to three ("soooooooo" ==> "sooo")
text = self.repeat_reducer.sub(r'\1\1\1', text)
# break single-character punctuation off of words of size 2 or more (quotes, exclaims, periods)
text = self.punc_breaker_1.sub(r' \1 \2 ', text)
text = self.punc_breaker_2.sub(r' \1 \2 ', text)
# break double-character punctuation off of words of size 3 or more (quote-period, question-exclaim)
text = self.punc_breaker_3.sub(r' \1 \2 ', text)
text = self.punc_breaker_4.sub(r' \1 \2 ', text)
# split on all whitespace
tokens = re.split(r'\s+', text)
# remove stopwords and blanks
tokens = [x for x in tokens if len(x) > 0 and x not in self.stopwords_dict]
# add in manual extra tokens
tokens += ext_tokens
#print tokens
#print
if master == None:
feats = dict([(word, True) for word in tokens])
else:
feats = dict([(word, True) for word in tokens if word in master])
# make the feature data structure
return feats
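    # Illustrative behaviour (hypothetical tweet; the exact tokens also depend
    # on the NLTK stopword corpus and, if given, the master feature list):
    #   featurify('@bob I LOOOVE this!! http://t.co/x')
    #   -> {'looove': True, '!!': True, '<MENTION>': True, '<LINK>': True, '<CAPS>': True}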
def process(self, text):
try:
# hack to skip statuses that have weird non-unicode text in them;
# these can cause problems down the line for the regexes in featurify()
try:
unicode(text, "ascii", "strict")
except UnicodeDecodeError:
#print "Unicode error on status %i; stripping." % row['id']
#sys.stdout.flush()
try:
text = unicode(text, "utf-8").encode("ascii", "ignore")
except UnicodeDecodeError:
print "Unicode error on status; skipping."
sys.stdout.flush()
# featurify the text, using only the features in the master list
statfeat = {}
#try:
# with time_limit(10):
statfeat = self.featurify(text, self.masterfeats)
#except TimeoutException:
# print "Featurify timed out for status_id %i" % row['id']
if len(statfeat) > 0:
result = self.classifier.prob_classify(statfeat)
probs = dict([(x, result.prob(x)) for x in result.samples()])
# calculate a score in [-1, +1]
score = probs['pos'] * 2.0 - 1.0
else:
# skip classification b/c there are no features!
score = 0.0
return score
except Exception:
print "Problem processing queries:"
print traceback.format_exc()
sys.stdout.flush()
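# A minimal usage sketch (the pickle paths are hypothetical; training the
# classifier and feature pickles happens outside this module):
#
#   analyzer = SentimentAnalyzerP('classifier.pickle', 'features.pickle')
#   score = analyzer.process("great product, works perfectly")
#   # score lies in [-1.0, +1.0]; positive values lean 'pos', negative lean 'neg'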
| rmachedo/MEater | py/SentimentAnalyzerP.py | Python | gpl-3.0 | 5,719 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from cron_status import *
class TestChangeDetection(unittest.TestCase):
"""Test if the change detection is operational."""
# Please note that status_history_list is backwards,
# i.e., newest entry first.
def test_all_okay(self):
status_history_list = [
{'foo': (ContainerStatus.OKAY, 'no msg')}
] * (STATUS_HISTORY_LENGTH + 1)
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(changed, status[0].changed) # because there is only 1 container
self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
self.assertEqual(status[0].current_status, ContainerStatus.OKAY)
self.assertTrue(status[0].container_name in status_history_list[0])
self.assertEqual(status[0].current_msg, status_history_list[0][status[0].container_name][1])
def test_all_failed(self):
status_history_list = [
{'foo': (ContainerStatus.FAILED, 'no msg')}
] * (STATUS_HISTORY_LENGTH + 1)
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(changed, status[0].changed) # because there is only 1 container
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
self.assertEqual(status[0].current_status, ContainerStatus.FAILED)
def test_failed_after_starting_short(self):
status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
status_history_list += [
{'foo': (ContainerStatus.STARTING, 'no msg')}
] * (STATUS_HISTORY_LENGTH - 1)
status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_failed_after_starting_very_long(self):
status_history_list = [{'foo': (ContainerStatus.FAILED, 'no msg')}]
status_history_list += [
{'foo': (ContainerStatus.STARTING, 'no msg')}
] * STATUS_HISTORY_LENGTH
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_okay_after_failed(self):
status_history_list = [
{'foo': (ContainerStatus.OKAY, 'no msg')}
]
status_history_list += [
{'foo': (ContainerStatus.FAILED, 'no msg')}
] * STATUS_HISTORY_LENGTH
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
def test_failed_after_okay(self):
status_history_list = [
{'foo': (ContainerStatus.FAILED, 'no msg')}
]
status_history_list += [
{'foo': (ContainerStatus.OKAY, 'no msg')}
] * STATUS_HISTORY_LENGTH
changed, status = detect_flapping_and_changes(status_history_list)
self.assertTrue(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_missing_data(self):
status_history_list = [
{'foo': (ContainerStatus.FAILED, 'no msg')}
] * (STATUS_HISTORY_LENGTH - 1)
status_history_list += [{'foo': (ContainerStatus.OKAY, 'no msg')}]
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.FAILED)
def test_too_much_data(self):
status_history_list = [
{'foo': (ContainerStatus.OKAY, 'no msg')}
] * (STATUS_HISTORY_LENGTH + 1)
status_history_list += [{'foo': (ContainerStatus.FAILED, 'no msg')}]
changed, status = detect_flapping_and_changes(status_history_list)
self.assertFalse(changed)
self.assertEqual(status[0].overall_status, ContainerStatus.OKAY)
if __name__ == '__main__':
unittest.main()
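# Run directly ("python3 test_cron.py") or through the stdlib runner
# ("python3 -m unittest test_cron"); both pick up TestChangeDetection above.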
| fau-fablab/kastenwesen | test_cron.py | Python | gpl-3.0 | 4,191 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-11 22:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Class_year',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('class_year_text', models.CharField(default='Class Year', editable=False, max_length=10)),
],
),
migrations.CreateModel(
name='Name',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_text', models.CharField(default='Name', editable=False, max_length=4)),
],
),
migrations.CreateModel(
name='Resume',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('resume_text', models.CharField(default='Resume', editable=False, max_length=6)),
],
),
migrations.CreateModel(
name='Wpi_email',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('wpi_email_text', models.CharField(default='WPI Email', editable=False, max_length=9)),
],
),
migrations.AddField(
model_name='resume',
name='identifier',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resumedrop.Wpi_email'),
),
migrations.AddField(
model_name='name',
name='identifier',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resumedrop.Wpi_email'),
),
migrations.AddField(
model_name='class_year',
name='identifier',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='resumedrop.Wpi_email'),
),
]
| himanshusahay/resume-drop | mysite/resumedrop/migrations/0001_initial.py | Python | gpl-3.0 | 2,237 |
#!/usr/bin/env python
import os
import sys
def usage():
print "{0} <feed>".format(os.path.basename(__file__))
if __name__ == '__main__':
kmotion_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(kmotion_dir)
from core.camera_lost import CameraLost
feed = ''
if len(sys.argv) > 1:
feed = sys.argv[1]
cam_lost = CameraLost(kmotion_dir, feed)
if cam_lost.reboot_camera():
sys.exit()
else:
usage()
sys.exit(1)
| dvor85/kmotion | bin/reboot_cam.py | Python | gpl-3.0 | 523 |
# Do not edit this file, pipeline versioning is governed by git tags
__version__ = "1.0.1"
| bennahugo/RFIMasker | RFIMasker/version.py | Python | gpl-3.0 | 88 |
"""
Root finding methods
====================
Routines in this module:
bisection(f, a, b, eps=1e-5)
newton1(f, df, x0, eps=1e-5)
newtonn(f, J, x0, eps=1e-5)
secant(f, x0, x1, eps=1e-5)
inv_cuadratic_interp(f, a, b, c, eps=1e-5)
lin_fracc_interp(f, a, b, c, eps=1e-5)
broyden(f, x0, B0, eps=1e-5)
brent(f, a, b, eps=1e-5)
"""
import numpy as np
'''
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Copyright (C) 4/24/17 Carlos Brito
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
'''
__all__ = ['bisection', 'newton1', 'secant', 'newtonn',
           'inv_cuadratic_interp', 'lin_fracc_interp',
           'broyden', 'brent']
def bisection(f, a, b, eps=1e-5, display=False):
"""
Find root of f.
This function computes a root of the function f using the bisection method.
Parameters
----------
f : function
Function we want to find the root of.
a : float
Lower bound.
b : float
High bound.
eps : float
Tolerance.
Returns
-------
m : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
if a > b:
a, b = b, a
while((b - a) > eps):
m = a + np.float32(b - a) / 2.
if (np.sign(f(a)) == np.sign(f(m))):
a = m
else:
b = m
if display:
print 'iteration ', iterations
print 'm: ', m
iterations += 1
return m, iterations
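# Worked example: the root of f(x) = x**2 - 2 on [0, 2] is sqrt(2) ~ 1.41421.
# Each pass halves the bracket, so eps=1e-6 needs 21 halvings of a width-2 bracket:
#   root, its = bisection(lambda x: x**2 - 2, 0.0, 2.0, eps=1e-6)
#   abs(root - 2 ** 0.5) < 1e-6   # True; its == 21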
def newton1(f, df, x0, eps=1e-5, display=False):
"""
Find root of f.
This method computes the root of f using Newton's method.
Parameters
----------
f : function
Function we want to find the root of.
df : function
Derivative of f.
x0 : float
This is the starting point for the method.
eps : float
Tolerance.
Returns
-------
root : float
Root of f.
"""
iterations = 0
x_old = np.float(x0)
x_new = x_old
while(True):
try:
x_old = x_new
x_new = x_old - f(x_old) / df(x_old)
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
if(abs(x_old - x_new) <= eps):
break
except(ZeroDivisionError):
return np.nan
root = x_new
return root, iterations
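# e.g. Newton's method on the same problem, using the analytic derivative:
#   root, its = newton1(lambda x: x**2 - 2, lambda x: 2 * x, x0=1.0)
#   abs(root - 2 ** 0.5) < 1e-5   # True after only a handful of iterations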
def secant(f, x0, x1, eps=1e-5, display=False):
"""
Parameters
----------
f : function
Function we want to find the root of.
x0 : float
First initial value "close" to the root of f.
x1: float
Second initial value "close" to the root of f.
eps : float
Tolerance.
Returns
-------
root : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
x_old_0 = x0
x_old_1 = x1
x_new = x0 - f(x0) * (x1 - x0) / (f(x1) - f(x0))
while True:
x_old_0 = x_old_1
x_old_1 = x_new
x_new = x_old_1 - f(x_old_1) * \
((x_old_1 - x_old_0) / (f(x_old_1) - f(x_old_0)))
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
if(abs(x_old_1 - x_new) < eps):
break
root = x_new
return root, iterations
def inv_cuadratic_interp(f, a, b, c, eps=1e-5, display=False):
"""
Find root of f.
This method finds the root of f using the inverse cuadratic
interpolation method.
Parameters
----------
f : function
Function we want to find the root of.
a : float
First initial value.
b : float
Second initial value.
c : float
Third initial value.
Returns
-------
root : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
while True:
u = f(b) / f(c)
v = f(b) / f(a)
w = f(a) / f(c)
p = v * (w * (u - w) * (c - b) - (1 - u) * (b - a))
q = (w - 1) * (u - 1) * (v - 1)
x_new = b + p / q
a = b
b = c
c = x_new
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
if(abs(f(x_new)) < eps):
break
root = x_new
return root, iterations
def lin_fracc_interp(f, a, b, c, eps=1e-5, display=False):
"""
Find root of f.
This method finds the root of f using the linear fractional
interpolation method.
Parameters
----------
f : function
Function we want to find the root of.
a : float
First initial value.
b : float
Second initial value.
c : float
Third initial value.
Returns
-------
root : float
Root of f.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
while True:
numerator = (a - c) * (b - c) * (f(a) - f(b)) * f(c)
denominator = (a - c) * (f(c) - f(b)) * f(a) - \
(b - c) * (f(c) - f(a)) * f(b)
h = numerator / denominator
x_new = c + h
a = b
b = c
c = x_new
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
if(abs(f(x_new)) < eps):
break
root = x_new
return root, iterations
def broyden(f, x0, B0, eps=1e-5, display=False):
"""
Finds roots for functions of k-variables.
This function utilizes Broyden's method to find roots in a
k-dimensional function f utilizing the initial Jacobian B0
at x0.
Parameters
----------
f : function which takes an array_like matrix and
returns an array_like matrix
Function we want to find the root of.
x0 : array_like
Initial point.
B0 : array_like
Jacobian of function at x0.
eps : float
Error tolerance.
Returns
-------
root : array_like
Root of function.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
x_new = x0
B_new = B0
while True:
x_old = x_new
B_old = B_new
        s = np.linalg.solve(B_old, -f(x_old))  # solve B_old * s = -f(x_old)
        x_new = x_old + s
        y = f(x_new) - f(x_old)
        # rank-one (outer-product) Broyden update of the approximate Jacobian
        B_new = B_old + np.outer(y - np.dot(B_old, s), s) / np.dot(s, s)
if display:
print 'iteration ', iterations
print 'x:', x_new
print 'B', B_new
iterations += 1
# convergence check
if(np.all(np.abs(x_old - x_new) <= eps)):
break
root = x_new
return root, iterations
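# A 2-D sketch (hypothetical system): intersect the unit circle with the line
# x0 = x1, seeding B0 with the exact Jacobian at the starting point:
#   f = lambda x: np.array([x[0]**2 + x[1]**2 - 1.0, x[0] - x[1]])
#   B0 = np.array([[2.0, 1.0], [1.0, -1.0]])
#   root, its = broyden(f, np.array([1.0, 0.5]), B0)   # -> approx [0.7071, 0.7071]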
def newtonn(f, J, x0, eps=1e-5, display=False):
"""
Finds roots for functions of k-variables.
This function utilizes Newton's method for root finding
to find roots in a k-dimensional function. To do this,
it takes the Jacobian of the function and an initial
point.
Parameters
----------
f : function which takes an array_like matrix and
returns an array_like matrix
J : function returning an array_like matrix
Jacobian of function.
x0 : array_like
Initial point.
eps : float
Error tolerance.
Returns
-------
root : array_like
Root of function.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
x_new = x0
try:
while True:
x_old = x_new
x_new = x_old - np.dot(np.linalg.inv(J(x_old)), f(x_old))
if display:
print 'iteration ', iterations
print 'x: ', x_new
iterations += 1
# convergence check
if(np.all(np.abs(x_old - x_new) <= eps)):
break
except np.linalg.LinAlgError:
print 'Error during iteration. Matrix is probably singular'
return None
root = x_new
return root, iterations
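# e.g. the same 2-D system with the explicit Jacobian instead of an estimate:
#   f = lambda x: np.array([x[0]**2 + x[1]**2 - 1.0, x[0] - x[1]])
#   J = lambda x: np.array([[2.0 * x[0], 2.0 * x[1]], [1.0, -1.0]])
#   root, its = newtonn(f, J, np.array([1.0, 0.5]))    # -> approx [0.7071, 0.7071]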
def brent(f, a, b, eps=1e-5, display=False):
"""
Finds root of a one dimensional function.
This function utilizes Brent's method for root finding
to find roots in a one dimensional function. To do this,
it needs a function and an interval which contains the
root.
Parameters
----------
f : function
Function we want to find the root of.
a : float
Low bound of interval
b : float
High bound of interval
eps : float
Tolerance.
Returns
-------
root : float
Root of function.
iterations : int
Number of iterations taken to find root.
"""
iterations = 0
mflag = False
d = 0.
if f(a) * f(b) >= 0:
raise ValueError('root is not bracketed')
if(abs(f(a)) < abs(f(b))):
        a, b = b, a  # swap values
c = a
mflag = True
while (True):
if f(a) != f(c) and f(b) != f(c):
# inverse quadratic interpolation
s = (a * f(b) * f(c)) / ((f(a) - f(b)) * (f(a) - f(c))) + \
(b * f(a) * f(c)) / ((f(b) - f(a)) * (f(b) - f(c))) + \
(c * f(a) * f(b)) / ((f(c) - f(a)) * (f(c) - f(b)))
else:
# secant method
s = b - f(b) * (b - a) / (f(b) - f(a))
tmp1 = (3. * a + b) / 4.
tmp2 = b
if tmp1 > tmp2:
tmp1, tmp2 = tmp2, tmp1
if not (tmp1 < s < tmp2) or \
mflag and (abs(s - b)) >= (abs(c - d) / 2.) or \
not mflag and (abs(s - b)) >= (abs(c - d) / 2.) or \
mflag and (abs(b - c)) < abs(eps) or \
not mflag and (abs(c - d)) < abs(eps):
# bisection method
s = (a + b) / 2.
mflag = True
else:
mflag = False
d = c
c = b
if f(a) * f(s) < 0:
b = s
else:
a = s
if abs(f(a)) < abs(f(b)):
a, b = b, a
if display:
print 'iteration: ', iterations
print 'x: ', s
iterations += 1
# convergence check
if f(b) == 0 or f(s) == 0 or (abs(b - a) < eps):
break
root = s
return root, iterations
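# e.g. brent(lambda x: x**2 - 2, 0.0, 2.0) also returns ~1.41421, usually in
# fewer iterations than bisection thanks to the interpolation steps; it raises
# ValueError if f(a) and f(b) do not bracket a root.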
| carlosb/scicomp | scicomp/rootfind/rootfindpack.py | Python | gpl-3.0 | 11,013 |
#!/usr/bin/env python
'''
Copyright (C) 2014 Janina Mass
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
'''
import sys
import getopt
import subprocess
import threading
import os
import shutil
import matplotlib
#don't use X:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy
from distutils import spawn
#########################
# last update:
# Fr 16 Mai 2014 14:25:46 CEST
# [JMass]
#########################
global GAP
GAP = "-"
class Alignment(object):
def __init__(self, id=None, fasta = None, members = []):
self.id = id
self.fasta = fasta
self.members = []
self.gapPos = []
self.mismatchPos = []
self.matchPos = []
self.matchGapPos = []
self.attachSequences()
self.calcNumbers()
def __repr__(self):
ids = self.members
return("Alignment:{},{}".format(self.id, ids))
def __len__(self):
try:
return(len(self.members[0].sequence))
except TypeError as e:
sys.stderr.write(e)
sys.stderr.write("attachSequences first")
return(0)
def getStats(self):
res = ""
res+="{},{},{},{},{},{}".format(len(self.matchPos),len(self.matchGapPos),len(self.mismatchPos),len(self)-len(self.gapPos),len(self.gapPos),len(self) )
return(res)
def attachSequences(self):
fp = FastaParser()
print("FASTA:", self.fasta)
for f in fp.read_fasta(self.fasta):
newSeq = Sequence(id = f[0], sequence = f[1])
self.members.append(newSeq)
def calcNumbers(self):
for i in range(0, len(self)):
curpos = [m.sequence[i] for m in self.members]
if GAP in curpos:
#dynamic penalty:
tmp = "".join(curpos)
gappyness = tmp.count(GAP)/float(len(self.members))
half = len(self.members)/2.0
if gappyness > half:
toPunish = [m for m in self.members if m.sequence[i]!=GAP]
for t in toPunish:
t._dynamicPenalty+=gappyness
elif gappyness < half:
#punish gappers
toPunish = [m for m in self.members if m.sequence[i]==GAP]
for t in toPunish:
t._dynamicPenalty+=1-gappyness
else:
pass
#/dyn penalty
self.gapPos.append(i)
#sequences that cause gaps:
gappers = [m for m in self.members if m.sequence[i] == GAP]
for m in gappers:
m.gapsCaused.append(i)
#unique gaps caused:
if len(gappers) == 1:
m.uniqueGapsCaused.append(i)
#insertions
inserters = [m for m in self.members if m.sequence[i] != GAP]
for m in inserters:
m.insertionsCaused.append(i)
#unique insertions caused:
if len(inserters) == 1:
m.uniqueInsertionsCaused.append(i)
nongap = [c for c in curpos if c != GAP]
cpset = set(curpos)
if (len(cpset) >1 and GAP not in cpset):
self.mismatchPos.append(i)
for m in self.members:
m.mismatchShared.append(i)
elif (len(cpset) == 1 and GAP not in cpset):
self.matchPos.append(i)
for m in self.members:
m.matchShared.append(i)
elif (len(cpset)==2 and GAP in cpset and len(nongap)>2):
self.matchGapPos.append(i)
def showAlignment(self, numbers = False):
res = []
mmPos = []
alignmentLength = len(self.members[0].sequence)
for i in range(0, alignmentLength):
curpos = [m.sequence[i] for m in self.members]
if numbers:
res.append(str(i)+" "+" ".join(curpos))
else:
res.append(" ".join(curpos))
return("\n".join(res))
class Sequence():
def __init__(self, id = "", sequence = None, isForeground = False):
self.id = id
self.sequence = sequence
self.isForeground = isForeground
self.insertionsCaused = [] #positions
self.uniqueInsertionsCaused = []
self.gapsCaused = []#positions
self.uniqueGapsCaused = []
self.matchShared = []
self.mismatchShared = []
self._penalty = None
# penalize by site:
        # > n/2 gaps (@site): penalize inserts by gaps/n
        # < n/2 gaps (@site): penalize gaps by inserts/n
self._dynamicPenalty = 0
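        # Worked example (illustrative): with n = 10 sequences and 7 gaps at a
        # site (7 > n/2), each of the 3 inserting sequences is penalized by
        # gappyness = 0.7; with 3 gaps (3 < n/2), each of the 3 gapping
        # sequences is penalized by 1 - 0.3 = 0.7.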
    def setForeground(self, value=True):
        self.isForeground = value
def __repr__(self):
return("Sequence: {}".format(self.id))
    @property
    def penalty(self):
        uniqueGapPenalty = uniqueInsertPenalty = 10  # a property takes no call-time args
        gapPenalty = insertPenalty = 1
        return (len(self.insertionsCaused) * insertPenalty
                + len(self.uniqueInsertionsCaused) * uniqueInsertPenalty
                + len(self.gapsCaused) * gapPenalty
                + len(self.uniqueGapsCaused) * uniqueGapPenalty)
def summary(self):
s = ""
        s += self.id + " "
        s += "insertionsCaused:{},uniqueInsertionsCaused:{},gapsCaused:{},uniqueGapsCaused:{},penalty:{},dynPenalty:{}".format(len(self.insertionsCaused), len(self.uniqueInsertionsCaused), len(self.gapsCaused), len(self.uniqueGapsCaused), self.penalty, self._dynamicPenalty)
return(s)
def getCustomPenalty(self,gapPenalty, uniqueGapPenalty, insertionPenalty , uniqueInsertionPenalty, mismatchPenalty, matchReward):
res = (len(self.gapsCaused)-len(self.uniqueGapsCaused))* gapPenalty\
+ len(self.uniqueGapsCaused)*uniqueGapPenalty\
+ (len(self.insertionsCaused)-len(self.uniqueInsertionsCaused)) * insertionPenalty\
+ len(self.uniqueInsertionsCaused) * uniqueInsertionPenalty\
+ len(self.mismatchShared)* mismatchPenalty\
+ len(self.matchShared) *matchReward
return(res)
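    # Illustrative arithmetic for getCustomPenalty: with 4 gaps (1 unique),
    # 2 insertions (0 unique), 3 mismatches, 10 matches and the CLI defaults
    # (g=1, G=10, j=1, J=1, M=1, r=-10), the score is
    # (4-1)*1 + 1*10 + 2*1 + 0*1 + 3*1 + 10*(-10) = -82; match-rich sequences
    # score lower and survive removeCustomPenalty, which drops the maximum.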
class FastaParser(object):
    def read_fasta(self, fasta, delim=None, asID=0):
        """Read the FASTA file 'fasta' and yield (id, sequence) tuples.
        The sequence id is split at 'delim' (if set) and the field at
        index 'asID' is kept.
        Example:
            >idpart1|idpart2
            ATGTGA
        with delim="|" yields ("idpart1", "ATGTGA").
        """
name = ""
fasta = open(fasta, "r")
while True:
line = name or fasta.readline()
if not line:
break
seq = []
while True:
name = fasta.readline()
name = name.rstrip()
if not name or name.startswith(">"):
break
else:
seq.append(name)
joinedSeq = "".join(seq)
line = line[1:]
if delim:
line = line.split(delim)[asID]
yield (line.rstrip(), joinedSeq.rstrip())
fasta.close()
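# A short usage sketch for FastaParser (the file path is hypothetical):
#   fp = FastaParser()
#   for seq_id, seq in fp.read_fasta("align.fas", delim="|", asID=0):
#       print(seq_id, len(seq))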
###########################################
#TODO documentation
def usage():
print ("""
######################################
# pysickle.py v0.1.1
######################################
usage:
pysickle.py -f multifasta alignment
options:
-f, --fasta=FILE multifasta alignment (eg. "align.fas")
OR
-F, --fasta_dir=DIR directory with multifasta files (needs -s SUFFIX)
-s, --suffix=SUFFIX will try to work with files that end with SUFFIX (eg ".fas")
-a, --msa_tool=STR supported: "mafft" [default:"mafft"]
 -i, --max_iteration=NUM force stop after NUM iterations
-n, --num_threads=NUM max number of threads to be executed in parallel [default: 1]
-m, --mode=MODE set strategy to remove outlier sequences [default: "Sites"]
available modes (not case sensitive):
"Sites", "Gaps", "uGaps","Insertions",
"uInsertions","uInstertionsGaps", "custom"
-l, --log write logfile
-h, --help prints this
only for mode "custom":
-g, --gap_penalty=NUM set gap penalty [default: 1.0]
-G, --unique_gap_penalty=NUM set unique gap penalty [default: 10.0]
-j, --insertion_penalty=NUM set insertion penalty [default:1.0]
 -J, --unique_insertion_penalty=NUM set unique insertion penalty [default:1.0]
-M, --mismatch_penalty=NUM set mismatch penalty [default:1.0]
-r, --match_reward=NUM set match reward [default: -10.0]
""")
sys.exit(2)
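# Example invocation (illustrative; paths and thread count are placeholders):
#   python pysickle.py -F alignments/ -s .fas -a mafft -n 4 -m sites -l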
############################################
def checkPath(progname):
    #TODO extend
    avail = ["mafft"]
    if progname.lower() not in avail:
        raise Exception("Program not supported. Only {} allowed.".format(",".join(avail)))
    path = spawn.find_executable(progname)
    if not path:
        raise Exception("Could not find {} on your system! Exiting. Available options:{}\n".format(progname, ",".join(avail)))
    print("Found {} in {}\n".format(progname, path))
def checkMode(mode):
avail = ["sites", "gaps", "ugaps","insertions", "uinsertions", "uinsertionsgaps", "custom"]
if mode not in avail:
raise Exception("Mode {} not available. Only {} allowed\n".format(mode, ",".join(avail)))
class TooFewSequencesException(Exception):
pass
def adjustDir(dirname, mode):
if mode == "unisertionsgaps":
abbr = "uig"
else:
abbr = mode[0:2]
return(dirname+"_"+abbr)
def getSeqToKeep(alignment, mode, gap_penalty, unique_gap_penalty, insertion_penalty, unique_insertion_penalty , mismatch_penalty, match_reward):
if mode == "sites":
toKeep = removeDynamicPenalty(alignment)
elif mode == "gaps":
toKeep = removeCustomPenalty(alignment, gapPenalty=1, uniqueGapPenalty=1, insertionPenalty =0, uniqueInsertionPenalty=0, mismatchPenalty=0, matchReward = 0)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
elif mode == "ugaps":
toKeep = removeMaxUniqueGappers(alignment)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
elif mode == "insertions":
toKeep = removeCustomPenalty(alignment, gapPenalty=0, uniqueGapPenalty=0, insertionPenalty =1, uniqueInsertionPenalty=1, mismatchPenalty=0, matchReward = 0)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
elif mode == "uinsertions":
toKeep = removeMaxUniqueInserters(alignment)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
elif mode == "uinsertionsgaps":
toKeep = removeMaxUniqueInsertsPlusGaps(alignment)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
elif mode == "custom":
toKeep = removeCustomPenalty(alignment, gapPenalty=gap_penalty, uniqueGapPenalty=unique_gap_penalty, insertionPenalty =insertion_penalty, uniqueInsertionPenalty=unique_insertion_penalty, mismatchPenalty=mismatch_penalty, matchReward = match_reward)
        if not toKeep:
            toKeep = removeDynamicPenalty(alignment)
else:
raise Exception("Sorry, sth went wrong at getSeqToKeep\n")
return(toKeep)
def schoenify(fasta=None, max_iter=None, finaldir = None, tmpdir = None, msa_tool = None,mode = None, logging = None, gap_penalty= None, unique_gap_penalty = None, insertion_penalty = None, unique_insertion_penalty = None, mismatch_penalty = None, match_reward = None ):
if not fasta:
raise Exception("Schoenify: Need alignment in fasta format.")
else:
        arr = numpy.zeros([1, 8], dtype='int32')
iteration = 0
fastabase = os.path.basename(fasta)
statsout = finaldir+os.sep+fastabase+".info"
tabout = finaldir + os.sep+fastabase+".csv"
resout = finaldir +os.sep+fastabase+".res"
if logging:
info = open(statsout,"w")
iterTab = []
headerTab = ["matches", "matchesWithGaps","mismatches"," nogap", "gaps","length","iteration","numSeq"]
alignmentstats = []
newAlignment = Alignment(fasta = fasta)
#sanity check
if len(newAlignment.members) < 3:
raise TooFewSequencesException("Need more than 2 sequences in alignment: {}\n".format(newAlignment.fasta))
if not max_iter or (max_iter > len(newAlignment.members)-2):
max_iter = len(newAlignment.members)-2
print("#max iterations:{}".format(str(max_iter)))
while (iteration < max_iter):
toKeep = getSeqToKeep(alignment = newAlignment, mode = mode, gap_penalty = gap_penalty, unique_gap_penalty = unique_gap_penalty, insertion_penalty=insertion_penalty, unique_insertion_penalty = unique_insertion_penalty, mismatch_penalty=mismatch_penalty, match_reward=match_reward)
print("# iteration: {}/{} \n".format(iteration, max_iter))
            if len(toKeep) < 2:
break
res= ""
for k in toKeep:
seq ="".join([s for s in k.sequence if s !=GAP])
res+=(">{}\n{}\n".format(k.id,seq))
iterfile = tmpdir+os.sep+".".join(fastabase.split(".")[0:-1])+"."+str(iteration)
with open(iterfile+".tmp",'w') as out:
out.write(res)
#log
if logging:
for m in newAlignment.members:
info.write(m.summary()+"\n")
#log
alignmentstats.append(newAlignment.getStats().split(","))
iterTab.append((",".join(x for y in alignmentstats for x in y))+","+ str(iteration)+","+str(len(newAlignment.members)))
alignmentstats = []
iteration +=1
if msa_tool == "mafft":
proc = subprocess.Popen(["mafft","--auto", iterfile+".tmp"], stdout=open(iterfile+".out",'w'), bufsize=1)
proc.communicate()
newAlignment = Alignment(id = iterfile, fasta=iterfile+".out")
#TODO extend
if logging:
info.close()
with open(tabout, 'w') as taboutf:
taboutf.write(",".join(headerTab))
taboutf.write("\n")
taboutf.write("\n".join(iterTab ))
for i in iterTab:
row = [int(j) for j in i.split(",")]
arr = numpy.vstack((arr,numpy.array(row)))
#delete row filled with zeros
arr = numpy.delete(arr,0,0)
###########
LOCK.acquire()
plt.figure(1)
plt.suptitle(fastabase, fontsize=12)
ax = plt.subplot(3,1,1)
for i,l in zip([0,1,2,3,4,5,6,7],['match','matchWithGap','mismatch','nogap','gap','length','iteration','numSeq' ]):
            if i not in (6, 7):
plt.plot(arr[:,6], arr[:,i], label=l)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles, labels,bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax= plt.subplot(3,1,2)
plt.plot(arr[:,6], arr[:,7])
ax.set_ylabel('count')
ax.legend(["numSeq"],bbox_to_anchor=(1.05, 0.3), loc=2, borderaxespad=0.)
ax= plt.subplot(3,1,3)
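        # Score each iteration by (alignment length - gap columns) * number of
        # sequences; the iteration maximizing this is reported as the best set.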
        scoring = (arr[:, 5] - arr[:, 4]) * arr[:, 7]
try:
maxIndex = scoring.argmax()
            with open(resout, 'w') as resouth:
resouth.write("# Ranking: {}\n".format(scoring[:].argsort()[::-1]))
resouth.write("# Best set: {}".format(str(maxIndex)))
plt.plot(arr[:,6],scoring)
ax.legend(["(length-gaps)*numSeq"],bbox_to_anchor=(1.05, 0.3), loc=2, borderaxespad=0.)
ax.set_xlabel('iteration')
plt.savefig(finaldir+os.sep+fastabase+'.fun.png', bbox_inches='tight')
plt.clf()
finalfa = tmpdir+os.sep+".".join(fastabase.split(".")[0:-1])+"."+str(maxIndex)+".tmp"
finalfabase = os.path.basename(finalfa)
shutil.copy(finalfa,finaldir+os.sep+finalfabase)
except ValueError as e:
sys.stderr.write(str(e))
finally:
LOCK.release()
def removeMaxUniqueGappers(alignment):
if not isinstance(alignment, Alignment):
raise Exception("Must be of class Alignment")
mxUniqueGaps = max([len(k.uniqueGapsCaused) for k in alignment.members])
keepers = [k for k in alignment.members if len(k.uniqueGapsCaused) < mxUniqueGaps]
return(keepers)
def removeMaxUniqueInserters(alignment):
if not isinstance(alignment, Alignment):
raise Exception("Must be of class Alignment")
mxUniqueIns = max([len(k.uniqueInsertionsCaused) for k in alignment.members])
keepers = [k for k in alignment.members if len(k.uniqueInsertionsCaused) < mxUniqueIns]
return(keepers)
def removeMaxPenalty(alignment):
if not isinstance(alignment, Alignment):
raise Exception("Must be of class Alignment")
mx = max([k.penalty for k in alignment.members])
keepers = [k for k in alignment.members if k.penalty < mx]
return(keepers)
def removeCustomPenalty(alignment, gapPenalty=None, uniqueGapPenalty=None, insertionPenalty=None, uniqueInsertionPenalty=None, mismatchPenalty=None, matchReward = None):
if not isinstance(alignment, Alignment):
raise Exception("Must be of class Alignment")
mx = max([k.getCustomPenalty( gapPenalty =gapPenalty, uniqueGapPenalty=uniqueGapPenalty, insertionPenalty=insertionPenalty, uniqueInsertionPenalty=uniqueInsertionPenalty,mismatchPenalty=mismatchPenalty, matchReward = matchReward) for k in alignment.members])
print("MAX",mx)
print([k.getCustomPenalty(gapPenalty=gapPenalty, uniqueGapPenalty=uniqueGapPenalty, insertionPenalty=insertionPenalty, uniqueInsertionPenalty=uniqueInsertionPenalty ,mismatchPenalty=mismatchPenalty, matchReward = matchReward) for k in alignment.members ])
keepers = [k for k in alignment.members if k.getCustomPenalty(gapPenalty=gapPenalty, uniqueGapPenalty=uniqueGapPenalty, insertionPenalty=insertionPenalty, uniqueInsertionPenalty=uniqueInsertionPenalty ,mismatchPenalty=mismatchPenalty, matchReward = matchReward) < mx]
return(keepers)
def removeDynamicPenalty(alignment):
if not isinstance(alignment, Alignment):
raise Exception("Must be of class Alignment")
mx = max([k._dynamicPenalty for k in alignment.members])
keepers = [k for k in alignment.members if k._dynamicPenalty < mx]
return(keepers)
def removeMaxUniqueInsertsPlusGaps(alignment):
if not isinstance(alignment, Alignment):
raise Exception("Must be of class Alignment")
mxUniqueIns = max([len(k.uniqueInsertionsCaused)+len(k.uniqueGapsCaused) for k in alignment.members])
keepers = [k for k in alignment.members if (len(k.uniqueInsertionsCaused)+len(k.uniqueGapsCaused)) < mxUniqueIns]
return(keepers)
class SchoenifyThread(threading.Thread):
def __init__(self,fasta, max_iter, finaldir,tmpdir, msa_tool, mode, logging, gap_penalty, unique_gap_penalty, insertion_penalty, unique_insertion_penalty, mismatch_penalty , match_reward):
super(SchoenifyThread, self).__init__()
self.fasta=fasta
self.max_iter=max_iter
self.finaldir=finaldir
self.tmpdir = tmpdir
self.msa_tool =msa_tool
self.mode = mode
self.logging = logging
#custom
self.gap_penalty = gap_penalty
self.unique_gap_penalty = unique_gap_penalty
self.insertion_penalty = insertion_penalty
self.unique_insertion_penalty = unique_insertion_penalty
self.mismatch_penalty = mismatch_penalty
self.match_reward = match_reward
    def run(self):
        SEMAPHORE.acquire()
        try:
            schoenify(fasta=self.fasta, max_iter=self.max_iter, finaldir=self.finaldir, tmpdir=self.tmpdir, msa_tool=self.msa_tool, mode=self.mode, logging=self.logging, gap_penalty=self.gap_penalty, unique_gap_penalty=self.unique_gap_penalty, insertion_penalty=self.insertion_penalty, unique_insertion_penalty=self.unique_insertion_penalty, mismatch_penalty=self.mismatch_penalty, match_reward=self.match_reward)
        except TooFewSequencesException as e:
            sys.stderr.write(str(e))
        finally:
            # release even if an unexpected exception escapes, so waiting threads are not starved
            SEMAPHORE.release()
def getFastaList(directory=None, suffix=None):
    for f in os.listdir(directory):
        if f.endswith(suffix):
            yield(os.sep.join([directory, f]))
def main():
fastalist = []
fastadir = None
suffix= None
max_iter = None
finaldir = None
tmpdir = None
msa_tool = "mafft"
num_threads = 1
mode = "sites"
logging = False
#custom penalty:
gap_penalty = 1.0
unique_gap_penalty = 10.0
insertion_penalty = 1.0
unique_insertion_penalty = 1.0
mismatch_penalty = 1.0
match_reward = -10.0
try:
        opts, args = getopt.gnu_getopt(sys.argv[1:], "f:F:s:i:a:n:m:g:G:j:J:M:r:lh",
                                       ["fasta=", "fasta_dir=", "suffix=", "max_iteration=", "msa_tool=",
                                        "num_threads=", "mode=", "gap_penalty=", "unique_gap_penalty=",
                                        "insertion_penalty=", "unique_insertion_penalty=", "mismatch_penalty=",
                                        "match_reward=", "log", "help"])
except getopt.GetoptError as err:
print (str(err))
usage()
for o, a in opts:
if o in ("-f", "--fasta"):
fastalist = a.split(",")
statsout = fastalist[0]+".info"
tabout = fastalist[0]+".csv"
finaldir = os.path.dirname(fastalist[0])+"ps_out"
tmpdir = os.path.dirname(fastalist[0])+"ps_tmp"
elif o in ("-h","--help"):
usage()
elif o in ("-n", "--num_threads"):
num_threads = int(a)
elif o in ("-F","--fasta_dir"):
fastadir = a
finaldir = fastadir+os.sep+"ps_out"
tmpdir = fastadir+os.sep+"ps_tmp"
elif o in ("-s", "--suffix"):
suffix = a
elif o in ("-i", "--max_iteration"):
max_iter = int(a)
elif o in ("-a", "--msa_tool"):
msa_tool = a.lower()
elif o in ("-m", "--mode"):
mode = a.lower()
elif o in ("-l", "--log"):
logging = True
#only for mode "custom":
elif o in ("-g", "--gap_penalty"):
gap_penalty = float(a)
elif o in ("-G","--unique_gap_penalty"):
unique_gap_penalty = float(a)
elif o in ("-j", "--insertion_penalty"):
insertion_penalty = float(a)
elif o in ("-J", "--unique_insertion_penalty"):
unique_insertion_penalty = float(a)
elif o in ("-M", "--mismatch_penalty"):
mismatch_penalty = float(a)
elif o in ("-r", "--match_reward"):
match_reward = float(a)
else:
assert False, "unhandled option"
if not fastalist and not (fastadir and suffix):
usage()
else:
checkPath(progname = msa_tool)
checkMode(mode=mode)
finaldir = adjustDir(finaldir, mode)
tmpdir = adjustDir(tmpdir, mode)
global SEMAPHORE
SEMAPHORE=threading.BoundedSemaphore(num_threads)
if not os.path.exists(finaldir):
os.mkdir(finaldir)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
if fastadir:
print(suffix)
for f in getFastaList(fastadir, suffix):
print(f)
fastalist.append(f)
for fasta in fastalist:
SchoenifyThread(fasta, max_iter,finaldir,tmpdir, msa_tool, mode, logging, gap_penalty, unique_gap_penalty, insertion_penalty, unique_insertion_penalty, mismatch_penalty , match_reward).start()
#############################################
LOCK = threading.Lock()
SEMAPHORE = threading.BoundedSemaphore()
##########
if __name__ == "__main__":
main()
| gglyptodon/tmp | Pysickle/pysickle/pysickle.py | Python | gpl-3.0 | 24,572 |
from emburse.resource import (
EmburseObject,
Account,
Allowance,
Card,
Category,
Company,
Department,
Label,
Location,
Member,
SharedLink,
Statement,
Transaction
)
class Client(EmburseObject):
"""
Emburse API Client
    The API enables the creation of expense cards at scale for custom business solutions as well as for
    third-party app integrations. Cards can be created with set spending limits and assigned with just an email.
    Some use cases include vendor payments, employee expense control, and fleet card management.
API Version:
v1
API Docs:
https://www.emburse.com/api/v1/docs#getting-started
Authors:
Marc Ford <[email protected]>
"""
@property
def Account(self):
"""
Emburse Account Object,
configured with the auth token from the client
:return: A configured emburse.resource.Account
:rtype: Account
"""
return Account(auth_token=self.auth_token)
@property
def Allowance(self):
"""
Emburse Allowance Object,
configured with the auth token from the client
:return: A configured emburse.resource.Allowance
:rtype: Allowance
"""
return Allowance(auth_token=self.auth_token)
@property
def Card(self):
"""
Emburse Card Object,
configured with the auth token from the client
:return: A configured emburse.resource.Card
:rtype: Card
"""
return Card(auth_token=self.auth_token)
@property
def Category(self):
"""
Emburse Category Object,
configured with the auth token from the client
:return: A configured emburse.resource.Category
:rtype: Category
"""
return Category(auth_token=self.auth_token)
@property
def Company(self):
"""
Emburse Company Object,
configured with the auth token from the client
:return: A configured emburse.resource.Company
:rtype: Company
"""
return Company(auth_token=self.auth_token)
@property
def Department(self):
"""
Emburse Department Object,
configured with the auth token from the client
:return: A configured emburse.resource.Department
:rtype: Department
"""
return Department(auth_token=self.auth_token)
@property
def Label(self):
"""
Emburse Label Object,
configured with the auth token from the client
:return: A configured emburse.resource.Label
:rtype: Label
"""
return Label(auth_token=self.auth_token)
@property
def Location(self):
"""
Emburse Location Object,
configured with the auth token from the client
:return: A configured emburse.resource.Location
:rtype: Location
"""
return Location(auth_token=self.auth_token)
@property
def Member(self):
"""
Emburse Member Object,
configured with the auth token from the client
:return: A configured emburse.resource.Member
:rtype: Member
"""
return Member(auth_token=self.auth_token)
@property
def SharedLink(self):
"""
Emburse SharedLink Object,
configured with the auth token from the client
:return: A configured emburse.resource.SharedLink
:rtype: SharedLink
"""
return SharedLink(auth_token=self.auth_token)
@property
def Statement(self):
"""
Emburse Statement Object,
configured with the auth token from the client
:return: A configured emburse.resource.Statement
:rtype: Statement
"""
return Statement(auth_token=self.auth_token)
@property
def Transaction(self):
"""
Emburse Transaction Object,
configured with the auth token from the client
:return: A configured emburse.resource.Transaction
:rtype: Transaction
"""
return Transaction(auth_token=self.auth_token)
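# A minimal usage sketch (illustrative; the token is a placeholder, and only
# property access shown above is assumed about emburse.resource's interface):
#   client = Client(auth_token="YOUR_API_TOKEN")
#   card = client.Card        # a Card resource bound to the client's token
#   account = client.Account  # likewise for every property above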
| MarcFord/Emburse-python | emburse/client.py | Python | gpl-3.0 | 4,177 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^([a-zA-Z0-9_\-]+)/$', views.poll, name='poll'),
url(r'^([a-zA-Z0-9_\-]+).csv$', views.poll, {'export': True}, name='poll_export_csv'),
url(r'^([a-zA-Z0-9_\-]+)/comment/$', views.comment, name='poll_comment'),
url(r'^([a-zA-Z0-9_\-]+)/comment/(\d+)/edit/$', views.comment, name='poll_comment_edit'),
url(r'^([a-zA-Z0-9_\-]+)/comment/(\d+)/delete/$', views.delete_comment, name='poll_deleteComment'),
url(r'^([a-zA-Z0-9_\-]+)/watch/$', views.watch, name='poll_watch'),
url(r'^([a-zA-Z0-9_\-]+)/settings/$', views.settings, name='poll_settings'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/$', views.edit_choice, name='poll_editChoice'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/date/$', views.edit_date_choice, name='poll_editDateChoice'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/date/$', views.edit_dt_choice_date, name='poll_editDTChoiceDate'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/time/$', views.edit_dt_choice_time, name='poll_editDTChoiceTime'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/dateTime/combinations/$', views.edit_dt_choice_combinations,
name='poll_editDTChoiceCombinations'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choices/universal/$', views.edit_universal_choice, name='poll_editUniversalChoice'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choicevalues/', views.edit_choicevalues, name='poll_editchoicevalues'),
url(r'^([a-zA-Z0-9_\-]+)/edit/choicevalues_create', views.edit_choicevalues_create,
name='poll_editchoicevalues_create'),
url(r'^([a-zA-Z0-9_\-]+)/delete/$', views.delete, name='poll_delete'),
url(r'^([a-zA-Z0-9_\-]+)/vote/$', views.vote, name='poll_vote'),
url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/assign/$', views.vote_assign, name='poll_voteAssign'),
url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/edit/$', views.vote, name='poll_voteEdit'),
url(r'^([a-zA-Z0-9_\-]+)/vote/(\d+)/delete/$', views.vote_delete, name='poll_voteDelete'),
url(r'^([a-zA-Z0-9_\-]+)/copy/$', views.copy, name='poll_copy'),
]
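# Reversal sketch (illustrative): each name above can be resolved with
# django.core.urlresolvers.reverse in this Django era, e.g.
#   reverse('poll', args=['my-poll'])      -> '/my-poll/'
#   reverse('poll_vote', args=['my-poll']) -> '/my-poll/vote/'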
| fsinfuhh/Bitpoll | bitpoll/poll/urls.py | Python | gpl-3.0 | 2,082 |
# TODO.TXT-CLI-python test script
# Copyright (C) 2011-2012 Sigmavirus24, Jeff Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# TLDR: This is licensed under the GPLv3. See LICENSE for more details.
import unittest
import base
import todo
class AppendTest(base.BaseTest):
def test_append(self):
todo.cli.addm_todo("\n".join(self._test_lines_no_pri(self.num)))
for i in range(1, self.num + 1):
todo.cli.append_todo([str(i), "testing", "append"])
        self.assertNumLines(self.num, r"Test\s\d+\stesting\sappend")
if __name__ == "__main__":
unittest.main()
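# Running just this module (a sketch, assuming the tests directory is the
# working directory; unittest.main() above makes the file directly runnable):
#   python test_append.py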
| MinchinWeb/wm_todo | tests/test_append.py | Python | gpl-3.0 | 1,200 |
import json
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from apps.exercises.models import Attempts
from apps.maps.models import Graphs
from apps.ki.utils import performInference
from apps.research.utils import getParticipantByUID, studyFilter
def knowledge_inference(request, gid=""):
if request.method == "GET":
g = get_object_or_404(Graphs, pk=gid)
if not request.user.is_authenticated(): return HttpResponse(status=403)
u, uc = User.objects.get_or_create(pk=request.user.pk)
p = getParticipantByUID(request.user.pk, gid)
if g.study_active and p is None: return HttpResponse(status=401)
ex = Attempts.objects.filter(graph=g).filter(submitted=True)
ex = studyFilter(g, p, u, ex)
inferences = []
if ex.count() > 1:
r = [e.get_correctness() for e in ex]
inferences = performInference(g.concept_dict, r)
return HttpResponse(json.dumps(inferences), mimetype='application/json')
else:
return HttpResponse(status=405)
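# Illustrative URL wiring (hypothetical pattern; the real urls.py is not shown):
#   url(r'^maps/(?P<gid>\d+)/inference/$', views.knowledge_inference)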
| danallan/octal-application | server/apps/ki/views.py | Python | gpl-3.0 | 1,125 |
#!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# A sword KJV indexed search module.
# Copyright (C) 2012 Josiah Gordon <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
copying_str = \
'''
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
'''
warranty_str = \
'''
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
'''
""" KJV indexer and search modules.
BibleSearch: Can index and search the 'KJV' sword module using different types
of searches, including the following:
Strongs number search - Searches for all verses containing either
                            the strongs phrase, any strongs
                            number, or a superset of the strongs
                            numbers.
Morphological tags search - Same as the strongs...
Word or phrase search - Same as the strongs...
Regular expression search - Searches the whole Bible using the provided
regular expression.
"""
from sys import argv, exit
from cmd import Cmd
from difflib import get_close_matches
from functools import wraps
from time import strftime
from textwrap import fill
from collections import defaultdict
from itertools import product
import os
import sys
import json
import re
from .utils import *
try:
    import Sword
from .sword_verses import *
except ImportError:
Sword = None
from .verses import *
COLOR_LEVEL = 3
# Highlight colors.
highlight_color = '\033[7m'
highlight_text = '%s\\1\033[m' % highlight_color
word_regx = re.compile(r'\b([\w-]+)\b')
# Strip previous color.
strip_color_regx = re.compile(r'\033\[[\d;]*m')
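# A minimal usage sketch (illustration only): strip_color_regx removes
# any ANSI color codes left over from previous highlighting passes.
def _example_strip_color():
    """ Returns 'Genesis 1:1' with the color codes stripped out. """
    return strip_color_regx.sub('', '\033[32mGenesis\033[m 1:1')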
def render_raw2(verse_text, strongs=False, morph=False):
""" Render raw verse text.
"""
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
test_regx = re.compile(r'''
([^<]*)
<(?P<tag>seg|q|w|transChange|note)([^>]*)>
([\w\W]*?)
</(?P=tag)>
([^<]*)
''', re.I | re.X)
divname_regx = re.compile(r'''
<(?:divineName)>
([^<]*?)
([\'s]*)
</(?:divineName)>
''', re.I | re.X)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
info_print(verse_text, tag=4)
def recurse_tag(text):
""" Recursively parse raw verse text using regular expressions, and
returns the correctly formatted text.
"""
v_text = ''
for match in test_regx.finditer(text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
strongs_str = ''
morph_str = ''
italic_str = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
note_str = ' <n>%s</n>'
else:
note_str = '%s'
if strongs and strong_regx.search(tag_attr):
strongs_list = strong_regx.findall(tag_attr)
strongs_str = ' <%s>' % '> <'.join(strongs_list)
if morph and morph_regx.search(tag_attr):
morph_list = morph_regx.findall(tag_attr)
morph_str = ' {%s}' % '} {'.join(morph_list)
if match.re.search(tag_text):
temp_text = recurse_tag(tag_text) + strongs_str + morph_str
v_text += note_str % italic_str % (temp_text)
else:
info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
opt = marker_regx.sub('<p>\\1</p> ', opt)
tag_text = divname_regx.sub(div_upper, tag_text)
tag_text = note_str % italic_str % tag_text
v_text += opt + tag_text + strongs_str + morph_str
v_text += punct
return v_text
return recurse_tag(verse_text)
def render_raw(verse_text, strongs=False, morph=False):
""" Render raw verse text.
"""
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
test_regx = re.compile(r'''
([^<]*)
<(?P<tag>q|w|transChange|note)([^>]*)>
([\w\W]*?)
</(?P=tag)>
([^<]*)
''', re.I | re.X)
divname_regx = re.compile(r'''
(?:<seg>)?
<(?:divineName)>+
([^<]*?)
([\'s]*)
</(?:divineName)>
(?:</seg>)?
''', re.I | re.X)
xadded_regx = re.compile(r'<seg subType="x-added"[^>]*>([^<]*)</seg>',
re.I)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
v_text = ''
info_print(verse_text, tag=4)
for match in test_regx.finditer(verse_text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
italic_str = '%s'
if match.re.search(tag_text):
if 'added' in tag_attr.lower():
italic_str = '<i>%s</i>' + punct
punct = ''
match_list = match.re.findall(tag_text + punct)
else:
match_list = [match.groups()]
temp_text = ''
for opt, tag_name, tag_attr, tag_text, punct in match_list:
info_print((opt, tag_name, tag_attr, tag_text, punct), tag=4)
tag_text = divname_regx.sub(div_upper, tag_text)
tag_text = xadded_regx.sub('<i>\\1</i>', tag_text)
if 'marker' in opt.lower():
temp_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
opt = ''
if 'note' in tag_name.lower() or 'study' in tag_attr.lower():
temp_text += ' <n>%s</n>' % tag_text
tag_text = ''
temp_italic = '<i>%s</i>' if 'added' in tag_attr.lower() else '%s'
temp_text += temp_italic % (opt + tag_text)
if tag_name.strip().lower() in ['transchange', 'w', 'seg']:
if strong_regx.search(tag_attr) and strongs:
temp_text += \
' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
if morph_regx.search(tag_attr) and morph:
temp_text += \
' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
temp_text += punct
v_text += italic_str % temp_text
continue
opt, tag_name, tag_attr, tag_text, punct = match.groups()
tag_text = divname_regx.sub(
lambda m: m.group(1).upper() + m.group(2), tag_text)
if 'marker' in opt.lower():
v_text += '<p>%s</p> ' % marker_regx.sub('\\1', opt)
if 'added' in tag_attr.lower():
v_text += '<i>'
elif 'note' in tag_name.lower() or 'study' in tag_attr.lower():
v_text += ' <n>%s</n>' % tag_text
if match.re.search(tag_text):
for i in match.re.finditer(tag_text):
info_print(i.groups(), tag=4)
o, t_n, t_a, t_t, p = i.groups()
if t_n.strip().lower() in ['transchange', 'w']:
v_text += o + t_t
if strong_regx.search(t_a) and strongs:
v_text += \
' <%s>' % '> <'.join(strong_regx.findall(t_a))
if morph_regx.search(t_a) and morph:
v_text += \
' {%s}' % '} {'.join(morph_regx.findall(t_a))
v_text += p
else:
if tag_name.strip().lower() in ['transchange', 'w']:
v_text += tag_text
if strong_regx.search(tag_attr) and strongs:
v_text += \
' <%s>' % '> <'.join(strong_regx.findall(tag_attr))
if morph_regx.search(tag_attr) and morph:
v_text += \
' {%s}' % '} {'.join(morph_regx.findall(tag_attr))
if 'added' in tag_attr.lower():
v_text += '</i>'
v_text += punct
info_print('%s: %s: %s: %s: %s' % (opt, tag_name, tag_attr,
tag_text, punct), tag=4)
return v_text
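# A hedged sketch of render_raw's output.  The markup below is an
# assumption modeled on OSIS-style raw text (a 'w' tag carrying a
# 'lemma="strong:..."' attribute); the real KJV markup may differ.
def _example_render_raw():
    """ Returns 'God <H0430> created': with strongs=True the Strong's
    number is appended in <>'s after the tagged word.
    """
    raw = '<w lemma="strong:H0430">God</w> created'
    return render_raw(raw, strongs=True)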
def render_verses_with_italics(ref_list, wrap=True, strongs=False,
morph=False, added=True, notes=False,
highlight_func=None, module='KJV', *args):
""" Renders a the verse text at verse_ref with italics highlighted.
Returns a strong "verse_ref: verse_text"
ref_list - List of references to render
wrap - Whether to wrap the text.
strongs - Include Strong's Numbers in the output.
morph - Include Morphological Tags in the output.
added - Include added text (i.e. italics) in the output.
notes - Include study notes at the end of the text.
highlight_func - A function to highlight anything else
(i.e. search terms.)
module - Sword module to render from.
*args - Any additional arguments to pass to
hightlight_func
highlight_func should take at least three arguments, verse_text,
strongs, and morph.
"""
# Set the colors of different items.
end_color = '\033[m'
# Build replacement strings that highlight Strong's Numbers and
# Morphological Tags.
if COLOR_LEVEL >= 2:
# The Strong's and Morphology matching regular expressions.
# Match strongs numbers.
strongs_regx = re.compile(r'''
<((?:\033\[[\d;]*m)*?[GH]?\d+?(?:\033\[[\d;]*m)*?)>
''', re.I | re.X)
        info_print("Rendering results, please wait...\n", tag=0)
        # It needs to match with braces or it will catch all capitalized
        # words and words with '-'s in them.
morph_regx = re.compile(r'''
\{((?:\033\[[\d+;]*m)*?[\w-]*?(?:\033\[[\d+;]*m)*?)\}
''', re.X)
strongs_color = '\033[36m'
morph_color = '\033[35m'
strongs_highlight = '<%s\\1%s>' % (strongs_color, end_color)
morph_highlight = '{%s\\1%s}' % (morph_color, end_color)
if COLOR_LEVEL >= 0:
ref_color = '\033[32m'
ref_highlight = '%s\\1%s' % (ref_color, end_color)
if COLOR_LEVEL >= 1 and added:
italic_color = '\033[4m'
italic_regx = re.compile(r'<i>\s?(.*?)\s?</i>', re.S)
italic_highlight = '%s\\1%s' % (italic_color, end_color)
# Get the local text encoding.
encoding = get_encoding()
    # A substitution replacement function for highlighting italics.
    def color_italics(match):
        """ Color italic text, but first remove any previous color.
        """
        # Strip any previous colors.
        match_text = strip_color_regx.sub('', match.groups()[0])
        # Color the italics.
        return word_regx.sub(italic_highlight, match_text)
# Get an iterator over all the requested verses.
verse_iter = IndexedVerseTextIter(iter(ref_list), strongs, morph,
italic_markers=(COLOR_LEVEL >= 1),
added=added, paragraph=added,
notes=notes, module=module)
if VERBOSE_LEVEL == 20:
verse_iter = VerseTextIter(iter(ref_list), strongs, morph,
module=module, markup=1, #Sword.FMT_PLAIN,
render='render_raw')
if VERBOSE_LEVEL >= 30:
verse_iter = RawDict(iter(ref_list), module=module)
for verse_ref, verse_text in verse_iter:
if VERBOSE_LEVEL >= 30:
len_longest_key = len(max(verse_text[1].keys(), key=len))
for key, value in verse_text[1].items():
print('\033[33m{0:{1}}\033[m: {2}'.format(key,
len_longest_key,
value))
verse_text = verse_text[1]['_verse_text'][0]
        # Encode then decode the verse text to make it compatible with
        # the locale.
verse_text = verse_text.strip().encode(encoding, 'replace')
verse_text = verse_text.decode(encoding, 'replace')
verse_text = '%s: %s' % (verse_ref, verse_text)
# The text has to be word wrapped before adding any color, or else the
# color will add to the line length and the line will wrap too soon.
if wrap:
verse_text = fill(verse_text, screen_size()[1],
break_on_hyphens=False)
if COLOR_LEVEL >= 0:
# Color the verse reference.
colored_ref = word_regx.sub(ref_highlight, verse_ref)
verse_text = re.sub(verse_ref, colored_ref, verse_text)
        if COLOR_LEVEL >= 1 and added:
            # Highlight the italic text we previously pulled out.
            verse_text = italic_regx.sub(color_italics, verse_text)
if COLOR_LEVEL >= 2:
# Highlight Strong's and Morphology if they are visible.
if strongs:
verse_text = strongs_regx.sub(strongs_highlight, verse_text)
if morph:
verse_text = morph_regx.sub(morph_highlight, verse_text)
if COLOR_LEVEL >= 3:
# Highlight the different elements.
if highlight_func:
verse_text = highlight_func(verse_text, *args)
        # Finally produce the formatted text.
yield verse_text
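# A hedged usage sketch: render_verses_with_italics is a generator, so
# iterate over it to print each rendered verse.  It assumes an index has
# already been built for the KJV module.
def _example_render_verses():
    for line in render_verses_with_italics(['John 3:16'], wrap=True):
        print(line)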
def highlight_search_terms(verse_text, regx_list, highlight_text,
color_tag='\033\[[\d+;]*m', *args):
""" Highlight search terms in the verse text.
"""
def highlight_group(match):
""" Highlight each word/Strong's Number/Morphological Tag in the
match.
"""
match_text = match.group()
for word in set(match.groups()):
if word: # and word != match_text:
# if word.lower() == 'strong' and word == match_text:
# continue
info_print(word, tag=20)
try:
match_text = re.sub('''
(
(?:{0}|\\b)+
{1}
(?:{0}|\\b)+
)
'''.format(color_tag, re.escape(word)),
highlight_text, match_text, flags=re.X)
except Exception as err:
info_print("Error with highlighting word %s: %s" % \
(word, err), tag=4)
#match_text = match_text.replace(word, '\033[7m%s\033[m' % word)
# print(match_text)
return match_text
# Strip any previous colors.
# match_text = strip_color_regx.sub('', match.group())
# return word_regx.sub(highlight_text, match_text)
verse_text = verse_text.strip()
# Apply each highlighting regular expression to the text.
for regx in regx_list:
verse_text = regx.sub(highlight_group, verse_text)
return verse_text
def build_highlight_regx(search_list, case_sensitive, sloppy=False,
color_tag='\033\[[\\\\d+;]*m', extra_tag='\033'):
""" Build a regular expression and highlight string to colorize the
items in search_list as they appear in a verse.
"""
if not search_list:
return []
regx_list = []
    # Extra word boundary to catch ansi color escape sequences.
escaped_word_bound = '(?:{0}|\\\\b)+'.format(color_tag)
word_bound = '(?:{0}|\\b)+'.format(color_tag)
# Extra space filler to pass over ansi color escape sequences.
extra_space = '|{0}|{1}'.format(color_tag, extra_tag)
# print(word_bound, extra_space, '(?:\033\[[\d+;]*m|\\b)+')
for item in search_list:
item = item.strip()
is_regex = (('*' in item and ' ' not in item) or item.startswith('&'))
if ('*' in item and ' ' not in item) and not item.startswith('&'):
# Build a little regular expression to highlight partial words.
item = item[1:] if item[0] in '!^+|' else item
item = item.replace('*', '\w*')
item = r'{0}({1}){0}'.format(word_bound, item)
if item.startswith('&'):
# Just use a regular expression. ('&' marks the term as a regular
# expression.)
item = item[1:]
regx_list.append(Search.search_terms_to_regex(item, case_sensitive,
word_bound=escaped_word_bound, extra_space=extra_space,
sloppy=(sloppy or '~' in item), is_regex=is_regex))
return regx_list
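# A hedged sketch tying build_highlight_regx and highlight_search_terms
# together: build one regular expression per search term, then apply
# them using the module-level highlight_text replacement string.
def _example_highlighting():
    regx_list = build_highlight_regx(['light', 'dark*'], False)
    text = 'and God divided the light from the darkness.'
    return highlight_search_terms(text, regx_list, highlight_text)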
def mod_lookup(mod, items):
""" Looks up items in a module and returns the formated text.
"""
item_lookup = Lookup(mod)
    # Separate all elements by a comma.
item_list = ','.join(items.split()).split(',')
text_list = []
for item in item_list:
item_text = item_lookup.get_formatted_text(item)
text_list.append('\033[1m%s\033[m:\n%s' % (item, item_text))
return '\n\n'.join(text_list)
class StdoutRedirect(object):
""" Redirect stdout to a specified output function.
"""
def __init__(self, output_func, *args):
""" Set the output function and get the extra arguments to pass to it.
"""
self._output_func = output_func
self._args = args
self._old_stdout = sys.stdout
def write(self, data):
""" Write data to the output function.
"""
if data.strip():
self._output_func(data, *self._args)
def __enter__(self):
""" Change sys.stdout to this class.
"""
try:
sys.stdout = self
return self
except Exception as err:
print("Error in __enter__: %s" % err, file=sys.stderr)
return None
def __exit__(self, exc_type, exc_value, traceback):
""" Change sys.stdout back to its old value.
"""
try:
sys.stdout = self._old_stdout
if exc_type:
return False
return True
except Exception as err:
print("Error in __exit__: %s" % err, file=sys.stderr)
return False
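# A minimal usage sketch: temporarily route anything printed to stdout
# through info_print so it honors the program's verbosity settings.
def _example_stdout_redirect():
    with StdoutRedirect(info_print):
        print('This line goes through info_print instead of stdout.')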
class IndexedVerseTextIter(object):
""" An iterable object for accessing verses in the Bible. Maybe it will
be easier maybe not.
"""
def __init__(self, reference_iter, strongs=False, morph=False,
module='KJV', italic_markers=False, added=True,
paragraph=True, notes=False, path=''):
""" Initialize.
"""
reg_list = []
if not strongs:
reg_list.append(r'\s*<([GH]\d+)>')
if not morph:
reg_list.append(r'\s*\{([\w-]+)\}')
if not added:
reg_list.append(r'\s?<i>\s?(.*?)\s?</i>')
if not italic_markers:
reg_list.append(r'(<i>\s?|\s?</i>)')
if not paragraph:
reg_list.append(r'\s?<p>\s?(.*?)\s?</p>')
else:
reg_list.append(r'(<p>\s?|\s?</p>)')
reg_str = r'(?:%s)' % r'|'.join(reg_list)
self._clean_regex = re.compile(reg_str, re.S)
self._notes_regex = re.compile(r'\s?<n>\s?(.*?)\s?</n>', re.S)
self._notes_str = ' (Notes: \\1)' if notes else ''
self._index_dict = IndexDict('%s' % module, path)
self._ref_iter = reference_iter
def next(self):
""" Returns the next verse reference and text.
"""
return self.__next__()
def __next__(self):
""" Returns a tuple of the next verse reference and text.
"""
# Retrieve the next reference.
verse_ref = next(self._ref_iter)
# Set the verse and render the text.
verse_text = self._get_text(verse_ref)
return (verse_ref, verse_text.strip())
def __iter__(self):
""" Returns an iterator of self.
"""
return self
def _get_text(self, verse_ref):
""" Returns the verse text. Override this to produce formatted verse
text.
"""
verse_text = self._index_dict[verse_ref]
verse_text = self._clean_regex.sub('', verse_text)
verse_text = self._notes_regex.sub(self._notes_str, verse_text)
return verse_text
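# A hedged usage sketch: IndexedVerseTextIter yields
# (verse_reference, verse_text) tuples, so it can drive a simple loop.
# It assumes an index has already been built for the KJV module.
def _example_indexed_iter():
    refs = iter(['Genesis 1:1', 'Genesis 1:2'])
    for verse_ref, verse_text in IndexedVerseTextIter(refs, module='KJV'):
        print('%s: %s' % (verse_ref, verse_text))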
class CombinedParse(object):
""" A parser for simple combined search parsing.
((in OR tree) AND the) AND (house OR bush) =>
['in the house', 'in the bush', 'tree the house', 'tree the bush']
Also it has a NOT word list.
created NOT (and OR but) => ['created'] ['and', 'but']
"""
def __init__(self, arg_str):
""" Initialize the parser and parse the arg string.
"""
self._arg_str = arg_str
self._arg_list = arg_str.split()
parsed_list = self.parse_string(list(arg_str))
self._word_list, self._not_list = self.parse_list(parsed_list)
    # Make the results accessible via read-only properties.
word_list = property(lambda self: self._word_list)
not_list = property(lambda self: self._not_list)
def parse_list(self, arg_list):
""" Parse a list such as ['created', 'NOT', ['and', 'OR', 'but']] into
search_args = ['created'] not_list = ['and', 'but']
"""
# The list we're working on building.
working_list = []
# The list of words not to include.
not_list = []
for i in arg_list:
# Skip 'OR's
if i == 'OR':
continue
if isinstance(i, list):
# A list was found so parse it and get the results.
temp_list, temp_not_list = self.parse_list(i)
# Add the returned not list to the current not list.
not_list.extend(temp_not_list)
if working_list:
if working_list[-1] == 'AND':
# Pop the 'AND' off the end of the list.
working_list.pop()
                        # Combine each element of the working list with
                        # each element of the returned list, and replace
                        # the working list with those combinations.
# (i.e. working_list = ['this', 'that']
# temp_list = ['tree', 'house']
# result = ['this tree', 'this house',
# 'that tree', 'that house']
working_list = ['%s %s' % j \
for j in product(working_list, temp_list)]
elif working_list[-1] == 'NOT':
# Take the 'NOT' off to show we've processed it.
working_list.pop()
# Add the returned list to the NOT list.
not_list.extend(temp_list)
else:
                        # Just extend the working list with the returned list.
working_list.extend(temp_list)
else:
                    # Just extend the working list with the returned list.
working_list.extend(temp_list)
else:
if i == 'AND':
# Put the 'AND' on the list for later processing.
working_list.append(i)
elif working_list:
if working_list[-1] == 'AND':
# Take the 'AND' off the list.
working_list.pop()
# Combine all the elements of working_list with i, and
# replace working list with the resulting list.
# (i.e. working_list = ['he', 'it'] i = 'said'
# result = ['he said', 'it said']
working_list = ['%s %s' % (j, i) for j in working_list]
elif working_list[-1] == 'NOT':
# Remove the 'NOT'.
working_list.pop()
# Add the word to the not list.
not_list.append(i)
else:
# Add the word to the working list.
working_list.append(i)
else:
# Add the word to the working list.
working_list.append(i)
# Split and then combine all the strings in working_list.
# Basically removes runs of whitespace.
working_list = [' '.join(i.split()) for i in working_list]
# Return the final list and not list.
return working_list, not_list
def parse_parenthesis(self, arg_list):
""" Recursively processes strings in parenthesis converting them
to nested lists of strings.
"""
# The return list.
return_list = []
        # Temporary string.
temp_str = ''
while arg_list:
# Get the next character.
c = arg_list.pop(0)
if c == '(':
# An opening parenthesis was found so split the current string
# at the spaces putting them in the return list, and clean
# the string.
if temp_str:
return_list.extend(temp_str.split())
temp_str = ''
# Process from here to the closing parenthesis.
return_list.append(self.parse_parenthesis(arg_list))
elif c == ')':
# The parenthesis is closed so return back to the calling
# function.
break
else:
                # Append the current non-parenthesis character to the string.
temp_str += c
if temp_str:
# Split and add the string to the return list.
return_list.extend(temp_str.split())
# Return what we found.
return return_list
def parse_string(self, arg_list):
""" Parse a combined search arg string. Convert a string such as:
'created NOT (and OR but)' => ['created', 'NOT', ['and', 'OR', 'but']]
"""
# This does the same thing only using json.
#
# Regular expression to group all words.
#word_regx = re.compile(r'\b(\w*)\b')
# Put quotes around all words and opening replace paranthesis with
# brackets, put all of that in brackets.
#temp_str = '[%s]' % word_regx.sub('"\\1"', arg_str).replace('(', '[')
# Replace closing parenthesis with brackets and replace a '" ' with
# '", '.
#temp_str = temp_str.replace(')', ']').replace('" ', '",')
# finally replace '] ' with '], '. The end result should be a valid
# json string that can be converted to a list.
#temp_str = temp_str.replace('] ', '],')
# Convert the string to a list.
#return_list = json.loads(temp_str)
#return return_list
# The return list.
return_list = []
# Temporary string.
temp_str = ''
while arg_list:
# Pop the next character.
c = arg_list.pop(0)
if c == '(':
                # An opening parenthesis was found, so store and reset the
                # string, then parse what is in the parenthesis.
if temp_str:
return_list.extend(temp_str.split())
temp_str = ''
return_list.append(self.parse_parenthesis(arg_list))
else:
# Append the non parenthesis character to the string.
temp_str += c
if temp_str:
# Store the final string in the list.
return_list.extend(temp_str.split())
#info_print(return_list)
# Return the list.
return return_list
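# A quick sketch of CombinedParse in action, using the example from the
# class docstring above.
def _example_combined_parse():
    parser = CombinedParse('((in OR tree) AND the) AND (house OR bush)')
    # parser.word_list == ['in the house', 'in the bush',
    #                      'tree the house', 'tree the bush']
    # parser.not_list == []
    return parser.word_list, parser.not_list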
class Search(object):
""" Provides a simple way of searching an IndexDict for verses.
"""
# To check for spaces.
_whitespace_regx = re.compile(r'\s')
# Cleanup regular expressions.
_non_alnum_regx = re.compile(r'[^\w\*<>\{\}\(\)-]')
_fix_regx = re.compile(r'\s+')
# Match strongs numbers.
_strongs_regx = re.compile(r'[<]?(\b[GH]\d+\b)[>]?', re.I)
    # It needs to match with braces or it will catch all capitalized
    # words and words with '-'s in them.
_morph_regx = re.compile(r'[\(\{](\b[\w-]+\b)[\}\)]', re.I)
_word_regx = re.compile(r'\b([\w\\-]+)\b')
_space_regx = re.compile(r'\s+')
_non_word_regx = re.compile(r'[<>\(\)]')
_fix_strongs = classmethod(lambda c, m: '<%s>' % m.groups()[0].upper())
_fix_morph = classmethod(lambda c, m: '{%s}' % m.groups()[0].upper())
# Escape the morphological tags.
_escape_morph = classmethod(lambda c, m: \
'\{%s\}' % re.escape(m.groups()[0]).upper())
def __init__(self, module='KJV', path='', multiword=False):
""" Initialize the search.
"""
# The index dictionary.
self._index_dict = IndexDict(module, path)
self._module_name = module
self._multi = multiword
@classmethod
def search_terms_to_regex(cls, search_terms, case_sensitive,
word_bound='\\\\b', extra_space='',
sloppy=False, is_regex=False):
""" Build a regular expression from the search_terms to match a verse
in the Bible.
"""
# Set the flags for the regular expression.
flags = re.I if not case_sensitive else 0
if is_regex:
reg_str = search_terms
info_print('\nUsing regular expression: %s\n' % reg_str, tag=2)
try:
return re.compile(reg_str, flags)
except Exception as err:
print("An error occured while compiling the highlight "
"regular expression %s: %s." % (reg_str, err),
" There will be no highlighting.\n", file=sys.stderr)
return re.compile(r'')
# This will skip words.
not_words_str = r'\b\w+\b'
# This will skip Strong's Numbers.
not_strongs_str = r'<[^>]*>'
        # This will skip Morphological Tags.
not_morph_str = r'\{[^\}]*\}'
# This will skip all punctuation. Skipping ()'s is a problem for
# searching Morphological Tags, but it is necessary for the
# parenthesized words. May break highlighting.
not_punct_str = r'[\s,\?\!\.;:\\/_\(\)\[\]"\'-]'
# This will skip ansi color.
not_color_str = r'\033\[[\d;]*m'
# Match all *'s
star_regx = re.compile(r'\*')
# Hold the string that fills space between search terms.
space_str = ''
        # Get the stars out of the way so we can replace them with '\w*'
        # later.
temp_str, word_count = star_regx.subn(r'_star_', search_terms)
# Hack to get rid of unwanted characters.
temp_str = cls._non_alnum_regx.sub(' ', temp_str).split()
temp_str = ' '.join(temp_str)
# Phrases will have spaces in them
phrase = bool(cls._whitespace_regx.search(temp_str))
# Escape the morphological tags, and also find how many there are.
temp_str, morph_count = cls._morph_regx.subn(cls._escape_morph,
temp_str)
# Make all Strong's Numbers uppercase, also find how many there are.
temp_str, strongs_count = cls._strongs_regx.subn(cls._fix_strongs,
temp_str)
# Select all words.
#repl = '(\\\\b\\1\\\\b)'
# This works:
# temp_str, word_count = \
# cls._word_regx.subn('{0}(\\1){0}'.format(word_bound), temp_str)
repl = '(?:{0}(\\1){0})'.format(word_bound)
temp_str, word_count = cls._word_regx.subn(repl, temp_str)
# Replace what used to be *'s with '\w*'.
temp_str = temp_str.replace('_star_', '\w*')
# All the Strong's and Morphology were changed in the previous
# substitution, so if that number is greater than the number of
# Strong's plus Morphology then there were words in the search terms.
# I do this because I don't know how to only find words.
words_found = (strongs_count + morph_count) < word_count
if phrase:
# Build the string that is inserted between the items in the
# search string.
space_str = r'(?:%s%s' % (not_punct_str, extra_space)
if not bool(strongs_count) or sloppy:
# Skip over all Strong's Numbers.
space_str = r'%s|%s' % (space_str, not_strongs_str)
if not bool(morph_count) or sloppy:
# Skip all Morphological Tags.
space_str = r'%s|%s' % (space_str, not_morph_str)
if not words_found or bool(morph_count) or bool(strongs_count) or \
sloppy:
# Skip words. If word attributes are in the search we can
# skip over words and still keep it a phrase.
space_str = r'%s|%s' % (space_str, not_words_str)
# Finally make it not greedy.
space_str = r'%s)*?' % space_str
else:
space_str = ''
# Re-combine the search terms with the regular expression string
# between each element.
reg_str = space_str.join(temp_str.split())
info_print('\nUsing regular expression: %s\n' % reg_str, tag=2)
try:
return re.compile(reg_str, flags)
except Exception as err:
print("An error occured while compiling the highlight "
"regular expression %s: %s." % (reg_str, err),
" There will be no highlighting.\n", file=sys.stderr)
return re.compile(r'')
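    # A hedged sketch of what search_terms_to_regex builds for a plain
    # phrase such as 'let there be': each word is wrapped in word
    # boundaries, and a non-greedy filler is inserted between the words
    # that skips punctuation, Strong's Numbers (<...>), and Morphological
    # Tags ({...}), so the phrase still matches in tagged verse text.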
def _sorted_iter(self, verse_ref_set):
""" Returns an iterator over a sorted version of verse_ref_set.
"""
# Speed up the iteration by first sorting the range.
return iter(sorted(verse_ref_set, key=sort_key))
def _clean_text(self, text):
""" Return a clean (only alphanumeric) text of the provided string.
"""
        # Do we have to use two regular expressions to do this?
# Replace all non-alphanumeric characters with a space.
temp_text = self._non_alnum_regx.sub(' ', text)
# Replace one or more spaces with one space.
clean_text = self._fix_regx.sub(' ', temp_text)
return clean_text.strip()
def _fix_strongs_morph(self, search_terms):
""" Make any Strong's or Morphology uppercase, put parenthesis around
the Morphological Tags, and put <>'s around the Strong's Numbers.
"""
# Capitalize all strongs numbers and remove the <> from them.
temp_str = self._strongs_regx.sub(self._fix_strongs, search_terms)
# Capitalize all morphological tags and make sure they are in
# parenthesis.
temp_str = self._morph_regx.sub(self._fix_morph, temp_str)
return temp_str
def _process_search(func):
""" Returns a wrapper function that processes the search terms, calls
the wrapped function, and, if applicable, confines the resulting verse
set to a range.
"""
@wraps(func)
def wrapper(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" Process the search terms according to the wrapped functions
requirements, then apply the range, if given, to the returned set
of verses.
"""
if func.__name__ in ['sword_search']:
if not Sword:
print("Sword library not found.")
return
if not isinstance(search_terms, str):
# Combine the terms for use by the different methods.
search_terms = ' '.join(search_terms)
# Get a valid set of verse references that conform to the passed
# range.
range_set = parse_verse_range(range_str)
if func.__name__ not in ['regex_search', 'partial_word_search']:
# Try to catch and fix any Strong's Numbers or Morphological
# Tags.
search_terms = self._fix_strongs_morph(search_terms)
# Regular expression and combined searches get the search terms as
# they were passed.
if func.__name__ in ['multiword_search', 'anyword_search',
'phrase_search', 'mixed_phrase_search']:
# Get rid of any non-alphanumeric or '-' characters from
# the search string.
search_str = self._clean_text(search_terms).strip()
if strongs or morph:
# Strong's numbers and Morphological tags are all
# uppercase. This is only required if the Morphological
# Tags were not surrounded by parenthesis.
search_str = search_str.upper().strip()
else:
search_str = search_terms
# Get the set of found verses.
found_set = func(self, search_str, strongs, morph, added,
case_sensitive, range_set)
# The phrase, regular expression, and combined searches apply the
# range before searching, so only multi-word and any-word searches
# have it applied here.
if func.__name__ in ['multiword_search', 'anyword_search',
'partial_word_search']:
if range_set:
found_set.intersection_update(range_set)
return found_set
# Return wrapper function.
return wrapper
@_process_search
def combined_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" combined_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str=''): ->
Perform a combined search. Search terms could be
'created NOT (and OR but)' and it would find all verses with the word
'created' in them and remove any verse that had either 'and' or 'but.'
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for '%s'..." % search_terms, tag=1)
# Process the search_terms.
arg_parser = CombinedParse(search_terms)
# Get the list of words and/or phrases to include.
word_list = arg_parser.word_list
# Get the list of words and/or phrases to NOT include.
not_list = arg_parser.not_list
phrase_search = self.phrase_search
multiword_search = self.multiword_search
def combine_proc(str_list):
""" Performs combined search on the strings in str_list, and
returns a set of references that match.
"""
and_it = False
temp_set = set()
for word in str_list:
# A '+' before or after a word means it should have a phrase
# search done on it and the words with it.
if '+' in word:
# Do a phrase search on the word string.
result_set = phrase_search(word.replace('+', ' '), strongs,
morph, case_sensitive,
range_str)
elif word == '&':
# Combine the next search results with this one.
and_it = True
continue
else:
# Do a multi-word search on the word string.
result_set = multiword_search(word, strongs, morph,
case_sensitive, range_str)
if and_it:
# The previous word said to find verses that match both.
temp_set.intersection_update(result_set)
and_it = False
else:
# Only keep the verses that have either one group or the
# other but not both.
temp_set.symmetric_difference_update(result_set)
return temp_set
# Remove any verses that have the NOT words in them.
found_set = combine_proc(word_list).difference(combine_proc(not_list))
return found_set
@_process_search
def combined_phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" combined_phrase_search(self, search_terms, strongs=False,
morph=False, case_sensitive=False, range_str=''): ->
Perform a combined phrase search. Search terms could be
'created NOT (and AND but)' and it would find all verses with the word
'created' in them and remove any verse that had the phrase 'and but.'
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for '%s'..." % search_terms, tag=1)
# Process the search_terms.
arg_parser = CombinedParse(search_terms)
# Get the list of words and/or phrases to include.
word_list = arg_parser.word_list
# Get the list of words and/or phrases to NOT include.
not_list = arg_parser.not_list
phrase_search = self.phrase_search
def combine_proc(str_list):
""" Performs combined phrase search on the strings in str_list, and
returns a set of references that match.
"""
temp_set = set()
for word in str_list:
# Do a phrase search on the word string.
result_set = phrase_search(word.replace('+', ' '), strongs,
morph, case_sensitive,
range_str)
# Include all the verses that have any of the word groups.
temp_set.update(result_set)
return temp_set
# Remove any verses that have the NOT words in them.
found_set = combine_proc(word_list).difference(combine_proc(not_list))
return found_set
@_process_search
def multiword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" multiword_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a multiword search using the search_terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with all these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# All that needs to be done is find all references with all the
# searched words in them.
found_set = self._index_dict.value_intersect(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def eitheror_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" eitheror_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one and only one of the terms
searched for.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with one and not all of these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# Any verse with one and only one of the searched words.
found_set = self._index_dict.value_sym_diff(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def anyword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" anyword_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one or more of the search
terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with any of these words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
# Any verse with one or more of the searched words.
found_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
return found_set
@_process_search
def partial_word_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" partial_word_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a search returning any verse with one or more words matching
        the partial words given in the search terms.  Partial words are
        marked with *'s (e.g. '*guil*' will match any word with 'guil' in
        it, such as 'guilt' or 'beguile').
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with any of these partial words "
"'%s'..." % ', '.join(search_terms.split()), tag=1)
#found_set = self._index_dict.value_union(
#self._words_from_partial(search_terms, case_sensitive),
#case_sensitive)
search_list = search_terms.split()
found_set = self._index_dict.from_partial(search_list, case_sensitive)
return found_set
def _words_from_partial(self, partial_word_list, case_sensitive=False):
""" Search through a list of partial words and yield words that match.
"""
flags = re.I if not case_sensitive else 0
# Split the search terms and search through each word key in the index
# for any word that contains the partial word.
word_list = partial_word_list.split()
for word in self._index_dict['_words_']:
for partial_word in word_list:
# A Regular expression that matches any number of word
# characters for every '*' in the term.
reg_str = '\\b%s\\b' % partial_word.replace('*', '\w*')
try:
word_regx = re.compile(reg_str, flags)
except Exception as err:
print('There is a problem with the regular expression '
'%s: %s' % (reg_str, err), file=sys.stderr)
exit()
if word_regx.match(word):
yield word
def _process_phrase(func):
""" Returns a wrapper function for wrapping phrase like searches.
"""
@wraps(func)
def wrapper(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" Gets a regular expression from the wrapped function, then
            builds a set of verse references to search.  Finally, it calls the
searching function with the regular expression and the verse
reference iterator, and returns the resulting set of references.
"""
search_regx = func(self, search_terms, strongs, morph, added,
case_sensitive, range_str)
# First make sure we are only searching verses that have all the
# search terms in them.
search_list = search_terms.split()
if '*' in search_terms:
ref_set = self._index_dict.from_partial(search_list,
case_sensitive,
common_limit=5000)
else:
ref_set = self._index_dict.value_intersect(search_list,
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
# No need to search for a single word phrase.
if len(search_terms.split()) == 1:
return ref_set
# Sort the list so it may be a little faster. Only needed if we're
# using the sword module to look them up.
ref_iter = self._sorted_iter(ref_set)
# Disable Strong's and Morphological if only words are used.
strongs = bool(self._strongs_regx.search(search_terms))
morph = bool(self._morph_regx.search(search_terms))
return self.find_from_regex(ref_iter, search_regx, strongs, morph)
return wrapper
@_process_search
@_process_phrase
def ordered_multiword_search(self, search_terms, strongs=False,
morph=False, added=True, case_sensitive=False,
range_str=''):
""" ordered_multiword_search(self, search_terms, strongs=False,
morph=False, case_sensitive=False, range_str='') ->
Perform an ordered multiword search. Like a multiword search, but all
the words have to be in order.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with these words in order "
"'%s'..." % search_terms, tag=1)
return self.search_terms_to_regex(search_terms, case_sensitive,
sloppy=True)
@_process_search
@_process_phrase
def phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" phrase_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a phrase search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with this phrase "
"'%s'..." % search_terms, tag=1)
# Make all the terms the same case if case doesn't matter.
flags = re.I if not case_sensitive else 0
if strongs:
# Match strongs phrases.
search_reg_str = search_terms.replace(' ', r'[^<]*')
elif morph:
# Match morphological phrases.
search_reg_str = search_terms.replace(' ', r'[^\{]*')
else:
# Match word phrases
search_reg_str = '\\b%s\\b' % search_terms.replace(' ',
r'\b(<[^>]*>|\{[^\}]*\}|\W)*\b')
# Make a regular expression from the search terms.
return re.compile(search_reg_str, flags)
@_process_search
@_process_phrase
def mixed_phrase_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" mixed_phrase_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a phrase search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for verses with this phrase "
"'%s'..." % search_terms, tag=1)
# Make a regular expression from the search terms.
return self.search_terms_to_regex(search_terms, case_sensitive)
@_process_search
def regex_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" regex_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a regular expression search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
info_print("Searching for regular expression '%s'..." % search_terms,
tag=1)
# re.I is case insensitive.
flags = re.I if not case_sensitive else 0
try:
# Make a regular expression from the search_terms.
search_regx = re.compile(r'%s' % search_terms, flags)
except Exception as err:
print('There is a problem with the regular expression "%s": %s' % \
(search_terms, err), file=sys.stderr)
exit()
if range_str:
# Only search through the supplied range.
ref_iter = self._sorted_iter(range_str)
else:
# Search the entire Bible.
ref_iter = VerseIter('Genesis 1:1')
return self.find_from_regex(ref_iter, search_regx, strongs, morph,
tag=1, try_clean=True)
def find_from_regex(self, ref_iter, search_regex, strongs=False,
morph=False, added=True, tag=3, try_clean=False):
""" Iterates through all the verses in the ref iter iterator and
returns a list of verses whose text matches search_regx.
"""
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = IndexedVerseTextIter(ref_iter, strongs=strongs,
morph=morph, added=added,
module=self._module_name)
found_set = set()
for verse_ref, verse_text in verse_iter:
info_print('\033[%dD\033[KSearching...%s' % \
(len(verse_ref) + 20, verse_ref), end='', tag=tag)
# Search for matches in the verse text.
if search_regex.search(verse_text):
found_set.add(verse_ref)
elif try_clean and not strongs and not morph:
                # Should we do this, or should we trust that the user
                # knows what punctuation is in the verses?
clean_verse_text = self._clean_text(verse_text)
if search_regex.search(clean_verse_text):
found_set.add(verse_ref)
info_print("...Done.", tag=tag)
return found_set
def mixed_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" mixed_search(self, search_terms, strongs=False, morph=False,
case_sensitive=False, range_str='') ->
Perform a mixed search.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
added - Search in the added text (i.e. italics).
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
"""
found_set = set()
not_set = set()
and_set = set()
or_set = set()
xor_set = set()
combine_dict = {
'!': not_set.update,
'+': and_set.intersection_update,
'|': or_set.update,
'^': xor_set.symmetric_difference_update,
}
for term in search_terms:
if term[0] in '!+^|':
# Set the correct combining function, and cleanup the item.
if term[0] == '+' and not and_set:
# All of these verses go in the output.
combine_func = and_set.update
else:
combine_func = combine_dict[term[0]]
term = term[1:]
else:
if self._multi and found_set:
# If multiword is default and found_set is not empty
# make all search terms appear in the output.
combine_func = found_set.intersection_update
else:
# Any of these verses could be in the output
combine_func = found_set.update
if term.startswith('&'):
# Allow regular expression searching.
term = term[1:]
search_func = self.regex_search
elif ' ' in term:
# Search term is a quoted string, so treat it like a phrase.
if term.startswith('~'):
# ~'s trigger ordered multiword or sloppy phrase search.
term = term[1:]
search_func = self.ordered_multiword_search
else:
search_func = self.mixed_phrase_search
elif '*' in term:
# Search for partial words.
search_func = self.partial_word_search
else:
# A single word should be (multi/any)-word.
search_func = self.multiword_search
# Perform a strongs search.
strongs = bool(self._strongs_regx.match(term.upper()))
            # Perform a morphological search.
morph = bool(self._morph_regx.match(term.upper()))
# Search for words or phrases.
temp_set = search_func(term, strongs, morph, added, case_sensitive,
range_str)
# Add the results to the correct set.
combine_func(temp_set)
# Update the result set.
found_set.update(or_set)
found_set.update(xor_set)
if and_set and found_set:
# Make sure all the verses that are in the output have the words
            # or phrases that had a '+' in front of them.
found_set = and_set.union(found_set.intersection(and_set))
elif and_set:
# Found set must be empty to fill it with and_set's contents.
found_set.update(and_set)
# Finally remove all the verses that are in the not_set.
found_set.difference_update(not_set)
return found_set
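    # A hedged sketch of mixed_search term syntax (terms arrive as a list
    # of words and quoted phrases):
    #     ['light', '!darkness']  -> verses with 'light', minus any verse
    #                                that also contains 'darkness'.
    #     ['&b[ae]gat']           -> a leading '&' makes the term a
    #                                regular expression search.
    #     ['~let there be light'] -> a leading '~' on a phrase does a
    #                                sloppy (ordered multiword) search.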
def sword_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str='',
search_type='lucene'):
""" sword_search(self, search_terms, strongs=False, morph=False,
        case_sensitive=False, range_str='', search_type='lucene') ->
Use the sword module to search for the terms.
search_terms - Terms to search for.
strongs - Search for Strong's Number phrases.
morph - Search for Morphological Tag phrases.
case_sensitive - Perform a case sensitive search.
range_str - A verse range to limit the search to.
search_type - What search type to use.
"""
search_terms = ' '.join(search_terms)
info_print("Searching using the Sword library for "
"'%s'..." % search_terms, tag=1)
found_set = set()
search_type_dict = {
'regex': 0,
'phrase': -1,
'multiword': -2,
'entryattrib': -3, # (e.g. Word//Lemma//G1234)
'lucene': -4
}
try:
# Render the text as plain.
markup = Sword.MarkupFilterMgr(Sword.FMT_PLAIN)
# Don't own this or it will crash.
markup.thisown = False
mgr = Sword.SWMgr(markup)
# Load the module.
module = mgr.getModule(self._module_name)
# Set the search type based on the search_type argument.
search_type = search_type_dict.get(search_type.lower(), -4)
# Make sure we can search like this.
if not module.isSearchSupported(search_terms, search_type):
print("Search not supported", file=sys.stderr)
                return found_set
# Get the range key.
if not range_str:
range_str = 'Genesis-Revelation'
range_k = Sword.VerseKey().parseVerseList(range_str, 'Genesis 1:1',
True)
flags = re.I if not case_sensitive else 0
if strongs:
# Search for strongs numbers.
                # I don't know how to search for morphological tags using
                # Sword's search function.
prefix = 'lemma:'
for term in ','.join(search_terms.split()).split(','):
if not term.startswith('lemma:'):
# Make the term start with lemma: so sword will find
# it.
term = '%s%s' % (prefix, term)
# Perform the search.
resource = module.doSearch(term, search_type, flags,
range_k)
# Get the list of references from the range text.
found_set.update(resource.getRangeText().split('; '))
else:
# Perform the search.
resource = module.doSearch(search_terms, search_type, flags,
range_k)
# Get the list of references from the range text.
found_set.update(resource.getRangeText().strip().split('; '))
except Exception as err:
print("There was a problem while searching: %s" % err,
file=sys.stderr)
found_set.discard('')
return found_set
@_process_search
def test_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
ref_list = sorted(ref_set, key=sort_key)
term_dict = defaultdict(list)
raw_dict = RawDict(iter(ref_list), self._module_name)
words_len = 0
for verse_ref, (verse_text, verse_dict) in raw_dict:
for term in search_terms.split():
if self._strongs_regx.match(term):
num = self._strongs_regx.sub('\\1', term)
words = set(verse_dict[num.upper()])
if words:
term_dict[num.upper()].append({verse_ref: words})
elif self._morph_regx.match(term):
tag = self._morph_regx.sub('\\1', term)
words = set(verse_dict[tag.upper()])
if words:
term_dict[tag.upper()].append({verse_ref: words})
else:
for key, value in verse_dict['_words'][0].items():
if ' %s ' % term.lower() in ' %s ' % key.lower():
attr_dict = value[0]
if strongs and 'strongs' in attr_dict:
attr_list = attr_dict['strongs']
attr_list.append(key)
term_dict[term].append({verse_ref: attr_list})
if morph and 'morph' in attr_dict:
attr_list = attr_dict['morph']
attr_list.append(key)
words_len = max(len(attr_list), words_len)
term_dict[term].append({verse_ref: attr_list})
len_longest_ref = len(max(ref_set, key=len))
for key, value in term_dict.items():
words_len = max([len(i) for d in value for i, v in d.items()])
print('%s:' % key)
for dic in value:
ref, words = tuple(dic.items())[0]
if isinstance(words, list):
w_str = '"%s"' % '", "'.join(words[:-1])
l_str = '"%s"' % words[-1]
words_str = '{0:{2}}: {1}'.format(w_str, l_str, words_len)
else:
words_str = '"%s"' % '", "'.join(words)
print('\t{0:{1}}: {2}'.format(ref, len_longest_ref, words_str))
#print('\t{0:{1}}: "{2}"'.format(ref, len_longest_ref,
# '", "'.join(words)))
exit()
@_process_search
def test2_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
ref_iter = iter(sorted(ref_set, key=sort_key))
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = IndexedVerseTextIter(ref_iter, strongs=True,
morph=morph, added=added,
module=self._module_name)
# This will skip words.
not_words_str = r'\b\w+\b'
# This will skip Strong's Numbers.
not_strongs_str = r'<[^>]*>'
        # This will skip Morphological Tags.
not_morph_str = r'\{[^\}]*\}'
# This will skip all punctuation. Skipping ()'s is a problem for
# searching Morphological Tags, but it is necessary for the
# parenthesized words. May break highlighting.
not_punct_str = r'[\s,\?\!\.;:\\/_\(\)\[\]"\'-]'
max_ref_len = len(max(ref_set, key=len))
found_set = set()
term_dict = defaultdict(list)
for verse_ref, verse_text in verse_iter:
for term in search_terms.split():
if self._strongs_regx.match(term):
test_regx = re.compile(r'''
\s
((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+)
\s
((?:%s)+)
''' % term, re.I | re.X)
elif self._morph_regx.match(term):
test_regx = re.compile(r'''
\s((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+)
(?:<[^>]*>|\s)+
((?:%s)+)
''' % term, re.I | re.X)
else:
test_regx = re.compile(r'''
((?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])*?
%s
(?:\b\w+\b|[\s,\?\!\.;:\\/_\(\)\[\]"\'-])+)+
((?:<[^>]*>|\{[^\}]*\}|\s)+)
''' % term, re.I | re.X)
for match in test_regx.finditer(verse_text):
phrase, num = match.groups()
phrase = phrase.strip(',').strip('.').strip()
phrase = phrase.strip(';').strip('?').strip(':').strip()
num = num.replace('<', '').replace('>', '')
num = num.replace('{', '').replace('}', '')
if not phrase or not num.strip():
if not strongs:
break
print(verse_ref, verse_text)
print(match.group(), match.groups())
exit()
num = '"%s"' % '", "'.join(num.split())
term_dict[term].append(
'\t{0:{1}}: {2:{4}}: "{3}"'.format(verse_ref,
max_ref_len,
num, phrase,
18)
)
for term, lst in term_dict.items():
term = term.replace('<', '').replace('>', '')
term = term.replace('{', '').replace('}', '')
print('%s:\n%s' % (term, '\n'.join(lst)))
exit()
@_process_search
def test3_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
if not ref_set:
exit()
ref_iter = iter(sorted(ref_set, key=sort_key))
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = VerseTextIter(ref_iter, strongs=strongs,
morph=morph, render='raw',
module=self._module_name)
found_set = set()
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
tag_regx = re.compile(r'''
([^<]*) # Before tag.
<(?P<tag>q|w|transChange|note) # Tag name.
([^>]*)> # Tag attributes.
([\w\W]*?)</(?P=tag)> # Tag text and end.
([^<]*) # Between tags.
''', re.I | re.X)
divname_regx = re.compile(r'''
(?:<seg>)?
<(?:divineName)>+
([^<]*?)
([\'s]*)
</(?:divineName)>
(?:</seg>)?
''', re.I | re.X)
xadded_regx = re.compile(r'<seg subType="x-added"[^>]*>([^<]*)</seg>',
re.I)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
term_dict = defaultdict(list)
len_attrs = 0
for verse_ref, verse_text in verse_iter:
#print(render_raw(verse_text, strongs, morph))
#print(render_raw2(verse_text, strongs, morph))
#continue
for term in search_terms.split():
term = term.replace('<', '').replace('>', '')
term = term.replace('{', '').replace('}', '')
v_text = ''
info_print('%s\n' % verse_text, tag=4)
term_regx = re.compile('\\b%s\\b' % term, re.I)
for match in tag_regx.finditer(verse_text):
opt, tag_name, tag_attr, tag_text, punct = match.groups()
tag_text = xadded_regx.sub('\\1', tag_text)
if match.re.search(tag_text):
match_list = match.re.findall(tag_text + punct)
else:
match_list = [match.groups()]
for tag_tup in match_list:
opt, tag_name, tag_attr, tag_text, punct = tag_tup
info_print(tag_tup, tag=4)
value_list = []
attr_list = []
strongs_list = []
morph_list = []
tag_text = divname_regx.sub(div_upper, tag_text)
v_text += marker_regx.sub('\\1 ', opt) + tag_text + \
punct
if term.upper() in tag_attr:
attr_list = [term.upper()]
elif term_regx.search(tag_text):
if strongs or not morph:
strongs_list = strong_regx.findall(tag_attr)
if morph:
morph_list = morph_regx.findall(tag_attr)
for lst in (strongs_list, morph_list, attr_list):
if lst:
attr_str = '%s"' % '", "'.join(lst)
value_list = [attr_str, tag_text.strip()]
term_dict[term].append({verse_ref: value_list})
len_attrs = max(len(attr_str), len_attrs)
info_print(v_text, tag=4)
max_len_ref = len(max(ref_set, key=len))
for term, lst in term_dict.items():
print('%s:' % term)
for dic in lst:
ref, (attrs, s) = list(dic.items())[0]
s_l = '{1:{0}}: "{2}'.format(len_attrs, attrs, s)
print('\t{0:{1}}: "{2}"'.format(ref, max_len_ref, s_l))
exit()
@_process_search
def test4_search(self, search_terms, strongs=False, morph=False,
added=True, case_sensitive=False, range_str=''):
""" A Test.
"""
ref_set = self._index_dict.value_union(search_terms.split(),
case_sensitive)
if range_str:
# Only search through the supplied range.
ref_set.intersection_update(range_str)
if not ref_set:
exit()
ref_iter = iter(sorted(ref_set, key=sort_key))
# Get an iterator that will return tuples
# (verse_reference, verse_text).
verse_iter = VerseTextIter(ref_iter, strongs=strongs,
morph=morph, render='raw',
module=self._module_name)
found_set = set()
strong_regx = re.compile(r'strong:([GH]\d+)', re.I)
morph_regx = re.compile(r'(?:Morph|robinson):([\w-]*)', re.I)
tag_regx = re.compile(r'''
([^<>]*) # Before tag.
<(?P<tag>seg|q|w|transChange|note|title)# Tag name.
([^>]*)> # Tag attributes.
([\w\W]*?)</(?P=tag)> # Tag text and end.
([^<]*) # Between tags.
''', re.I | re.X)
divname_regx = re.compile(r'''
<(?:divineName)>
([^<]*?)
([\'s]*)
</(?:divineName)>
''', re.I | re.X)
div_upper = lambda m: m.group(1).upper() + m.group(2)
marker_regx = re.compile(r'.*marker="(.)".*', re.I)
term_dict = defaultdict(list)
len_attrs = 0
def recurse_tag(text, term, verse_ref, ctag_attr=''):
""" Recursively parses raw verse text using regular expressions,
            and returns a list of dictionaries of the search term and any
            attributes with its text.
"""
term_list = []
for match in tag_regx.finditer(text):
value_list = []
attr_list = []
strongs_list = []
morph_list = []
opt, tag_name, tag_attr, tag_text, punct = match.groups()
if match.re.search(tag_text):
term_list.extend(recurse_tag(tag_text, term, verse_ref,
tag_attr))
else:
info_print((opt, tag_name, tag_attr, tag_text, punct),
tag=4)
if marker_regx.match(opt):
opt = ''
tag_text = opt + divname_regx.sub(div_upper,
tag_text) + punct
if term.upper() in tag_attr or term.upper() in ctag_attr:
attr_list = [term.upper()]
elif term_regx.search(tag_text):
if strongs or not morph:
strongs_list.extend(strong_regx.findall(tag_attr))
strongs_list.extend(strong_regx.findall(ctag_attr))
if morph:
morph_list.extend(morph_regx.findall(tag_attr))
morph_list.extend(morph_regx.findall(ctag_attr))
for lst in (strongs_list, morph_list, attr_list):
if lst:
a_str = '%s"' % '", "'.join(lst)
value_list = [a_str, tag_text.strip()]
term_list.append({verse_ref: value_list})
return term_list
for verse_ref, verse_text in verse_iter:
#print(render_raw(verse_text, strongs, morph))
#print(render_raw2(verse_text, strongs, morph))
#continue
for term in search_terms.split():
term = term.replace('<', '').replace('>', '')
term = term.replace('{', '').replace('}', '')
v_text = ''
info_print('%s\n' % verse_text, tag=4)
term_regx = re.compile('\\b%s\\b' % term, re.I)
value_list = recurse_tag(verse_text, term, verse_ref)
if value_list:
for i in value_list:
len_attrs = max(len(i[verse_ref][0]), len_attrs)
term_dict[term].extend(value_list)
max_len_ref = len(max(ref_set, key=len))
for term, lst in term_dict.items():
print('%s:' % term)
for dic in lst:
ref, (attrs, s) = list(dic.items())[0]
s_l = '{1:{0}}: "{2}'.format(len_attrs, attrs, s)
print('\t{0:{1}}: "{2}"'.format(ref, max_len_ref, s_l))
return set()
concordance_search = test4_search
class SearchCmd(Cmd):
""" A Command line interface for searching the Bible.
"""
def __init__(self, module='KJV'):
""" Initialize the settings.
"""
self.prompt = '\001\033[33m\002search\001\033[m\002> '
self.intro = '''
%s Copyright (C) 2011 Josiah Gordon <[email protected]>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
This is a Bible search program that searches the KJV
sword module. If you need help type 'help' to display a list of valid
commands. For help on a specific command type 'help <command>.'
Examples:
mixed 'jordan h03383' (Finds all verses with Strong's number 'H03383'
translated 'Jordan')
concordance live (Lists the references of all the verses with
the word 'live' in them, the Strong's number
that was used, and what the phrase is that
that Strong's number is translated as.)
concordance h02418 (Lists the references of all the verses with
the Strong's number 'H02418' and how it was
translated. It only occurs six times and all
of them are in Daniel.)
strongs h02418 (Looks up and gives the definition of the
Strong's number 'H02418.')
set range gen-mal (Sets the range to the Old Testament.)
Just about everything has tab-completion, so you can hit tab a couple
of times to see all the completions to what you are typing.
If you want to see this intro again type: 'intro'
To find out more type 'help'
(example: 'help search' will list the help for the search command.)
To exit type 'quit' or hit 'CTRL+D'
''' % os.path.basename(argv[0])
super(SearchCmd, self).__init__()
self._quoted_regex = re.compile('''
((?P<quote>'|")
.*?
(?P=quote)|[^'"]*)
''', re.X)
# Perform the specified search.
self._search = Search(module=module)
self._results = set()
self._search_list = []
self._highlight_list = []
self._words = self._search._index_dict['_words_']
self._strongs = self._search._index_dict['_strongs_']
self._morph = self._search._index_dict['_morph_']
self._book_list = list(book_gen())
self._setting_dict = {
'search_type': 'mixed',
'search_strongs': False,
'search_morph': False,
'case_sensitive': False,
'context': 0,
'one_line': False,
'show_notes': False,
'show_strongs': False,
'show_morph': False,
'added': True,
'range': '',
'extras': (),
'module': module,
}
self._search_types = ['mixed', 'mixed_phrase', 'multiword', 'anyword',
'combined', 'partial_word', 'ordered_multiword',
'regex', 'eitheror', 'sword_lucene',
'sword_phrase', 'sword_multiword',
'sword_entryattrib']
def _complete(self, text, line, begidx, endidx, complete_list):
""" Return a list of matching text.
"""
retlist = [i for i in complete_list if i.startswith(text)]
if not retlist:
# If nothing was found try words that contain the text.
retlist = [i for i in complete_list if text in i]
if not retlist:
# Finally try matching misspelled words.
retlist = get_close_matches(text, complete_list, cutoff=0.7)
return retlist
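# Illustrative example (hypothetical word list): completing 'beg' tries
# prefix matches first (e.g. 'begin', 'beginning'), then substring
# matches, and finally close spelling matches via difflib's
# get_close_matches.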
def _get_list(self, args):
""" Split the args into quoted strings and seperate words.
"""
arg_list = []
# Split the arg string into quoted phrases and single words.
for i, c in self._quoted_regex.findall(args):
if c in ['"', "'"]:
arg_list.append(i.strip(c))
else:
arg_list.extend(i.split())
return arg_list
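# Illustrative example (hypothetical input, not from the original code):
# self._get_list("'+in the beginning' !was") returns
# ['+in the beginning', '!was'] -- the quoted phrase survives as one
# argument while the unquoted text is split on whitespace.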
def do_test(self, args):
""" A Test.
"""
quoted_regex = re.compile('''((?P<quote>'|").*?(?P=quote)|[^'"]*)''')
print(quoted_regex.findall(args))
print(self._get_list(args))
def _print(self, text_iter):
""" Print all the text breaking it and screens so the user can read it
all.
"""
count = 0
for verse in text_iter:
count += len(verse.splitlines()) if '\n' in verse else 1
print(verse)
if count >= screen_size()[0] - 4:
count = 0
try:
input('[Press enter to see more, or CTRL+D to end.]')
print('\033[1A\033[K', end='')
except:
print('\033[G\033[K', end='')
break
def precmd(self, line):
""" Set the correct settings before running the line.
"""
if not line:
return line
cmd = line.split()[0]
if cmd in self._search_types:
search_type = cmd
if search_type.startswith('sword_'):
self._setting_dict['extras'] = (search_type[6:],)
search_type = search_type[:5]
else:
self._setting_dict['extras'] = ()
self._setting_dict['search_type'] = search_type
return line
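# Example (hypothetical session): typing 'sword_lucene faith' sets
# search_type to 'sword' and extras to ('lucene',) before the
# corresponding do_* method runs.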
def postcmd(self, stop, line):
""" If lookup was called then show the results.
"""
if not line:
return stop
cmd = line.split()[0]
if cmd == 'lookup':
self.onecmd('show_results')
return stop
def completedefault(self, text, line, begidx, endidx):
""" By default complete words in the Bible.
"""
words_list = self._words
return self._complete(text, line, begidx, endidx, words_list)
def do_shell(self, args):
""" Execute shell commands.
"""
os.system(args)
def do_concordance(self, args):
""" Perform a concordance like search.
"""
if not args:
return
arg_list = self._get_list(args)
# Search.
strongs_search = self._setting_dict['search_strongs']
morph_search = self._setting_dict['search_morph']
search_range = self._setting_dict['range']
case_sensitive = self._setting_dict['case_sensitive']
search_added = self._setting_dict['added']
self._search.test4_search(arg_list, strongs_search, morph_search,
search_added, case_sensitive, search_range)
def do_show(self, args):
""" Show relevent parts of the GPL.
"""
if args.lower() in ['c', 'copying']:
# Show the conditions.
print(copying_str)
elif args.lower() in ['w', 'warranty']:
# Show the warranty.
print(warranty_str)
else:
# Show the entire license.
print('%s%s' % (copying_str, warranty_str))
def do_EOF(self, args):
""" Exit when eof is recieved.
"""
return True
def do_quit(self, args):
""" Exit.
"""
return True
def do_help(self, args):
""" Print the help.
"""
if args:
try:
self._print(getattr(self, 'do_%s' % args).__doc__.splitlines())
return
except:
pass
super(SearchCmd, self).do_help(args)
def do_intro(self, args):
""" Re-print the intro screen.
"""
self._print(self.intro.splitlines())
def complete_show_results(self, text, line, begidx, endidx):
""" Tab completion for the show_results command.
"""
cmd_list = ['strongs', 'morph', 'notes', 'one_line']
return self._complete(text, line, begidx, endidx, cmd_list)
def do_show_results(self, args):
""" Output the results.
Print out all the verses that were either found by searching or by
lookup.
Extra arguments:
+/-strongs - Enable/disable strongs in the output.
+/-morph - Enable/disable morphology in the output.
+/-notes - Enable/disable foot notes in the output.
+/-added - Enable/disable added text in the output.
+/-one_line - Enable/disable one line output.
anything else - If the output is from looking up verses with
the lookup command, then any other words or
quoted phrases given as arguments will be
highlighted in the output.
"""
search_type = self._setting_dict['search_type']
strongs_search = self._setting_dict['search_strongs']
morph_search = self._setting_dict['search_morph']
search_range = self._setting_dict['range']
case_sensitive = self._setting_dict['case_sensitive']
search_added = self._setting_dict['added']
module_name = self._setting_dict['module']
highlight_list = self._highlight_list
kwargs = self._setting_dict
results = self._results
# Get the output arguments.
show_strongs = self._setting_dict['show_strongs'] or strongs_search
show_morph = self._setting_dict['show_morph'] or morph_search
show_notes = self._setting_dict['show_notes']
one_line = self._setting_dict['one_line']
arg_list = self._get_list(args)
if '+strongs' in arg_list:
show_strongs = True
arg_list.remove('+strongs')
if '+morph' in arg_list:
show_morph = True
arg_list.remove('+morph')
if '-strongs' in arg_list:
show_strongs = False
arg_list.remove('-strongs')
if '-morph' in arg_list:
show_morph = False
arg_list.remove('-morph')
if '+notes' in arg_list:
show_notes = True
arg_list.remove('+notes')
if '-notes' in arg_list:
show_notes = False
arg_list.remove('-notes')
if '+one_line' in arg_list:
one_line = True
arg_list.remove('+one_line')
if '-one_line' in arg_list:
one_line = False
arg_list.remove('-one_line')
if '+added' in arg_list:
search_added = True
arg_list.remove('+added')
if '-added' in arg_list:
search_added = False
arg_list.remove('-added')
if search_range:
results.intersection_update(parse_verse_range(search_range))
if not highlight_list:
# Highlight anything else the user typed in.
highlight_list = arg_list
# Don't modify regular expression searches.
if search_type != 'regex':
regx_list = build_highlight_regx(highlight_list, case_sensitive,
(search_type == 'ordered_multiword'))
if kwargs['context']:
regx_list.extend(build_highlight_regx(results, case_sensitive))
else:
arg_str = ' '.join(arg_list)
regx_list = [re.compile(arg_str, re.I if not case_sensitive else 0)]
# Flags for the highlight string.
flags = re.I if not case_sensitive else 0
# Add the specified number of verses before and after to provide
# context.
context_results = sorted(add_context(results, kwargs['context']),
key=sort_key)
# Get a formatted verse string generator.
verse_gen = render_verses_with_italics(context_results,
not one_line,
show_strongs, show_morph,
search_added,
show_notes,
highlight_search_terms,
module_name, regx_list,
highlight_text, flags)
if one_line:
# Print it all on one line.
print(' '.join(verse_gen))
else:
# Print the verses on separate lines.
self._print(verse_gen)
#print('\n'.join(verse_gen))
def complete_lookup(self, text, line, begidx, endidx):
""" Try to complete Verse references.
"""
name_list = self._book_list
text = text.capitalize()
return self._complete(text, line, begidx, endidx, name_list)
def do_lookup(self, args):
""" Lookup the verses by references.
Example: lookup gen1:3-5;mal3 (Look up Genesis chapter 1 verses
3-5 and Malachi chapter 3.)
"""
self._results = parse_verse_range(args)
self._highlight_list = []
def complete_strongs(self, text, line, begidx, endidx):
""" Tabe complete Strong's numbers.
"""
text = text.capitalize()
return self._complete(text, line, begidx, endidx, self._strongs)
def do_strongs(self, numbers):
""" Lookup one or more Strong's Numbers.
strongs number,number,number....
"""
# Lookup all the Strong's Numbers in the argument list.
# Make all the numbers separated by commas.
strongs_list = ','.join(numbers.upper().split()).split(',')
#TODO: Find what Strong's Modules are available and use the best,
# or let the user decide.
greek_strongs_lookup = Lookup('StrongsRealGreek')
hebrew_strongs_lookup = Lookup('StrongsRealHebrew')
for strongs_num in strongs_list:
# Greek Strong's Numbers start with a 'G' and Hebrew ones start
# with an 'H.'
if strongs_num.upper().startswith('G'):
mod_name = 'StrongsRealGreek'
else:
mod_name = 'StrongsRealHebrew'
print('%s\n' % mod_lookup(mod_name, strongs_num[1:]))
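# Illustrative example (hypothetical input): 'strongs h02418 g25' looks
# up '02418' in StrongsRealHebrew and '25' in StrongsRealGreek.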
def complete_morph(self, text, line, begidx, endidx):
""" Tabe complete Morphological Tags.
"""
text = text.capitalize()
return self._complete(text, line, begidx, endidx, self._morph)
def do_morph(self, tags):
""" Lookup one or more Morphological Tags.
morph tag,tag,tag....
"""
# Lookup all the Morphological Tags in the argument list.
# I don't know how to lookup Hebrew morphological tags, so I
# only lookup Greek ones in 'Robinson.'
print('%s\n' % mod_lookup('Robinson', tags.upper()))
def do_websters(self, words):
""" Lookup one or more words in Websters Dictionary.
websters word,word,word...
"""
# Lookup words in the dictionary.
print('%s\n' % mod_lookup('WebstersDict', words))
def do_kjvd(self, words):
""" Lookup one or more words in the KJV Dictionary.
kjvd word,word,word...
"""
# Lookup words in the KJV dictionary.
print('%s\n' % mod_lookup('KJVD', words))
def do_daily(self, daily):
""" Display a daily devotional from 'Bagsters Daily light.'
daily date/today
Dates are given in the format Month.Day. The word 'today' is an alias
for today's date. The default is to look up today's devotional.
"""
daily = 'today' if not daily else daily
# Lookup the specified daily devotional.
if daily.lower() == 'today':
# Today is an alias for today's date.
daily = strftime('%m.%d')
daily_lookup = Lookup('Daily')
# Try to make the output nicer.
print(daily_lookup.get_formatted_text(daily))
def complete_set(self, text, line, begidx, endidx):
""" Complete setting options.
"""
setting_list = self._setting_dict.keys()
return self._complete(text, line, begidx, endidx, setting_list)
def do_set(self, args):
""" Set settings.
Run without arguments to see the current settings.
set show_strongs = True/False - Enable strongs numbers in the
output.
set show_morph = True/False - Enable morphology in the output.
set context = <number> - Show <number> verses of context.
set case_sensitive = True/False - Set the search to case sensitive.
set range = <range> - Confine search/output to <range>.
set one_line = True/False - Don't break output at verses.
set added = True/False - Show/search added text.
set show_notes = True/False - Show foot-notes in output.
set search_type = <type> - Use <type> for searching.
set search_strongs = True/False - Search Strong's numbers
(deprecated).
set search_morph = True/False - Search Morphological Tags
(deprecated).
"""
if not args:
print("Current settings:\n")
max_len = len(max(self._setting_dict.keys(), key=len))
for setting, value in self._setting_dict.items():
if setting.lower() == 'range':
if not Sword:
value = VerseRange.parse_range(value)
value = '; '.join(str(i) for i in value)
else:
key = Sword.VerseKey()
range_list = key.parseVerseList(value, 'Genesis 1:1',
True, False)
value = range_list.getRangeText()
print('{1:{0}} = {2}'.format(max_len, setting, value))
print()
else:
for setting in args.split(';'):
if '=' in setting:
k, v = setting.split('=')
elif ' ' in setting:
k, v = setting.split()
else:
print(self._setting_dict.get(setting, ''))
continue
k = k.strip()
v = v.strip()
if isinstance(v, str):
if v.lower() == 'false':
v = False
elif v.lower() == 'true':
v = True
elif v.isdigit():
v = int(v)
self._setting_dict[k] = v
def complete_search(self, text, line, begidx, endidx):
""" Bible word completion to make searching easier.
"""
words_list = self._words
return self._complete(text, line, begidx, endidx, words_list)
complete_mixed = complete_search
complete_mixed_phrase = complete_search
complete_multiword = complete_search
complete_anyword = complete_search
complete_combined = complete_search
complete_partial_word = complete_search
complete_ordered_multiword = complete_search
complete_regex = complete_search
complete_eitheror = complete_search
complete_sword_lucene = complete_search
complete_sword_phrase = complete_search
complete_sword_multiword = complete_search
complete_sword_entryattrib = complete_search
def do_search(self, args):
""" Search the Bible.
Search types are:
mixed - A search made up of a mix of most of the
other search types. Put an '!' in front of
words/phrases that you don't want in any of
the results.
mixed_phrase - A phrase search that can include words,
Strong's, and Morphology. Can be used in
the mixed search by including words in
quotes.
multiword - Search for verses containing each word at
least once. Use in the mixed search by
putting a '+' in front of any word/phrase
you want to be in all the results.
anyword - Search for verses containing one or more of
any of the words. Use in the mixed search
by putting a '|' in front of any
word/phrase you want in any but not
necessarily all the results.
eitheror - Search for verses containing one and only
one of the words. In the mixed search put
a '^' in front of two or more words/phrases
to make the results contain one and only
one of the marked search terms.
combined - Search using a phrase like ('in' AND ('the'
OR 'it')) finding verses that have both
'in' and 'the' or both 'in' and 'it'.
To do the same thing with the mixed search
use a phrase like this:
(mixed '+in' '^the' '^it').
partial_word - Search for partial words (e.g. a search for
'begin*' would find all the words starting
with 'begin'.) Use in the mixed search to
make partial words in a phrase.
ordered_multiword - Search for words in order, but not
necessarily in a phrase. In the mixed
search put a '~' in front of any quoted
group of words you want to be in that
order, but you don't mind if they have
other words between them.
regex - A regular expression search (slow).
Examples:
mixed - (mixed '+~in the beg*' '!was') finds any
verse that has the words 'in', 'the', and
any word starting with 'beg', in order, but
not the word 'was.'
mixed_phrase - (mixed_phrase 'h011121 of gomer') finds any
verse with that phrase.
mixed search flags first column prefix (these should come first):
----------------------------------------------------------------
! = not (not in any of the results)
+ = all (in all the results)
| = or (in at least one result)
^ = exclusive or (only one in any of the results)
not example: (mixed 'in the beginning' !was) results will have the
phrase 'in the beginning' but will not have the word
'was.'
all example: (mixed 'in the beginning' +was) results may have the
phrase 'in the beginning' but all of them will have
the word 'was.' (Note: this will find all verses with
the word 'was' in them; if you want the phrase
'in the beginning' in all of them as well, you have to
prefix it with a '+' too.)
or example: (mixed 'in the beginning' |was) results will be all the
verses with the phrase 'in the beginning' and all the
verses with the word 'was.' This is the default way
the mixed search operates, so the '|' can be excluded
in this case.
exclusive or example: (mixed '^in the beginning' '^was') results
will either have the phrase 'in the
beginning' or the word 'was', but not both.
To be effective you must have at least two
search terms prefixed with '^.'
mixed search flags second column prefix (these come after the first
column flags):
-------------------------------------------------------------------
~ = sloppy phrase or ordered multiword
& = regular expression search.
sloppy phrase example: (mixed '~in the beginning') results will
have all the words 'in', 'the', and
'beginning,' but they may have other words
between them.
regular expression example:
(mixed '&\\b[iI]n\\b\\s+\\b[tT][hH][eE]\\b\\s+\\b[bB]eginning\\b')
results will be all the verses with the phrase 'in the beginning.'
"""
if not args:
return
arg_list = self._get_list(args)
arg_str = ' '.join(arg_list)
self._search_list = arg_list
extras = self._setting_dict['extras']
search_type = self._setting_dict['search_type']
try:
# Get the search function asked for.
search_func = getattr(self._search, '%s_search' % search_type)
except AttributeError as err:
# An invalid search type was specified.
print("Invalid search type: %s" % search_type, file=sys.stderr)
exit()
# Search.
strongs_search = self._setting_dict['search_strongs']
morph_search = self._setting_dict['search_morph']
search_range = self._setting_dict['range']
case_sensitive = self._setting_dict['case_sensitive']
search_added = self._setting_dict['added']
self._results = search_func(arg_list, strongs_search, morph_search,
search_added, case_sensitive, search_range,
*extras)
count = len(self._results)
info_print("\nFound %s verse%s.\n" % \
(count, 's' if count != 1 else ''),
tag=-10)
print("To view the verses type 'show_results.'")
if search_type in ['combined', 'combined_phrase']:
# Combined searches are complicated.
# Parse the search argument and build a highlight string from the
# result.
arg_parser = CombinedParse(arg_str)
parsed_args = arg_parser.word_list
not_l = arg_parser.not_list
# Remove any stray '+'s.
#highlight_str = highlight_str.replace('|+', ' ')
if search_type == 'combined_phrase':
# A phrase search needs to highlight phrases.
highlight_list = parsed_args
else:
highlight_list = ' '.join(parsed_args).split()
# Build the highlight string for the other searches.
elif search_type in ['anyword', 'multiword', 'eitheror',
'partial_word']:
# Highlight each word separately.
highlight_list = arg_str.split()
elif search_type == 'mixed':
# In mixed search phrases are in quotes so the arg_list should be
# what we want, but don't include any !'ed words.
highlight_list = [i for i in arg_list if not i.startswith('!')]
elif search_type in ['phrase', 'mixed_phrase', 'ordered_multiword']:
# Phrases should highlight phrases.
highlight_list = [arg_str]
elif search_type == 'sword':
highlight_list = arg_list
self._highlight_list = highlight_list
do_mixed = do_search
do_mixed_phrase = do_search
do_multiword = do_search
do_anyword = do_search
do_combined = do_search
do_partial_word = do_search
do_ordered_multiword = do_search
do_regex = do_search
do_eitheror = do_search
do_sword_lucene = do_search
do_sword_phrase = do_search
do_sword_multiword = do_search
do_sword_entryattrib = do_search
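# Minimal usage sketch (an assumption; the surrounding project provides
# its own entry point, and 'KJV' must be an installed SWORD module):
# if __name__ == '__main__':
#     SearchCmd(module='KJV').cmdloop()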
| zepto/biblesearch.web | sword_search.old/search.py | Python | gpl-3.0 | 146,124 |
# -*- coding: utf-8 -*-
from .common_settings import *
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'dz(#w(lfve24ck!!yrt3l7$jfdoj+fgf+ru@w)!^gn9aq$s+&y'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
| Mustapha90/IV16-17 | tango_with_django_project/dev_settings.py | Python | gpl-3.0 | 375 |
#!/usr/bin/python
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for generating CTS test descriptions and test plans."""
import glob
import os
import re
import shutil
import subprocess
import sys
import xml.dom.minidom as dom
from cts import tools
from multiprocessing import Pool
def GetSubDirectories(root):
"""Return all directories under the given root directory."""
return [x for x in os.listdir(root) if os.path.isdir(os.path.join(root, x))]
def GetMakeFileVars(makefile_path):
"""Extracts variable definitions from the given make file.
Args:
makefile_path: Path to the make file.
Returns:
A dictionary mapping variable names to their assigned value.
"""
result = {}
pattern = re.compile(r'^\s*([^:#=\s]+)\s*:=\s*(.*?[^\\])$', re.MULTILINE + re.DOTALL)
stream = open(makefile_path, 'r')
content = stream.read()
for match in pattern.finditer(content):
result[match.group(1)] = match.group(2)
stream.close()
return result
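# Illustrative sketch (not part of the original build tooling): the regex
# above turns simple 'VAR := value' make assignments into a dictionary.
def _ExampleGetMakeFileVars():
  """Hypothetical helper demonstrating GetMakeFileVars."""
  import tempfile
  handle = tempfile.NamedTemporaryFile(suffix='.mk', delete=False)
  handle.write('LOCAL_PACKAGE_NAME := CtsExampleTestCases\n')
  handle.close()
  # Prints {'LOCAL_PACKAGE_NAME': 'CtsExampleTestCases'}.
  print GetMakeFileVars(handle.name)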
class CtsBuilder(object):
"""Main class for generating test descriptions and test plans."""
def __init__(self, argv):
"""Initialize the CtsBuilder from command line arguments."""
if len(argv) != 6:
print 'Usage: %s <testRoot> <ctsOutputDir> <tempDir> <androidRootDir> <docletPath>' % argv[0]
print ''
print 'testRoot: Directory under which to search for CTS tests.'
print 'ctsOutputDir: Directory in which the CTS repository should be created.'
print 'tempDir: Directory to use for storing temporary files.'
print 'androidRootDir: Root directory of the Android source tree.'
print 'docletPath: Class path where the DescriptionGenerator doclet can be found.'
sys.exit(1)
self.test_root = sys.argv[1]
self.out_dir = sys.argv[2]
self.temp_dir = sys.argv[3]
self.android_root = sys.argv[4]
self.doclet_path = sys.argv[5]
self.test_repository = os.path.join(self.out_dir, 'repository/testcases')
self.plan_repository = os.path.join(self.out_dir, 'repository/plans')
self.definedplans_repository = os.path.join(self.android_root, 'cts/tests/plans')
def GenerateTestDescriptions(self):
"""Generate test descriptions for all packages."""
pool = Pool(processes=2)
# generate test descriptions for android tests
results = []
pool.close()
pool.join()
return sum(map(lambda result: result.get(), results))
def __WritePlan(self, plan, plan_name):
print 'Generating test plan %s' % plan_name
plan.Write(os.path.join(self.plan_repository, plan_name + '.xml'))
def GenerateTestPlans(self):
"""Generate default test plans."""
# TODO: Instead of hard-coding the plans here, use a configuration file,
# such as test_defs.xml
packages = []
descriptions = sorted(glob.glob(os.path.join(self.test_repository, '*.xml')))
for description in descriptions:
doc = tools.XmlFile(description)
packages.append(doc.GetAttr('TestPackage', 'appPackageName'))
# sort the list to give the same sequence based on name
packages.sort()
plan = tools.TestPlan(packages)
plan.Exclude('android\.performance.*')
self.__WritePlan(plan, 'CTS')
self.__WritePlan(plan, 'CTS-TF')
plan = tools.TestPlan(packages)
plan.Exclude('android\.performance.*')
plan.Exclude('android\.media\.cts\.StreamingMediaPlayerTest.*')
# Test plan to not include media streaming tests
self.__WritePlan(plan, 'CTS-No-Media-Stream')
plan = tools.TestPlan(packages)
plan.Exclude('android\.performance.*')
self.__WritePlan(plan, 'SDK')
plan.Exclude(r'android\.signature')
plan.Exclude(r'android\.core.*')
self.__WritePlan(plan, 'Android')
plan = tools.TestPlan(packages)
plan.Include(r'android\.core\.tests.*')
plan.Exclude(r'android\.core\.tests\.libcore\.package\.harmony*')
self.__WritePlan(plan, 'Java')
# TODO: remove this once the tests are fixed and merged into Java plan above.
plan = tools.TestPlan(packages)
plan.Include(r'android\.core\.tests\.libcore\.package\.harmony*')
self.__WritePlan(plan, 'Harmony')
plan = tools.TestPlan(packages)
plan.Include(r'android\.core\.vm-tests-tf')
self.__WritePlan(plan, 'VM-TF')
plan = tools.TestPlan(packages)
plan.Include(r'android\.tests\.appsecurity')
self.__WritePlan(plan, 'AppSecurity')
# hard-coded white list for PDK plan
plan.Exclude('.*')
plan.Include('android\.aadb')
plan.Include('android\.bluetooth')
plan.Include('android\.graphics.*')
plan.Include('android\.hardware')
plan.Include('android\.media')
plan.Exclude('android\.mediastress')
plan.Include('android\.net')
plan.Include('android\.opengl.*')
plan.Include('android\.renderscript')
plan.Include('android\.telephony')
plan.Include('android\.nativemedia.*')
plan.Include('com\.android\.cts\..*')#TODO(stuartscott): Should PDK have all these?
self.__WritePlan(plan, 'PDK')
flaky_tests = BuildCtsFlakyTestList()
# CTS Stable plan
plan = tools.TestPlan(packages)
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-stable')
# CTS Flaky plan - list of tests known to be flaky in lab environment
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.Include(package+'$')
plan.IncludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-flaky')
small_tests = BuildAospSmallSizeTestList()
medium_tests = BuildAospMediumSizeTestList()
new_test_packages = BuildCtsVettedNewPackagesList()
# CTS - sub plan for public, small size tests
plan = tools.TestPlan(packages)
plan.Exclude('.*')
for package, test_list in small_tests.iteritems():
plan.Include(package+'$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-kitkat-small')
# CTS - sub plan for public, medium size tests
plan = tools.TestPlan(packages)
plan.Exclude('.*')
for package, test_list in medium_tests.iteritems():
plan.Include(package+'$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-kitkat-medium')
# CTS - sub plan for hardware tests which is public, large
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'android\.hardware$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-hardware')
# CTS - sub plan for media tests which is public, large
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'android\.media$')
plan.Include(r'android\.view$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-media')
# CTS - sub plan for mediastress tests which is public, large
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'android\.mediastress$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-mediastress')
# CTS - sub plan for new tests that is vetted for L launch
plan = tools.TestPlan(packages)
plan.Exclude('.*')
for package, test_list in new_test_packages.iteritems():
plan.Include(package+'$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-l-tests')
#CTS - sub plan for new test packages added for staging
plan = tools.TestPlan(packages)
for package, test_list in small_tests.iteritems():
plan.Exclude(package+'$')
for package, test_list in medium_tests.iteritems():
plan.Exclude(package+'$')
for package, tests_list in new_test_packages.iteritems():
plan.Exclude(package+'$')
plan.Exclude(r'android\.hardware$')
plan.Exclude(r'android\.media$')
plan.Exclude(r'android\.view$')
plan.Exclude(r'android\.mediastress$')
plan.Exclude(r'com\.android\.cts\.browserbench')
for package, test_list in flaky_tests.iteritems():
plan.ExcludeTests(package, test_list)
self.__WritePlan(plan, 'CTS-staging')
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'com\.drawelements\.')
self.__WritePlan(plan, 'CTS-DEQP')
plan = tools.TestPlan(packages)
plan.Exclude('.*')
plan.Include(r'android\.webgl')
self.__WritePlan(plan, 'CTS-webview')
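# Note (an illustrative reading of the calls above, not documented API):
# plans that should contain only a few packages first Exclude('.*') and
# then Include(...) the wanted packages, so the Include entries are
# expected to take precedence over the blanket exclude.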
def BuildAospMediumSizeTestList():
""" Construct a defaultdic that lists package names of medium tests
already published to aosp. """
return {
'android.app' : [],
'android.core.tests.libcore.package.libcore' : [],
'android.core.tests.libcore.package.org' : [],
'android.core.vm-tests-tf' : [],
'android.dpi' : [],
'android.host.security' : [],
'android.net' : [],
'android.os' : [],
'android.permission2' : [],
'android.security' : [],
'android.telephony' : [],
'android.webkit' : [],
'android.widget' : [],
'com.android.cts.browserbench' : []}
def BuildAospSmallSizeTestList():
""" Construct a defaultdict that lists packages names of small tests
already published to aosp. """
return {
'android.aadb' : [],
'android.acceleration' : [],
'android.accessibility' : [],
'android.accessibilityservice' : [],
'android.accounts' : [],
'android.admin' : [],
'android.animation' : [],
'android.bionic' : [],
'android.bluetooth' : [],
'android.calendarcommon' : [],
'android.content' : [],
'android.core.tests.libcore.package.com' : [],
'android.core.tests.libcore.package.conscrypt' : [],
'android.core.tests.libcore.package.dalvik' : [],
'android.core.tests.libcore.package.sun' : [],
'android.core.tests.libcore.package.tests' : [],
'android.database' : [],
'android.dreams' : [],
'android.drm' : [],
'android.effect' : [],
'android.gesture' : [],
'android.graphics' : [],
'android.graphics2' : [],
'android.jni' : [],
'android.keystore' : [],
'android.location' : [],
'android.nativemedia.sl' : [],
'android.nativemedia.xa' : [],
'android.nativeopengl' : [],
'android.ndef' : [],
'android.opengl' : [],
'android.openglperf' : [],
'android.permission' : [],
'android.preference' : [],
'android.preference2' : [],
'android.provider' : [],
'android.renderscript' : [],
'android.rscpp' : [],
'android.rsg' : [],
'android.sax' : [],
'android.signature' : [],
'android.speech' : [],
'android.tests.appsecurity' : [],
'android.text' : [],
'android.textureview' : [],
'android.theme' : [],
'android.usb' : [],
'android.util' : [],
'com.android.cts.dram' : [],
'com.android.cts.filesystemperf' : [],
'com.android.cts.jank' : [],
'com.android.cts.opengl' : [],
'com.android.cts.simplecpu' : [],
'com.android.cts.ui' : [],
'com.android.cts.uihost' : [],
'com.android.cts.videoperf' : [],
'zzz.android.monkey' : []}
def BuildCtsVettedNewPackagesList():
""" Construct a defaultdict that maps package names that is vetted for L. """
return {
'android.JobScheduler' : [],
'android.core.tests.libcore.package.harmony_annotation' : [],
'android.core.tests.libcore.package.harmony_beans' : [],
'android.core.tests.libcore.package.harmony_java_io' : [],
'android.core.tests.libcore.package.harmony_java_lang' : [],
'android.core.tests.libcore.package.harmony_java_math' : [],
'android.core.tests.libcore.package.harmony_java_net' : [],
'android.core.tests.libcore.package.harmony_java_nio' : [],
'android.core.tests.libcore.package.harmony_java_util' : [],
'android.core.tests.libcore.package.harmony_java_text' : [],
'android.core.tests.libcore.package.harmony_javax_security' : [],
'android.core.tests.libcore.package.harmony_logging' : [],
'android.core.tests.libcore.package.harmony_prefs' : [],
'android.core.tests.libcore.package.harmony_sql' : [],
'android.core.tests.libcore.package.jsr166' : [],
'android.core.tests.libcore.package.okhttp' : [],
'android.display' : [],
'android.host.theme' : [],
'android.jdwp' : [],
'android.location2' : [],
'android.print' : [],
'android.renderscriptlegacy' : [],
'android.signature' : [],
'android.tv' : [],
'android.uiautomation' : [],
'android.uirendering' : [],
'android.webgl' : [],
'com.drawelements.deqp.gles3' : [],
'com.drawelements.deqp.gles31' : []}
def BuildCtsFlakyTestList():
""" Construct a defaultdict that maps package name to a list of tests
that are known to be flaky in the lab or not passing on userdebug builds. """
return {
'android.app' : [
'cts.ActivityManagerTest#testIsRunningInTestHarness',],
'android.dpi' : [
'cts.DefaultManifestAttributesSdkTest#testPackageHasExpectedSdkVersion',],
'android.hardware' : [
'cts.CameraTest#testVideoSnapshot',
'cts.CameraGLTest#testCameraToSurfaceTextureMetadata',
'cts.CameraGLTest#testSetPreviewTextureBothCallbacks',
'cts.CameraGLTest#testSetPreviewTexturePreviewCallback',],
'android.media' : [
'cts.DecoderTest#testCodecResetsH264WithSurface',
'cts.StreamingMediaPlayerTest#testHLS',],
'android.net' : [
'cts.ConnectivityManagerTest#testStartUsingNetworkFeature_enableHipri',
'cts.DnsTest#testDnsWorks',
'cts.SSLCertificateSocketFactoryTest#testCreateSocket',
'cts.SSLCertificateSocketFactoryTest#test_createSocket_bind',
'cts.SSLCertificateSocketFactoryTest#test_createSocket_simple',
'cts.SSLCertificateSocketFactoryTest#test_createSocket_wrapping',
'cts.TrafficStatsTest#testTrafficStatsForLocalhost',
'wifi.cts.NsdManagerTest#testAndroidTestCaseSetupProperly',],
'android.os' : [
'cts.BuildVersionTest#testReleaseVersion',
'cts.BuildTest#testIsSecureUserBuild',],
'android.security' : [
'cts.BannedFilesTest#testNoSu',
'cts.BannedFilesTest#testNoSuInPath',
'cts.ListeningPortsTest#testNoRemotelyAccessibleListeningUdp6Ports',
'cts.ListeningPortsTest#testNoRemotelyAccessibleListeningUdpPorts',
'cts.PackageSignatureTest#testPackageSignatures',
'cts.SELinuxDomainTest#testSuDomain',
'cts.SELinuxHostTest#testAllEnforcing',],
'android.webkit' : [
'cts.WebViewClientTest#testOnUnhandledKeyEvent',],
'com.android.cts.filesystemperf' : [
'RandomRWTest#testRandomRead',
'RandomRWTest#testRandomUpdate',],
'' : []}
def LogGenerateDescription(name):
print 'Generating test description for package %s' % name
if __name__ == '__main__':
builder = CtsBuilder(sys.argv)
result = builder.GenerateTestDescriptions()
if result != 0:
sys.exit(result)
builder.GenerateTestPlans()
| s20121035/rk3288_android5.1_repo | cts/tools/utils/buildCts.py | Python | gpl-3.0 | 16,411 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
'''
# This file is part of Matching Pursuit Python program (python-MP).
#
# python-MP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# python-MP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with python-MP. If not, see <http://www.gnu.org/licenses/>.
author: Tomasz Spustek
e-mail: [email protected]
University of Warsaw, July 06, 2015
'''
import numpy as np
import scipy.stats as scp
import matplotlib.pyplot as plt
from scipy.io import loadmat
from src.dictionary import tukey
def generateTestSignal(gaborParams , sinusParams , asymetricWaveformsAParams , rectangularWaveformsAParams , numberOfSamples , samplingFrequency , noiseRatio , silenceFlag = 1):
'''
gaborParams - numpy array (as for gaborFunction) or None
sinusParams - numpy array of amplitude-frequency-phase trios or None
asymetricWaveformsA - numpy array of ...
rectangularWaveformsAParams - ...
noiseRatio - float (0 - 1)
'''
time = np.arange(0,numberOfSamples)
signal = np.squeeze(np.zeros((numberOfSamples,1)))
ind1 = 0
if gaborParams is not None:
for gabor in gaborParams:
(tmp,time) = gaborFunction(gabor)
signal += tmp
ind1 += 1
if silenceFlag == 0:
print '{} gabors generated'.format(ind1)
ind1 = 0
if sinusParams is not None:
for param in sinusParams:
freq = (param[1] / (0.5*samplingFrequency) ) * np.pi
signal += np.array(param[0] * np.sin(freq * time + param[2]))
ind1 += 1
if silenceFlag == 0:
print '{} sinusoids generated'.format(ind1)
ind1 = 0
if asymetricWaveformsAParams is not None:
for asym in asymetricWaveformsAParams:
amplitude = asym[0]
freq = (asym[1] / (0.5*samplingFrequency) ) * np.pi
pos = asym[2]
sigma = asym[3]
asymetry = asym[4]
x = np.linspace(scp.lognorm.ppf(0.0001, asymetry),scp.lognorm.ppf(0.9999, asymetry), sigma)
envelope = scp.lognorm.pdf(x, asymetry)
tmp = np.squeeze(np.zeros((numberOfSamples,1)))
tmp[pos:pos+sigma] = amplitude * envelope
tmp = tmp * np.cos(freq * time)
signal += tmp
ind1 += 1
if silenceFlag == 0:
print '{} asymmetrical waveforms generated'.format(ind1)
ind1 = 0
if rectangularWaveformsAParams is not None:
for rect in rectangularWaveformsAParams:
amplitude = rect[0]
freq = (rect[1] / (0.5*samplingFrequency) ) * np.pi
pos = rect[2]
sigma = rect[3]
r = rect[4]
envelope = tukey(sigma, r)
tmp = np.squeeze(np.zeros((numberOfSamples,1)))
tmp[pos:pos+sigma] = amplitude * envelope
tmp = tmp * np.cos(freq * time)
signal += tmp
ind1 += 1
if silenceFlag == 0:
print '{} rectangular waveforms generated'.format(ind1)
return (signal , time)
def gaborFunction(params):
'''
params: numpy array containing:
numberOfSamples in [samples]
samplingFreq in [Hz]
atomFreq in [Hz]
width in [s]
position in [s]
amplitude in [au]
phase in [rad]
'''
numberOfSamples = params[0]
samplingFreq = params[1]
amplitude = params[2]
position = params[3] * samplingFreq
width = params[4] * samplingFreq
frequency = (params[5] / (0.5*samplingFreq) ) * np.pi
phase = params[6]
time = np.arange(0,numberOfSamples)
signal = np.array(amplitude * np.exp(-np.pi*((time-position)/width)**2) * np.cos(frequency*(time-position)+phase))
return (signal , time)
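# Illustrative call (parameter values are made up): a 10 Hz Gabor atom of
# width 0.25 s centred at 2 s in a 4 s signal sampled at 250 Hz:
# (signal , time) = gaborFunction(np.array([1000 , 250.0 , 1.0 , 2.0 , 0.25 , 10.0 , 0.0]))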
def simpleValues():
numberOfSamples = 1000
samplingFreq = 250.0
amplitude = 12.0
position1 = 3.0
position2 = 1.0
width = 0.5
frequency1 = 12.0
frequency2 = 15.0
phase = 0.0
gaborParams = np.array([[numberOfSamples,samplingFreq,amplitude,position1,width,frequency1,phase],[numberOfSamples,samplingFreq,amplitude,position2,width,frequency2,phase]])
sinusParams = np.array([[5.0,5.0,0.0]])
noiseRatio = 0.0
return (gaborParams , sinusParams , None , None , noiseRatio , samplingFreq , numberOfSamples)
def advancedValues():
numberOfSamples = 1000
samplingFreq = 250.0
amplitude1 = 12
amplitude2 = 20
freq1 = 10.0
freq2 = 20.0
pos1 = 250
pos2 = 500
sigma = 500
asymetry = 0.45
asymetricParams = np.array([[amplitude1,freq1,pos1,sigma,asymetry],[amplitude2,freq2,pos2,sigma,asymetry]])
sinusParams = np.array([[2.0,5.0,0.0]])
noiseRatio = 0.0
return(None , sinusParams , asymetricParams , None , noiseRatio , samplingFreq , numberOfSamples)
def masterValues():
numberOfSamples = 2000
samplingFreq = 250.0
amplitude1 = 15
amplitude2 = 20
amplitude3 = 10
freq1 = 5.0
freq2 = 10.0
freq3 = 15.0
pos1 = 2.0
pos2 = 1000
pos3 = 1500
sigma1 = 0.5
sigma2 = 500
sigma3 = 300
asymetry = 0.45
rectangularity = 0.25
gaborParams = np.array([[numberOfSamples,samplingFreq,amplitude1,pos1,sigma1,freq1,0]])
asymetricParams = np.array([[amplitude2,freq2,pos2,sigma2,asymetry]])
rectParams = np.array([[amplitude3,freq3,pos3,sigma3,rectangularity]])
sinusParams = np.array([[2.0,5.0,0.0]])
noiseRatio = 0.0
return(gaborParams , sinusParams , asymetricParams , rectParams , noiseRatio , samplingFreq , numberOfSamples)
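# Illustrative usage sketch (not from the original module): build the
# "master" test signal defined above and plot it with matplotlib, which
# this module already imports as plt. Axis labels are assumptions.
def exampleMasterSignal():
    (gaborParams , sinusParams , asymetricParams , rectParams ,
     noiseRatio , samplingFreq , numberOfSamples) = masterValues()
    (signal , time) = generateTestSignal(gaborParams , sinusParams ,
        asymetricParams , rectParams , numberOfSamples , samplingFreq ,
        noiseRatio)
    plt.plot(time / samplingFreq , signal)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude [au]')
    plt.show()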
def loadSyntheticSigmalFromEEGLABFile(nameOfFile):
structure = loadmat(nameOfFile)
data = structure['EEG']['data'][0][0]
data = data.transpose([2,0,1])
info = {}
info['samplingFreq'] = structure['EEG']['srate'][0][0][0][0]
info['numberOfChannels'] = structure['EEG']['nbchan'][0][0][0][0]
info['numberOfSamples'] = structure['EEG']['pnts'][0][0][0][0]
info['numberOfSeconds'] = structure['EEG']['pnts'][0][0][0][0] / info['samplingFreq']
info['numberOfTrials'] = structure['EEG']['trials'][0][0][0][0]
# print structure['EEG']['chanlocs'][0][0][0,2]
time = np.arange(0 , info['numberOfSeconds'] , 1./info['samplingFreq'])
return (data , time , info)
| tspus/python-matchingPursuit | data/signalGenerator.py | Python | gpl-3.0 | 6,621 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2007-2011 Tualatrix Chou <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import glob
import logging
import ConfigParser
from lxml import etree
log = logging.getLogger('CommonSetting')
class RawConfigSetting(object):
'''Just pass the file path'''
def __init__(self, path, type=type):
self._type = type
self._path = path
self.init_configparser()
def _type_convert_set(self, value):
if type(value) == bool:
if value == True:
value = 'true'
elif value == False:
value = 'false'
# This is a hard-coded str type, so return '"xxx"' instead of 'xxx'
if self._type == str:
value = "'%s'" % value
return value
def _type_convert_get(self, value):
if value == 'false':
value = False
elif value == 'true':
value = True
# This is a hard-coded str type, so return '"xxx"' instead of 'xxx'
if self._type == str or type(value) == str:
if (value.startswith('"') and value.endswith('"')) or \
(value.startswith("'") and value.endswith("'")):
value = eval(value)
return value
def init_configparser(self):
self._configparser = ConfigParser.ConfigParser()
self._configparser.read(self._path)
def sections(self):
return self._configparser.sections()
def options(self, section):
return self._configparser.options(section)
def set_value(self, section, option, value):
value = self._type_convert_set(value)
if not self._configparser.has_section(section):
self._configparser.add_section(section)
self._configparser.set(section, option, value)
with open(self._path, 'wb') as configfile:
self._configparser.write(configfile)
self.init_configparser()
def get_value(self, section, option):
if self._type:
if self._type == int:
getfunc = getattr(self._configparser, 'getint')
elif self._type == float:
getfunc = getattr(self._configparser, 'getfloat')
elif self._type == bool:
getfunc = getattr(self._configparser, 'getboolean')
else:
getfunc = getattr(self._configparser, 'get')
value = getfunc(section, option)
else:
log.debug("No type message, so use the generic get")
value = self._configparser.get(section, option)
value = self._type_convert_get(value)
return value
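# Illustrative example (the file path is hypothetical):
# cs = RawConfigSetting('/tmp/example.ini', type=bool)
# cs.set_value('main', 'enabled', True) # stored as the string 'true'
# cs.get_value('main', 'enabled') # -> True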
class Schema(object):
cached_schema = {}
cached_schema_tree = {}
cached_override = {}
@classmethod
def load_override(cls):
log.debug("\tLoading override")
for override in glob.glob('/usr/share/glib-2.0/schemas/*.gschema.override'):
try:
cs = RawConfigSetting(override)
for section in cs.sections():
cls.cached_override[section] = {}
for option in cs.options(section):
cls.cached_override[section][option] = cs.get_value(section, option)
except Exception, e:
log.error('Error while parsing override file: %s' % override)
@classmethod
def load_schema(cls, schema_id, key):
log.debug("Loading schema value for: %s/%s" % (schema_id, key))
if not cls.cached_override:
cls.load_override()
if schema_id in cls.cached_override and \
key in cls.cached_override[schema_id]:
return cls.cached_override[schema_id][key]
if schema_id in cls.cached_schema and \
key in cls.cached_schema[schema_id]:
return cls.cached_schema[schema_id][key]
schema_defaults = {}
for schema_path in glob.glob('/usr/share/glib-2.0/schemas/*'):
if not schema_path.endswith('.gschema.xml') and not schema_path.endswith('.enums.xml'):
#TODO deal with enums
continue
if schema_path in cls.cached_schema_tree:
tree = cls.cached_schema_tree[schema_path]
else:
tree = etree.parse(open(schema_path))
for schema_node in tree.findall('schema'):
if schema_node.attrib.get('id') == schema_id:
for key_node in schema_node.findall('key'):
if key_node.findall('default'):
schema_defaults[key_node.attrib['name']] = cls.parse_value(key_node)
else:
continue
cls.cached_schema[schema_id] = schema_defaults
if key in schema_defaults:
return schema_defaults[key]
else:
return None
@classmethod
def parse_value(cls, key_node):
log.debug("Try to get type for value: %s" % key_node.items())
value = key_node.find('default').text
#TODO enum type
if key_node.attrib.get('type'):
type = key_node.attrib['type']
if type == 'b':
if value == 'true':
return True
else:
return False
elif type == 'i':
return int(value)
elif type == 'd':
return float(value)
elif type == 'as':
return eval(value)
return eval(value)
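# Illustrative example (schema id and key are hypothetical):
# Schema.load_schema('org.gnome.desktop.interface', 'clock-show-date')
# checks any *.gschema.override values first, then falls back to the
# <default> element found in the installed *.gschema.xml files.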
| wzguo/youker-assistant | backends/youker-assistant-daemon/src/beautify/common.py | Python | gpl-3.0 | 6,224 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
#
# Authors: Eray Ozkural <[email protected]>
# Gurer Ozen <[email protected]>
# Bahadir Kandemir <[email protected]>
# Baris Metin <[email protected]>
"""
autoxml is a metaclass for automatic XML translation, using
a miniature type system. (w00t!) This is based on an excellent
high-level XML processing prototype that Gurer prepared.
Method names are mixedCase for compatibility with minidom,
an old library.
"""
# System
import locale
import codecs
import types
import formatter
import sys
from StringIO import StringIO
import gettext
__trans = gettext.translation('pisi', fallback=True)
_ = __trans.ugettext
# PiSi
import pisi
from pisi.exml.xmlext import *
from pisi.exml.xmlfile import XmlFile
import pisi.context as ctx
import pisi.util as util
import pisi.oo as oo
class Error(pisi.Error):
pass
# requirement specs
mandatory, optional = range(2) # poor man's enum
# basic types
String = types.StringType
Text = types.UnicodeType
Integer = types.IntType
Long = types.LongType
Float = types.FloatType
#class datatype(type):
# def __init__(cls, name, bases, dict):
# """entry point for metaclass code"""
# # standard initialization
# super(autoxml, cls).__init__(name, bases, dict)
class LocalText(dict):
"""Handles XML tags with localized text"""
def __init__(self, tag = "", req = optional):
self.tag = tag
self.req = req
dict.__init__(self)
def decode(self, node, errs, where = ""):
# flags, tag name, instance attribute
assert self.tag != ''
nodes = getAllNodes(node, self.tag)
if not nodes:
if self.req == mandatory:
errs.append(where + ': ' + _("At least one '%s' tag should have local text") %
self.tag )
else:
for node in nodes:
lang = getNodeAttribute(node, 'xml:lang')
c = getNodeText(node)
if not c:
errs.append(where + ': ' + _("'%s' language of tag '%s' is empty") %
(lang, self.tag))
# FIXME: check for dups and 'en'
if not lang:
lang = 'en'
self[lang] = c
def encode(self, node, errs):
assert self.tag != ''
for key in self.iterkeys():
newnode = addNode(node, self.tag)
setNodeAttribute(newnode, 'xml:lang', key)
addText(newnode, '', self[key].encode('utf8'))
#FIXME: maybe more appropriate for pisi.util
@staticmethod
def get_lang():
try:
(lang, encoding) = locale.getlocale()
if not lang:
(lang, encoding) = locale.getdefaultlocale()
if lang==None: # stupid python means it is C locale
return 'en'
else:
return lang[0:2]
except:
raise Error(_('LocalText: unable to get either current or default locale'))
def errors(self, where = unicode()):
errs = []
langs = [ LocalText.get_lang(), 'en', 'tr', ]
if not util.any(lambda x : self.has_key(x), langs):
errs.append( where + ': ' + _("Tag should have at least the current locale, or failing that an English or Turkish version"))
#FIXME: check if all entries are unicode
return errs
def format(self, f, errs):
L = LocalText.get_lang()
if self.has_key(L):
f.add_flowing_data(self[L])
elif self.has_key('en'):
# fallback to English, blah
f.add_flowing_data(self['en'])
elif self.has_key('tr'):
# fallback to Turkish
f.add_flowing_data(self['tr'])
else:
errs.append(_("Tag should have at least the current locale, or failing that an English or Turkish version"))
#FIXME: factor out these common routines
def print_text(self, file = sys.stdout):
w = Writer(file) # plain text
f = formatter.AbstractFormatter(w)
errs = []
self.format(f, errs)
if errs:
for x in errs:
ctx.ui.warning(x)
def __str__(self):
L = LocalText.get_lang()
if self.has_key(L):
return self[L]
elif self.has_key('en'):
# fallback to English, blah
return self['en']
elif self.has_key('tr'):
# fallback to Turkish
return self['tr']
else:
return ""
class Writer(formatter.DumbWriter):
"""adds unicode support"""
def __init__(self, file=None, maxcol=78):
formatter.DumbWriter.__init__(self, file, maxcol)
def send_literal_data(self, data):
self.file.write(data.encode("utf-8"))
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
class autoxml(oo.autosuper, oo.autoprop):
"""High-level automatic XML transformation interface for xmlfile.
The idea is to declare a class for each XML tag. Inside the
class the tags and attributes nested in the tag are further
elaborated. A simple example follows:
class Employee:
__metaclass__ = autoxml
t_Name = [xmlfile.Text, xmlfile.mandatory]
a_Type = [xmlfile.Integer, xmlfile.optional]
This class defines a tag and an attribute nested in Employee
class. Name is a string and type is an integer, called basic
types.
While the tag is mandatory, the attribute may be left out.
Other basic types supported are: xmlfile.Float, xmlfile.Double
and (not implemented yet): xmlfile.Binary
By default, the class name is taken as the corresponding tag,
which may be overridden by defining a tag attribute. Thus,
the same tag may also be written as:
class EmployeeXML:
...
tag = 'Employee'
...
In addition to basic types, we allow for two kinds of complex
types: class types and list types.
A declared class can be nested in another class as follows
class Position:
__metaclass__ = autoxml
t_Name = [xmlfile.Text, xmlfile.mandatory]
t_Description = [xmlfile.Text, xmlfile.optional]
which we can add to our Employee class.
class Employee:
__metaclass__ = autoxml
t_Name = [xmlfile.Text, xmlfile.mandatory]
a_Type = [xmlfile.Integer, xmlfile.optional]
t_Position = [Position, xmlfile.mandatory]
Note some unfortunate redundancy here with Position; this is
justified by the implementation (kidding). Still, you might
want to assign a different name than the class name that
goes in there, which may be fully qualified.
There is more! Suppose we want to define a company, with
of course many employees.
class Company:
__metaclass__ = autoxml
t_Employees = [ [Employee], xmlfile.mandatory, 'Employees/Employee']
Logically, inside the Company/Employees tag, we will have several
Employee tags, which are inserted to the Employees instance variable of
Company in order of appearance. We can define lists of any other valid
type. Here we used a list of an autoxml class defined above.
The mandatory flag here asserts that at least one such record
is to be found.
You see, it works like magic, when it works of course. All of it
done without a single brain exploding.
"""
def __init__(cls, name, bases, dict):
"""entry point for metaclass code"""
#print 'generating class', name
# standard initialization
super(autoxml, cls).__init__(name, bases, dict)
xmlfile_support = XmlFile in bases
cls.autoxml_bases = filter(lambda base: isinstance(base, autoxml), bases)
#TODO: initialize class attribute __xml_tags
#setattr(cls, 'xml_variables', [])
# default class tag is class name
if not dict.has_key('tag'):
cls.tag = name
# generate helper routines, for each XML component
names = []
inits = []
decoders = []
encoders = []
errorss = []
formatters = []
# read declaration order from source
# code contributed by bahadir kandemir
from inspect import getsourcelines
from itertools import ifilter
import re
fn = re.compile('\s*([tas]_[a-zA-Z]+).*').findall
lines = filter(fn, getsourcelines(cls)[0])
decl_order = map(lambda x:x.split()[0], lines)
# there should be at most one str member, and it should be
# the first to process
order = filter(lambda x: not x.startswith('s_'), decl_order)
# find string member
str_members = filter(lambda x:x.startswith('s_'), decl_order)
if len(str_members)>1:
raise Error('Only one str member can be defined')
elif len(str_members)==1:
order.insert(0, str_members[0])
for var in order:
if var.startswith('t_') or var.startswith('a_') or var.startswith('s_'):
name = var[2:]
if var.startswith('a_'):
x = autoxml.gen_attr_member(cls, name)
elif var.startswith('t_'):
x = autoxml.gen_tag_member(cls, name)
elif var.startswith('s_'):
x = autoxml.gen_str_member(cls, name)
(name, init, decoder, encoder, errors, format_x) = x
names.append(name)
inits.append(init)
decoders.append(decoder)
encoders.append(encoder)
errorss.append(errors)
formatters.append(format_x)
# generate top-level helper functions
cls.initializers = inits
def initialize(self, uri = None, keepDoc = False, tmpDir = '/tmp',
**args):
if xmlfile_support:
if args.has_key('tag'):
XmlFile.__init__(self, tag = args['tag'])
else:
XmlFile.__init__(self, tag = cls.tag)
for base in cls.autoxml_bases:
base.__init__(self)
#super(cls, self).__init__(tag = tag) cooperative shit disabled for now
for init in inits:#self.__class__.initializers:
init(self)
for x in args.iterkeys():
setattr(self, x, args[x])
# init hook
if hasattr(self, 'init'):
self.init(tag)
if xmlfile_support and uri:
self.read(uri, keepDoc, tmpDir)
cls.__init__ = initialize
cls.decoders = decoders
def decode(self, node, errs, where = unicode(cls.tag)):
for base in cls.autoxml_bases:
base.decode(self, node, errs, where)
for decode_member in decoders:#self.__class__.decoders:
decode_member(self, node, errs, where)
if hasattr(self, 'decode_hook'):
self.decode_hook(node, errs, where)
cls.decode = decode
cls.encoders = encoders
def encode(self, node, errs):
for base in cls.autoxml_bases:
base.encode(self, node, errs)
for encode_member in encoders:#self.__class__.encoders:
encode_member(self, node, errs)
if hasattr(self, 'encode_hook'):
self.encode_hook(node, errs)
cls.encode = encode
cls.errorss = errorss
        def errors(self, where = unicode(cls.tag)):
errs = []
for base in cls.autoxml_bases:
errs.extend(base.errors(self, where))
for errors in errorss:#self.__class__.errorss:
errs.extend(errors(self, where))
if hasattr(self, 'errors_hook'):
errs.extend(self.errors_hook(where))
return errs
cls.errors = errors
def check(self):
errs = self.errors()
if errs:
errs.append(_("autoxml.check: '%s' errors") % len(errs))
raise Error(*errs)
cls.check = check
cls.formatters = formatters
def format(self, f, errs):
for base in cls.autoxml_bases:
base.format(self, f, errs)
for formatter in formatters:#self.__class__.formatters:
formatter(self, f, errs)
cls.format = format
def print_text(self, file = sys.stdout):
w = Writer(file) # plain text
f = formatter.AbstractFormatter(w)
errs = []
self.format(f, errs)
if errs:
for x in errs:
ctx.ui.warning(x)
cls.print_text = print_text
if not dict.has_key('__str__'):
            def str(self):
                strfile = StringIO(u'')
                self.print_text(strfile)
                s = strfile.getvalue()
                strfile.close()
                return s
cls.__str__ = str
if not dict.has_key('__eq__'):
def equal(self, other):
# handle None
                if other is None:
                    return False # well, must be False at this point :)
for name in names:
try:
if getattr(self, name) != getattr(other, name):
return False
                except AttributeError:
return False
return True
def notequal(self, other):
return not self.__eq__(other)
cls.__eq__ = equal
cls.__ne__ = notequal
if xmlfile_support:
def read(self, uri, keepDoc = False, tmpDir = '/tmp',
sha1sum = False, compress = None, sign = None, copylocal = False):
"read XML file and decode it into a python object"
self.readxml(uri, tmpDir, sha1sum=sha1sum,
compress=compress, sign=sign, copylocal=copylocal)
errs = []
self.decode(self.rootNode(), errs)
if errs:
errs.append(_("autoxml.read: File '%s' has errors") % uri)
raise Error(*errs)
if hasattr(self, 'read_hook'):
self.read_hook(errs)
if not keepDoc:
self.unlink() # get rid of the tree
errs = self.errors()
if errs:
errs.append(_("autoxml.read: File '%s' has errors") % uri)
raise Error(*errs)
def write(self, uri, keepDoc = False, tmpDir = '/tmp',
sha1sum = False, compress = None, sign = None):
"encode the contents of the python object into an XML file"
errs = self.errors()
if errs:
errs.append(_("autoxml.write: object validation has failed"))
raise Error(*errs)
errs = []
self.newDocument()
self.encode(self.rootNode(), errs)
if hasattr(self, 'write_hook'):
self.write_hook(errs)
if errs:
errs.append(_("autoxml.write: File encoding '%s' has errors") % uri)
raise Error(*errs)
self.writexml(uri, tmpDir, sha1sum=sha1sum, compress=compress, sign=sign)
if not keepDoc:
self.unlink() # get rid of the tree
cls.read = read
cls.write = write
def gen_attr_member(cls, attr):
"""generate readers and writers for an attribute member"""
#print 'attr:', attr
spec = getattr(cls, 'a_' + attr)
tag_type = spec[0]
assert type(tag_type) == type(type)
def readtext(node, attr):
return getNodeAttribute(node, attr)
def writetext(node, attr, text):
#print 'write attr', attr, text
setNodeAttribute(node, attr, text)
anonfuns = cls.gen_anon_basic(attr, spec, readtext, writetext)
return cls.gen_named_comp(attr, spec, anonfuns)
def gen_tag_member(cls, tag):
"""generate helper funs for tag member of class"""
#print 'tag:', tag
spec = getattr(cls, 't_' + tag)
anonfuns = cls.gen_tag(tag, spec)
return cls.gen_named_comp(tag, spec, anonfuns)
def gen_tag(cls, tag, spec):
"""generate readers and writers for the tag"""
tag_type = spec[0]
if type(tag_type) is types.TypeType and \
autoxml.basic_cons_map.has_key(tag_type):
def readtext(node, tagpath):
#print 'read tag', node, tagpath
return getNodeText(node, tagpath)
def writetext(node, tagpath, text):
#print 'write tag', node, tagpath, text
addText(node, tagpath, text.encode('utf8'))
return cls.gen_anon_basic(tag, spec, readtext, writetext)
elif type(tag_type) is types.ListType:
return cls.gen_list_tag(tag, spec)
elif tag_type is LocalText:
return cls.gen_insetclass_tag(tag, spec)
elif type(tag_type) is autoxml or type(tag_type) is types.TypeType:
return cls.gen_class_tag(tag, spec)
else:
raise Error(_('gen_tag: unrecognized tag type %s in spec') %
str(tag_type))
def gen_str_member(cls, token):
"""generate readers and writers for a string member"""
spec = getattr(cls, 's_' + token)
tag_type = spec[0]
assert type(tag_type) == type(type)
def readtext(node, blah):
#node.normalize() # piksemel doesn't have this
return getNodeText(node)
def writetext(node, blah, text):
#print 'writing', text, type(text)
addText(node, "", text.encode('utf-8'))
anonfuns = cls.gen_anon_basic(token, spec, readtext, writetext)
return cls.gen_named_comp(token, spec, anonfuns)
def gen_named_comp(cls, token, spec, anonfuns):
"""generate a named component tag/attr. a decoration of
anonymous functions that do not bind to variable names"""
name = cls.mixed_case(token)
token_type = spec[0]
req = spec[1]
(init_a, decode_a, encode_a, errors_a, format_a) = anonfuns
def init(self):
"""initialize component"""
setattr(self, name, init_a())
def decode(self, node, errs, where):
"""decode component from DOM node"""
#print '*', name
setattr(self, name, decode_a(node, errs, where + '.' + unicode(name)))
def encode(self, node, errs):
"""encode self inside, possibly new, DOM node using xml"""
if hasattr(self, name):
value = getattr(self, name)
else:
value = None
encode_a(node, value, errs)
def errors(self, where):
"""return errors in the object"""
errs = []
if hasattr(self, name) and getattr(self, name) != None:
value = getattr(self,name)
errs.extend(errors_a(value, where + '.' + name))
else:
if req == mandatory:
errs.append(where + ': ' + _('Mandatory variable %s not available') % name)
return errs
def format(self, f, errs):
if hasattr(self, name):
value = getattr(self,name)
f.add_literal_data(token + ': ')
format_a(value, f, errs)
f.add_line_break()
else:
if req == mandatory:
errs.append(_('Mandatory variable %s not available') % name)
return (name, init, decode, encode, errors, format)
def mixed_case(cls, identifier):
"""helper function to turn token name into mixed case"""
        if identifier == "":
            return ""
        else:
            if identifier[0] == 'I':
                lowly = 'i' # str.lower() is locale-dependent (Turkish dotless i)
            else:
                lowly = identifier[0].lower()
return lowly + identifier[1:]
def tagpath_head_last(cls, tagpath):
"returns split of the tag path into last tag and the rest"
try:
lastsep = tagpath.rindex('/')
        except ValueError:
return ('', tagpath)
return (tagpath[:lastsep], tagpath[lastsep+1:])
def parse_spec(cls, token, spec):
"""decompose member specification"""
name = cls.mixed_case(token)
token_type = spec[0]
req = spec[1]
if len(spec)>=3:
path = spec[2] # an alternative path specified
elif type(token_type) is type([]):
if type(token_type[0]) is autoxml:
# if list of class, by default nested like in most PSPEC
path = token + '/' + token_type[0].tag
else:
                # if list of ordinary type, just use the token name
path = token
elif type(token_type) is autoxml:
# if a class, by default its tag
path = token_type.tag
else:
            path = token # otherwise it's just the same name as the token
return name, token_type, req, path
def gen_anon_basic(cls, token, spec, readtext, writetext):
"""Generate a tag or attribute with one of the basic
types like integer. This has got to be pretty generic
so that we can invoke it from the complex types such as Class
and List. The readtext and writetext arguments achieve
the DOM text access for this datatype."""
name, token_type, req, tagpath = cls.parse_spec(token, spec)
def initialize():
"""default value for all basic types is None"""
return None
def decode(node, errs, where):
"""decode from DOM node, the value, watching the spec"""
#text = unicode(readtext(node, token), 'utf8') # CRUFT FIXME
text = readtext(node, token)
#print 'decoding', token_type, text, type(text), '.'
if text:
try:
#print token_type, autoxml.basic_cons_map[token_type]
value = autoxml.basic_cons_map[token_type](text)
                except Exception:
value = None
errs.append(where + ': ' + _('Type mismatch: read text cannot be decoded'))
return value
else:
if req == mandatory:
errs.append(where + ': ' + _('Mandatory token %s not available') % token)
return None
def encode(node, value, errs):
"""encode given value inside DOM node"""
if value:
writetext(node, token, unicode(value))
else:
if req == mandatory:
errs.append(_('Mandatory token %s not available') % token)
def errors(value, where):
errs = []
if value and not isinstance(value, token_type):
errs.append(where + ': ' + _('Type mismatch. Expected %s, got %s') %
(token_type, type(value)) )
return errs
def format(value, f, errs):
"""format value for pretty printing"""
f.add_literal_data(unicode(value))
return initialize, decode, encode, errors, format
def gen_class_tag(cls, tag, spec):
"""generate a class datatype"""
name, tag_type, req, path = cls.parse_spec(tag, spec)
def make_object():
obj = tag_type.__new__(tag_type)
obj.__init__(tag=tag, req=req)
return obj
def init():
return make_object()
def decode(node, errs, where):
node = getNode(node, tag)
if node:
try:
obj = make_object()
obj.decode(node, errs, where)
return obj
except Error:
errs.append(where + ': '+ _('Type mismatch: DOM cannot be decoded'))
else:
if req == mandatory:
errs.append(where + ': ' + _('Mandatory argument not available'))
return None
def encode(node, obj, errs):
if node and obj:
try:
#FIXME: this doesn't look pretty
classnode = newNode(node, tag)
obj.encode(classnode, errs)
addNode(node, '', classnode)
except Error:
if req == mandatory:
# note: we can receive an error if obj has no content
errs.append(_('Object cannot be encoded'))
else:
if req == mandatory:
errs.append(_('Mandatory argument not available'))
def errors(obj, where):
return obj.errors(where)
def format(obj, f, errs):
try:
obj.format(f, errs)
except Error:
if req == mandatory:
errs.append(_('Mandatory argument not available'))
return (init, decode, encode, errors, format)
def gen_list_tag(cls, tag, spec):
"""generate a list datatype. stores comps in tag/comp_tag"""
name, tag_type, req, path = cls.parse_spec(tag, spec)
pathcomps = path.split('/')
comp_tag = pathcomps.pop()
list_tagpath = util.makepath(pathcomps, sep='/', relative=True)
if len(tag_type) != 1:
raise Error(_('List type must contain only one element'))
x = cls.gen_tag(comp_tag, [tag_type[0], mandatory])
(init_item, decode_item, encode_item, errors_item, format_item) = x
def init():
return []
def decode(node, errs, where):
l = []
nodes = getAllNodes(node, path)
#print node, tag + '/' + comp_tag, nodes
if len(nodes)==0 and req==mandatory:
errs.append(where + ': ' + _('Mandatory list empty'))
ix = 1
for node in nodes:
dummy = newNode(node, "Dummy")
addNode(dummy, '', node)
l.append(decode_item(dummy, errs, where + unicode("[%s]" % ix, 'utf8')))
#l.append(decode_item(node, errs, where + unicode("[%s]" % ix)))
ix += 1
return l
def encode(node, l, errs):
            if l:
for item in l:
if list_tagpath:
listnode = addNode(node, list_tagpath, branch = False)
else:
listnode = node
encode_item(listnode, item, errs)
#encode_item(node, item, errs)
else:
if req is mandatory:
errs.append(_('Mandatory list empty'))
def errors(l, where):
errs = []
ix = 1
for node in l:
errs.extend(errors_item(node, where + '[%s]' % ix))
ix += 1
return errs
def format(l, f, errs):
# TODO: indent here
ix = 1
length = len(l)
for node in l:
f.add_flowing_data(str(ix) + ': ')
format_item(node, f, errs)
if ix != length:
f.add_flowing_data(', ')
ix += 1
return (init, decode, encode, errors, format)
def gen_insetclass_tag(cls, tag, spec):
"""generate a class datatype that is highly integrated
don't worry if that means nothing to you. this is a silly
hack to implement local text quickly. it's not the most
elegant thing in the world. it's basically a copy of
class tag"""
name, tag_type, req, path = cls.parse_spec(tag, spec)
def make_object():
obj = tag_type.__new__(tag_type)
obj.__init__(tag=tag, req=req)
return obj
def init():
return make_object()
def decode(node, errs, where):
if node:
try:
obj = make_object()
obj.decode(node, errs, where)
return obj
except Error:
errs.append(where + ': ' + _('Type mismatch: DOM cannot be decoded'))
else:
if req == mandatory:
errs.append(where + ': ' + _('Mandatory argument not available'))
return None
def encode(node, obj, errs):
if node and obj:
try:
#FIXME: this doesn't look pretty
obj.encode(node, errs)
except Error:
if req == mandatory:
# note: we can receive an error if obj has no content
errs.append(_('Object cannot be encoded'))
else:
if req == mandatory:
errs.append(_('Mandatory argument not available'))
def errors(obj, where):
return obj.errors(where)
def format(obj, f, errs):
try:
obj.format(f, errs)
except Error:
if req == mandatory:
errs.append(_('Mandatory argument not available'))
return (init, decode, encode, errors, format)
basic_cons_map = {
types.StringType : str,
#TODO: python 3.x: same behavior?
#python 2.x: basic_cons_map[unicode](a) where a is unicode str yields
#TypeError: decoding Unicode is not supported
#types.UnicodeType : lambda x: unicode(x,'utf8'), lambda x:x?
types.UnicodeType : lambda x:x, #: unicode
types.IntType : int,
types.FloatType : float,
types.LongType : long
}
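# Editorial usage sketch (not part of the original module): declaring a
# minimal autoxml class, mirroring the docstring above. Assumes the sibling
# pisi.exml.xmlfile module provides XmlFile, Text, Integer and the
# mandatory/optional markers used throughout this file.
#
# from pisi.exml import xmlfile, autoxml
#
# class Employee(xmlfile.XmlFile):
#     __metaclass__ = autoxml.autoxml
#     tag = "Employee"
#     t_Name = [xmlfile.Text, xmlfile.mandatory]
#     a_Type = [xmlfile.Integer, xmlfile.optional]
#
# e = Employee()
# e.name = u"Ada"
# print e.errors()   # [] once every mandatory member is set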
| examachine/pisi | pisi/exml/autoxml.py | Python | gpl-3.0 | 31,161 |
# urllib3/util.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from __future__ import absolute_import
from base64 import b64encode
from socket import error as SocketError
from hashlib import md5, sha1
from binascii import hexlify, unhexlify
import sys
from core.backports.collections import namedtuple
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
try: # Test for SSL features
SSLContext = None
HAS_SNI = False
import ssl
from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
from ssl import SSLContext # Modern SSL?
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
from .packages import six
from .exceptions import LocationParseError, SSLError
class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
    __slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example: ::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example: ::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
# on CPython.
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
auth, url = url.split('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url[1:].split(']', 1)
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if not port.isdigit():
raise LocationParseError("Failed to parse: %s" % url)
port = int(port)
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
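# Editorial note (sketch, not in the original module): parse_url also copes
# with auth and IPv6 hosts, e.g.
#     parse_url('http://user@[::1]:8080/x?y#z')
# yields Url(scheme='http', auth='user', host='::1', port=8080, path='/x',
#            query='y', fragment='z').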
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = 'gzip,deflate'
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
return headers
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if not sock: # Platform-specific: AppEngine
return False
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except SocketError:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
    for (fno, ev) in p.poll(0.0):
        if fno == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True
    return False
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
    :mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_NONE
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'CERT_' + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
    Like resolve_cert_reqs, but resolves the argument to an ssl protocol
    version constant. Defaults to :data:`ssl.PROTOCOL_SSLv23`.
"""
if candidate is None:
return PROTOCOL_SSLv23
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, 'PROTOCOL_' + candidate)
return res
return candidate
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
# Maps the length of a digest to a possible hash function producing
# this digest.
hashfunc_map = {
16: md5,
20: sha1
}
fingerprint = fingerprint.replace(':', '').lower()
digest_length, rest = divmod(len(fingerprint), 2)
if rest or digest_length not in hashfunc_map:
raise SSLError('Fingerprint is of invalid length.')
    # We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
hashfunc = hashfunc_map[digest_length]
cert_digest = hashfunc(cert).digest()
if not cert_digest == fingerprint_bytes:
raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
.format(hexlify(fingerprint_bytes),
hexlify(cert_digest)))
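# Editorial usage sketch (not part of the original module): fingerprints are
# compared after stripping colons, with the hash picked by digest length
# (16 bytes -> md5, 20 -> sha1). A matching md5 fingerprint of a fake
# "certificate" byte string passes silently:
#
# cert = b"not a real DER certificate"
# fp = md5(cert).hexdigest()
# assert_fingerprint(cert, ":".join(fp[i:i + 2] for i in range(0, len(fp), 2)))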
if SSLContext is not None: # Python 3.2+
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
"""
All arguments except `server_hostname` have the same meaning as for
:func:`ssl.wrap_socket`
:param server_hostname:
Hostname of the expected certificate
"""
context = SSLContext(ssl_version)
context.verify_mode = cert_reqs
if ca_certs:
try:
context.load_verify_locations(ca_certs)
# Py32 raises IOError
# Py33 raises FileNotFoundError
except Exception: # Reraise as SSLError
e = sys.exc_info()[1]
raise SSLError(e)
if certfile:
# FIXME: This block needs a test.
context.load_cert_chain(certfile, keyfile)
if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI
return context.wrap_socket(sock, server_hostname=server_hostname)
return context.wrap_socket(sock)
else: # Python 3.1 and earlier
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
ca_certs=None, server_hostname=None,
ssl_version=None):
return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
ca_certs=ca_certs, cert_reqs=cert_reqs,
ssl_version=ssl_version)
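# Editorial usage sketch (not part of the original module): wrapping a fresh
# TCP connection with verification enabled, assuming a CA bundle exists at
# the given (hypothetical) path.
#
# import socket
# raw = socket.create_connection(("example.org", 443))
# tls = ssl_wrap_socket(raw,
#                       cert_reqs=resolve_cert_reqs("REQUIRED"),
#                       ca_certs="/etc/ssl/certs/ca-certificates.crt",
#                       server_hostname="example.org",
#                       ssl_version=resolve_ssl_version(None))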
| ryfx/modrana | core/backports/urllib3_python25/util.py | Python | gpl-3.0 | 11,071 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Janice Cheng
import boto3
# Get the service resource
sqs = boto3.resource('sqs')
# Get the queue. This returns an SQS.Queue instance
queue = sqs.get_queue_by_name(QueueName='test')
# You can now access identifiers and attributes
print(queue.url)
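# A hedged editorial sketch (not in the original script): the same Queue
# resource can send messages; Queue.send_message returns a response dict
# that includes the new message's MessageId.
response = queue.send_message(MessageBody='hello from use_queue.py')
print(response.get('MessageId'))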
print(queue.attributes.get('DelaySeconds'))
| jcchoiling/learningPython | aws/sqs/use_queue.py | Python | gpl-3.0 | 349 |
import cryspy.numbers
import cryspy.geo
class Goniometer:
def __init__(self, motiontype, axis, direction, parametername):
assert motiontype in ["translation", "rotation"], \
"First parameter for creating a Goniometer " \
"must be one of the strings " \
"'translation' or 'rotation'."
assert axis in ["x", "y", "z"], \
"Second parameter for creating a Goniometer " \
"must be one of the strings 'x', 'y' or 'z'"
if motiontype == "translation":
assert direction in ["positive", "negative"], \
"Third parameter for creating a Goniometer " \
"for translation must be one of the strings " \
"'positive' or 'negative'"
elif motiontype == "rotation":
assert direction in ["clockwise", "counterclockwise"], \
"Third parameter for creating a Goniometer for " \
"rotation must be one of the strings "\
"'clockwise' or 'counterclockwise'"
assert isinstance(parametername, str), \
"Fourth parameter for creating a Goniometer must be " \
"of type str. You can use any string."
self.composed = False
self.motiontype = motiontype
self.axis = axis
self.direction = direction
self.parameternames = [parametername]
def operator(self, parameters):
assert isinstance(parameters, dict), \
"Parameter of cryspy.lab.Goniometer.operator() must be a " \
"dictionary"
if not self.composed:
# assert len(parameters) == 1, \
# "A Goniometer which is not composed can have only one " \
# "parameter."
# parametername = list(parameters.keys())[0]
assert self.parameternames[0] in parameters.keys(), \
"You must specify the parameter called '%s'."\
%(self.parameternames[0])
parameter = parameters[self.parameternames[0]]
if self.motiontype == "translation":
if self.direction == "negative":
parameter = -parameter
if self.axis == "x":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, parameter],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
]
)
)
if self.axis == "y":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, 0],
[0, 1, 0, parameter],
[0, 0, 1, 0],
[0, 0, 0, 1]
]
)
)
if self.axis == "z":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, parameter],
[0, 0, 0, 1]
]
)
)
elif self.motiontype == "rotation":
if self.direction == "clockwise":
parameter = -parameter
cos = cryspy.numbers.dcos(parameter)
sin = cryspy.numbers.dsin(parameter)
if self.axis == "x":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[1, 0, 0, 0],
[0, cos, -sin, 0],
[0, sin, cos, 0],
[0, 0, 0, 1]
]
)
)
if self.axis == "y":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[ cos, 0, sin, 0],
[ 0, 1, 0, 0],
[-sin, 0, cos, 0],
[ 0, 0, 0, 1]
]
)
)
if self.axis == "z":
return cryspy.geo.Operator(
cryspy.numbers.Matrix(
[[cos, -sin, 0, 0],
[sin, cos, 0, 0],
[ 0, 0, 1, 0],
[ 0, 0, 0, 1]
]
)
)
else:
return cryspy.geo.Operator(
self.lower_gonio.operator(parameters).value
* self.upper_gonio.operator(parameters).value
)
def __str__(self):
if not self.composed:
if self.motiontype == "translation":
return " / translate by \\ \n" \
"| %16s |\n" \
"| along |\n" \
"| %s-axis |\n" \
" \\ %8s / "\
%(self.parameternames[0], self.axis, self.direction)
elif self.motiontype == "rotation":
return " / rotate by \\ \n" \
"| %16s |\n" \
"| around |\n" \
"| %s-axis |\n" \
" \\ %16s / "\
%(self.parameternames[0], self.axis, self.direction)
else:
return cryspy.blockprint.block([[str(self.lower_gonio), " \n \n*\n \n", str(self.upper_gonio)]])
def __mul__(self, right):
if isinstance(right, Goniometer):
for parametername in right.parameternames:
assert parametername not in self.parameternames, \
"Cannot multiply two Goniometers which have " \
"both the parameter '%s'."%(parametername)
result = Goniometer("translation", "x", "positive", "dummy")
result.composed = True
result.motiontype = None
result.axis = None
result.direction = None
result.parameternames = self.parameternames + right.parameternames
result.lower_gonio = self
result.upper_gonio = right
return result
else:
return NotImplemented
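# Editorial usage sketch (not part of the original module): composing two
# goniometer motions with distinct parameter names and evaluating the
# combined operator, assuming the cryspy package is importable.
#
# from cryspy.lab import Goniometer
# lift = Goniometer("translation", "z", "positive", "height")
# turn = Goniometer("rotation", "z", "counterclockwise", "phi")
# gonio = lift * turn
# op = gonio.operator({"height": 2, "phi": 90})  # lower operator * upper operator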
| tobias-froehlich/cryspy | cryspy/lab.py | Python | gpl-3.0 | 6,803 |
# Orbotor - arcade with orbit mechanics
# Copyright (C) 2014 mr555ru
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys # NOQA
import profile
import ConfigParser
import pygame
from pygame import *
from static_functions import *
import camera as camera
import planet as planet
from orbitable import GCD_Singleton, SoundSystem_Singleton
from helldebris_collection import HellDebrisCollection
from team import Team
from simplestats import SimpleStats
wwidth = 1024
wheight = 768
p1_name = "Player1"
p2_name = "Player2"
config = ConfigParser.RawConfigParser()
config.read('profile.cfg')
wwidth = config.getint("Screen", "width")
wheight = config.getint("Screen", "height")
p1_name = config.get("Player", "P1_name")
p2_name = config.get("Player", "P2_name")
display = (wwidth, wheight)
clock = pygame.time.Clock()
class Profile():
def __init__(self, is_player2_present=False,
is_player1_ai=False,
is_player2_ai=False,
player1_team="Green",
player2_team="Red",
greenteamsize=8,
redteamsize=8,
debris_min=6,
debris_max=20,
draw_planet=False,
name=""):
self.p2 = is_player2_present
self.p1_ai = is_player1_ai
self.p2_ai = is_player2_ai
self.p1_team = player1_team
self.p2_team = player2_team
mingreen = int(self.p1_team == "Green") + int(self.p2_team == "Green" and self.p2)
minred = int(self.p1_team == "Red") + int(self.p2_team == "Red" and self.p2)
self.green = max(mingreen, greenteamsize)
self.red = max(minred, redteamsize)
self.hellmin = debris_min
self.hellmax = debris_max
self.draw_planet = draw_planet
self.name = name
self.ERAD = 1000
self.MAXRAD = 1700
self.ORBHEIGHT = 350
def game_init(self):
pygame.init()
self.PROFILESTEP = False
        self.UPDATE_GAME = pygame.USEREVENT + 1
        pygame.time.set_timer(self.UPDATE_GAME, GAME_SPEED)
self.screen = pygame.display.set_mode(display)
if self.p2:
self.bg1 = Surface((wwidth, wheight/2))
self.bg2 = Surface((wwidth, wheight/2))
self.cam2 = camera.Camera(self.bg2, first_in_order=False)
self.bgs = (self.bg1, self.bg2)
else:
self.bg1 = Surface((wwidth, wheight))
self.bgs = (self.bg1,)
self.cam1 = camera.Camera(self.bg1)
if self.name == "":
pygame.display.set_caption("Orbotor")
else:
pygame.display.set_caption("Orbotor - %s" % self.name)
self.pl = planet.Planet(self.bgs, self.ERAD, self.MAXRAD, "planet.png" if self.draw_planet else None)
GCD_Singleton.set_planet(self.pl)
self.soundsys = SoundSystem_Singleton
self.spawn = (self.ERAD+self.ORBHEIGHT, 0)
self.team1 = Team("Green", "#009900", self.green, self.spawn, self.pl)
self.team2 = Team("Red", "#880000", self.red, self.spawn, self.pl)
self.team1.set_opponent_team(self.team2)
self.team2.set_opponent_team(self.team1)
self.hell = HellDebrisCollection(self.spawn, self.pl, self.hellmin, self.hellmax)
if self.p1_team == "Green":
self.player1 = self.team1.guys[0]
if self.p2:
if self.p2_team == "Green":
self.player2 = self.team1.guys[1]
elif self.p2_team == "Red":
self.player2 = self.team2.guys[0]
else:
raise Exception("unknown team for p2: %s" % self.p2_team)
elif self.p1_team == "Red":
            self.player1 = self.team2.guys[0]
if self.p2:
if self.p2_team == "Green":
self.player2 = self.team1.guys[0]
elif self.p2_team == "Red":
self.player2 = self.team2.guys[1]
else:
raise Exception("unknown team for p2: %s" % self.p2_team)
else:
raise Exception("unknown team for p1: %s" % self.p1_team)
self.player1.is_ai = self.p1_ai
if self.p1_ai:
self.player1.set_name("[bot] %s" % p1_name)
else:
self.player1.set_name("%s" % p1_name)
if self.p2:
self.player2.is_ai = self.p2_ai
if self.p2_ai:
self.player2.set_name("[bot] %s" % p2_name)
else:
self.player2.set_name("%s" % p2_name)
self.stats1 = SimpleStats(self.team1, self.team2, self.player1)
if self.p2:
self.stats2 = SimpleStats(self.team1, self.team2, self.player2)
def game_key_listen(self, event):
if event.type == KEYDOWN and event.key == K_F1:
self.PROFILESTEP = True
self.game_step()
elif event.type == KEYDOWN and event.key == K_F2:
print len(GCD_Singleton.orbitables)
elif event.type == KEYDOWN and event.key == K_F5:
self.soundsys.switch()
if not self.p1_ai:
self.player1.catch_kb_event(event)
if self.p2 and not self.p2_ai:
self.player2.catch_kb_event_hotseat(event)
self.cam1.keys_listen(event)
if self.p2:
self.cam2.keys_listen_hotseat(event)
def game_step(self):
if self.PROFILESTEP:
profile.runctx("self._step()", globals(), {"self": self})
else:
self._step()
def _step(self):
self.team2.step() # todo faster better stronger
self.team1.step()
self.hell.step()
self.player1.focus(self.cam1)
self.cam1.step()
if self.p2:
self.player2.focus(self.cam2)
self.cam2.step()
GCD_Singleton.step()
def game_draw(self):
if self.PROFILESTEP:
profile.runctx("self._draw()", globals(), {"self": self})
self.PROFILESTEP = False
else:
self._draw()
def _draw(self):
clock.tick(60)
tup = [self.pl, ] + self.team1.objectslist() + self.team2.objectslist()\
+ self.hell.objectslist() + self.pl.cities
tup = tuple(tup)
self.cam1.translate_coords(*tup)
if self.p2:
self.cam2.translate_coords(*tup)
self.stats1.draw(self.bg1)
self.screen.blit(self.bg1, (0, 0))
if self.p2:
self.stats2.draw(self.bg2)
self.screen.blit(self.bg2, (0, wheight/2))
pygame.display.update()
def DefaultProfile(draw_planet, hell):
return Profile(draw_planet=draw_planet, debris_min=hell[0], debris_max=hell[1])
def HotseatProfile(draw_planet, hell):
return Profile(is_player2_present=True, draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def RivalProfile(draw_planet, hell):
return Profile(is_player2_present=True, is_player2_ai=True, draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def CoopProfile(draw_planet, hell):
return Profile(is_player2_present=True, player2_team="Green", draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def SpectateProfile(draw_planet, hell):
return Profile(is_player1_ai=True, draw_planet=draw_planet,
debris_min=hell[0], debris_max=hell[1])
def SurvivalProfile(draw_planet):
return Profile(draw_planet=draw_planet, debris_min=35, debris_max=70,
greenteamsize=1, redteamsize=0)
def CoopSurvivalProfile(draw_planet):
return Profile(is_player2_present=True, player2_team="Green", draw_planet=draw_planet,
debris_min=35, debris_max=70, greenteamsize=2, redteamsize=0)
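# Editorial usage sketch (not part of the original module): a minimal event
# loop driving one of the profile factories above, assuming the asset files
# (e.g. planet.png) and the rest of the package are available.
#
# if __name__ == "__main__":
#     game = DefaultProfile(draw_planet=False, hell=(6, 20))
#     game.game_init()
#     while True:
#         for event in pygame.event.get():
#             if event.type == QUIT:
#                 sys.exit()
#             elif event.type == game.UPDATE_GAME:
#                 game.game_step()
#             else:
#                 game.game_key_listen(event)
#         game.game_draw()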
| mr555ru/orbotor | orbotor/gameprofile.py | Python | gpl-3.0 | 8,679 |
## Unquill: Copyright (C) 2003 Janez Demsar
##
## During development I peeked a lot at Unquill from John Elliott, 1996-2000.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import pickle
import time
from PyQt5 import QtCore, QtWidgets
from random import randint
class Quill:
class Event:
NIL, LOC, MSG, OBJ, SWAP, PLC = tuple(range(100, 106))
cond_ops = [("AT", "data.location_no == param1"),
("NOT AT", "data.location_no != param1"),
("AT GT", "data.location_no > param1"),
("AT LT", "data.location_no < param1"),
("PRESENT",
"data.objects[param1].location == data.location_no"),
("ABSENT",
"data.objects[param1].location != data.location_no"),
("WORN",
"data.objects[param1].location == data.Object.WORN"),
("NOT WORN",
"data.objects[param1].location != data.Object.WORN"),
("CARRIED",
"data.objects[param1].location == data.Object.CARRIED"),
("NOT CARR",
"data.objects[param1].location != data.Object.CARRIED"),
("CHANCE", "param1 < randint(1, 100)"),
("ZERO", "not data.flags[param1]"),
("NOT ZERO", "data.flags[param1]"),
("EQ", "data.flags[param1]==param2"),
("GT", "data.flags[param1]>param2"),
("LT", "data.flags[param1]<param2")]
ptas = {
0: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK",
"ANYKEY", "SAVE", "LOAD", "TURNS", "SCORE",
"PAUSE", "GOTO", "MESSAGE", "REMOVE", "GET",
"DROP", "WEAR", "DESTROY", "CREATE", "SWAP",
"SET", "CLEAR", "PLUS", "MINUS", "LET", "BEEP"],
[0] * 11 + [1] * 9 + [2, 1, 1] + [2]*16,
[NIL] * 12 + [LOC, MSG] + [OBJ] * 6 + [SWAP] + [NIL] * 18),
5: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK",
"ANYKEY", "SAVE", "LOAD", "TURNS", "SCORE",
"CLS", "DROPALL", "PAUSE", "PAPER", "INK",
"BORDER", "GOTO", "MESSAGE", "REMOVE", "GET",
"DROP", "WEAR", "DESTROY", "CREATE", "SWAP",
"PLACE", "SET", "CLEAR", "PLUS", "MINUS",
"LET", "BEEP"],
[0] * 13 + [1] * 12 + [2, 2, 1, 1] + [2] * 10,
[NIL] * 17 + [LOC, MSG] + [OBJ] * 6 + [SWAP, PLC] + [NIL]*12),
7: (["INVEN", "DESC", "QUIT", "END", "DONE", "OK",
"ANYKEY", "SAVE", "LOAD", "TURNS", "SCORE",
"CLS", "DROPALL", "AUTOG", "AUTOD", "AUTOW",
"AUTOR", "PAUSE", "PAPER", "INK", "BORDER",
"GOTO", "MESSAGE", "REMOVE", "GET", "DROP",
"WEAR", "DESTROY", "CREATE", "SWAP", "PLACE",
"SET", "CLEAR", "PLUS", "MINUS", "LET", "BEEP"],
[0] * 17 + [1] * 12 + [2, 2, 1] + [2] * 7,
[NIL] * 21 + [LOC, MSG] + [OBJ] * 6 + [SWAP, PLC] + [NIL] * 8)}
def __init__(self, sna, ptr, dbver=0):
self.act_ops, self.nparams, self.types = self.ptas[dbver]
self.word1 = sna[ptr]
self.word2 = sna[ptr + 1]
p = sna[ptr + 2] + 256 * sna[ptr + 3]
self.conditions = []
while sna[p] != 0xff:
opcode = sna[p]
param1 = sna[p + 1]
if opcode > 12:
param2 = sna[p + 2]
p += 3
else:
param2 = None
p += 2
self.conditions.append((opcode, param1, param2))
p += 1
self.actions = []
while sna[p] != 0xff:
opcode = sna[p]
nparams = self.nparams[opcode]
params = tuple(sna[p + 1:p + 1 + nparams])
self.actions.append((opcode, params))
p += 1 + nparams
# returns: -1 for error,
# 0 for not matching,
# 1 for matching and done (no further processing),
# 2 for matching, but process further
def __call__(self, data, system, word1, word2):
def match(w, sw):
return w == sw or (not w and sw == 255)
if system or match(word1, self.word1) and match(word2, self.word2):
for op, param1, param2 in self.conditions:
if not eval(self.cond_ops[op][1]):
return 0
for action in self.actions:
meth = getattr(data,
"do_" + self.act_ops[action[0]].lower())
res = meth(*action[1])
if res:
return res
return 2
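    # Editorial note (sketch, not from the original source): a populated
    # Quill instance dispatches tables of these Events via process_events,
    # e.g.
    #     self.process_events(self.responses, 0, self.vocabulary["VZEM"])
    # runs every response whose word pattern matches the verb VZEM ("take").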
class Location:
def __init__(self, description, conn=None):
self.description = description
self.connections = conn or {}
class Object:
INVALID, CARRIED, WORN, NOT_CREATED = 0xff, 0xfe, 0xfd, 0xfc
def __init__(self, description, initial=NOT_CREATED):
self.description = description
self.initial = self.location = initial
#######################################
# Actions
def do_get(self, param1):
loc = self.objects[param1].location
if loc == self.Object.WORN or loc == self.Object.CARRIED:
self.printout("To vendar že nosim!")
return -1
elif loc != self.location_no:
self.printout("Saj ni tukaj.")
return -1
elif self.flags[1] == self.nobjects_carry:
return -1
else:
self.objects[param1].location = self.Object.CARRIED
self.flags[1] += 1
def do_wear(self, param1):
loc = self.objects[param1].location
if loc == self.Object.WORN:
self.printout("To vendar že nosim!")
return -1
elif loc != self.Object.CARRIED:
self.printout("Tega sploh nimam!")
return -1
else:
self.objects[param1].location = self.Object.WORN
def do_drop(self, param1):
loc = self.objects[param1].location
if (loc == self.Object.WORN) or (loc == self.Object.CARRIED):
self.objects[param1].location = self.location_no
else:
self.printout("Tega sploh nimam.")
return -1
def do_remove(self, param1):
loc = self.objects[param1].location
if loc != self.Object.WORN:
self.printout("Tega sploh ne nosim!")
return -1
else:
self.objects[param1].location = self.Object.CARRIED
def do_dropall(self):
for obj in self.objects:
if obj.location == self.Object.WORN or \
obj.location == self.Object.CARRIED:
obj.location = self.location_no
self.flags[1] = 0
def do_goto(self, locno):
self.location = self.locations[locno]
self.location_no = locno
self.flags[2] = locno
def do_create(self, objno):
loc = self.objects[objno].location
if loc == self.Object.WORN or loc == self.Object.CARRIED:
self.flags[1] -= 1
self.objects[objno].location = self.location_no
def do_destroy(self, objno):
loc = self.objects[objno].location
if loc == self.Object.WORN or loc == self.Object.CARRIED:
self.flags[1] -= 1
self.objects[objno].location = self.Object.NOT_CREATED
def do_place(self, objno, locno):
loc = self.objects[objno].location
if loc == self.Object.WORN or loc == self.Object.CARRIED:
self.flags[1] -= 1
self.objects[objno].location = locno
def do_print(self, flagno):
if flagno > 47:
self.printout(self.flags[flagno] + 256 * self.flags[flagno+1])
else:
self.printout(self.flags[flagno])
def do_plus(self, flagno, no):
self.flags[flagno] += no
if self.flags[flagno] > 255:
if flagno > 47:
self.flags[flagno] -= 256
self.flags[flagno + 1] = (self.flags[flagno + 1] + 1) % 256
else:
self.flags[flagno] = 255
def do_minus(self, flagno, no):
self.flags[flagno] -= no
if self.flags[flagno] < 0:
if flagno > 47:
self.flags[flagno] += 256
self.flags[flagno + 1] -= 1
if self.flags[flagno] == -1:
self.flags[flagno] = 0
else:
self.flags[flagno] = 0
def do_inven(self):
inv = ""
for obj in self.objects:
if obj.location == Quill.Object.CARRIED:
inv += "<LI>%s</LI>" % obj.description
elif obj.location == Quill.Object.WORN:
inv += "<LI>%s (nosim)</LI>" % obj.description
        if inv:
            inv = "Prenašam pa tole:<UL>" + inv + "</UL>"
        else:
            inv = "Prenašam pa tole:<UL>pravzaprav nič</UL>"
self.printout(inv)
def do_message(self, msgno):
self.printout(self.messages[msgno])
do_mes = do_message
def do_set(self, flagno):
self.flags[flagno] = 255
def do_clear(self, flagno):
self.flags[flagno] = 0
def do_let(self, flagno, no):
self.flags[flagno] = no
def do_add(self, flg1, flg2):
return self.do_plus(flg1, self.flags[flg2])
def do_sum(self, flg1, flg2):
return self.do_minus(flg1, self.flags[flg2])
def do_swap(self, obj1, obj2):
self.objects[obj1].location, self.objects[obj2].location = \
self.objects[obj2].location, self.objects[obj1].location
def do_desc(self):
self.update_location()
def do_quit(self):
self.reset()
self.update_location()
def do_end(self):
self.anykey()
self.reset()
self.update_location()
def do_ok(self):
self.printout("OK")
return 1
@staticmethod
def do_done():
return 1
def do_anykey(self):
self.anykey()
def do_save(self):
self.printout("Shranjevati pa še ne znam ...")
def do_load(self):
self.printout("Nalagati pa znam ...")
def do_star(self, _):
self.printout("'STAR' ni implementiran")
def do_jsr(self, *_):
self.printout("'JSR' ni implementiran")
def do_sound(self, lsb, msg):
pass
def do_beep(self, lsb, msg):
pass
def do_turns(self):
self.printout("Ukazov dal si %4i zares<br>" % self.turns)
def do_score(self):
self.printout("Nabral si %i odstotkov<br>" % self.flags[30])
@staticmethod
def do_pause(s50):
time.sleep(s50/50)
def do_cls(self):
pass
#######################################
# Initialization from an .sna file
def __init__(self, name="kontra.sna", dbver=0):
def single_string(ptr):
# TODO: Simplify
s = ""
while sna[ptr] != 0xe0:
s += chr(255 - sna[ptr])
ptr += 1
return s
def word(ptr):
return sna[ptr] + 256 * sna[ptr + 1]
def get_sign_ptr():
sign_ptr = -1
while True:
sign_ptr = sna.find(b"\x10", sign_ptr + 1)
if sign_ptr == -1:
raise ValueError("Quill signature not found")
if sna[sign_ptr+2:sign_ptr+12:2] == b"\x11\x12\x13\x14\x15":
return sign_ptr
def read_vocabulary():
vocabulary = {}
index_to_word = []
pv = self.pvocabulary
while sna[pv]:
index = sna[pv + 4]
w = "".join(chr(255 - x) for x in sna[pv:pv + 4]).strip()
vocabulary[w] = index
if index >= len(index_to_word):
index_to_word += [None] * (index - len(index_to_word) + 1)
if not index_to_word[index]:
index_to_word[index] = w
pv += 5
return vocabulary, index_to_word
def get_cond_table(ptr):
events = []
while sna[ptr]:
events.append(self.Event(sna, ptr))
ptr += 4
return events
colors = ["#000000", "#0000ff", "#ff0000", "#ff00ff", "#00ff00",
"#00ffff", "#ffff00", "#ffffff"]
replacs = {"&": "&", "<": "<", ">": ">", "\x60": "£",
"\x7f": "©", "\x95": "č", "\x94": "š", "\xa0": "ž",
"\x92": "Č", "\xa2": "Š", "\x90": "Ž"}
# How would these codes be reset?
# codes = {"\x12": "<big>", "\x13": "<b>", "\x14": "<i>", "\x15": "<u>"}
def get_items(ptr, n):
items = []
for i in range(n):
s = ""
xpos = 0
while 1:
c = chr(255 - sna[ptr])
ptr += 1
if c in replacs:
s += replacs[c]
xpos += 1
elif c >= ' ':
s += c
xpos += 1
elif c == "\x1f":
break
elif c == "\x06":
if 255 - sna[ptr] == 6:
s += "<P>"
xpos = 0
ptr += 1
else:
s += " "
xpos = 0
elif c == "\x10": # INK
cl = 255 - sna[ptr]
ptr += 1
if cl < 8:
s += "<FONT COLOR=%s>" % colors[cl]
elif c == "\x11": # PAPER
ptr += 1
# elif c in codes:
# if sna[ptr] != 255:
# s += "<%s>" % codes[c]
# else:
# s += "</%s>" % codes[c]
# ptr += 1
if xpos == 32:
if sna[ptr] != ' ':
s += " "
xpos = 0
items.append(s)
return items
def read_connections():
ptr = word(self.pconnections)
for location in self.locations:
while sna[ptr] != 0xff:
location.connections[sna[ptr]] = sna[ptr + 1]
ptr += 2
ptr += 1
def read_object_positions():
ptr = self.pobject_locations
for i in range(len(self.objects)):
self.objects[i].initial = sna[ptr + i]
sna = b"\x00" * (16384 - 27) + open(name, "rb").read()
ptr = get_sign_ptr() + 13
self.nobjects_carry = sna[ptr]
self.nobjects = sna[ptr+1]
self.nlocations = sna[ptr+2]
self.nmessages = sna[ptr+3]
if dbver:
ptr += 1
self.nsystem_messages = sna[ptr+3]
self.pdictionary = ptr + 29
self.presponse = word(ptr+4)
self.pprocess = word(ptr+6)
self.pobjects = word(ptr+8)
self.plocations = word(ptr+10)
self.pmessages = word(ptr+12)
off = 2 if dbver else 0
self.pconnections = word(ptr + 14 + off)
self.pvocabulary = word(ptr+16 + off)
self.pobject_locations = word(ptr+18 + off)
if dbver:
psystem_messages = word(ptr+14)
self.system_messages = \
get_items(word(psystem_messages), self.nsystem_messages)
self.pobject_map = word(ptr+22)
else:
self.system_messages = [single_string(ptr) for ptr in [
27132, 27152, 27175, 27209, 27238, 27260, 27317, 27349, 27368,
27390, 27397, 27451, 27492, 27525, 27551, 27568, 27573, 27584,
27590, 27613, 27645, 27666, 27681, 27707, 27726]]
self.pobject_map = None
self.vocabulary, self.index_to_word = read_vocabulary()
self.dir_codes = [self.vocabulary[i]
for i in ["SZ", "S", "SV", "Z", "V", "JZ", "J", "JV",
"NOTE", "VEN", "GOR", "DOL"]]
self.responses = get_cond_table(self.presponse)
self.process = get_cond_table(self.pprocess)
self.objects = [Quill.Object(x)
for x in get_items(word(self.pobjects), self.nobjects)]
read_object_positions()
self.locations = [Quill.Location(x)
for x in get_items(word(self.plocations),
self.nlocations)]
read_connections()
self.messages = get_items(word(self.pmessages), self.nmessages)
self.location = self.locations[1]
self.location_no = 1
self.flags = [0]*64
self.flags[1] = 255
self.flags[2] = self.location_no
self.cheat_locations = {}
self.turns = 0
self.izpisano = ""
self.dlg = self.izpis = self.ukazna = None
self.setup_ui()
self.goljufija_const()
self.reset()
#######################################
# Processing
def reset(self):
self.flags[2] = self.location_no = 0
self.location = self.locations[self.location_no]
self.turns = 0
for obj in self.objects:
obj.location = obj.initial
self.update_location()
self.process_events(self.process, 1)
self.goljufija()
def update_location(self):
self.izpisano = ""
if self.flags[0]:
self.set_location_description(
"Temno je kot v rogu. Nič ne vidim.", (0,) * 12)
return
desc = self.location.description
inv = [obj.description for obj in self.objects
if obj.location == self.location_no]
if len(inv) == 1:
desc += "<br>Vidim tudi " + inv[0] + "<br>"
elif inv:
desc += "<br>Vidim tudi: " + "".join("<br>- %s" % i for i in inv)
self.set_location_description(
desc, [direct in self.location.connections
for direct in self.dir_codes])
#######################################
# GUI
def setup_ui(self):
goljufam = True
dlg = self.dlg = QtWidgets.QWidget()
dlg.setWindowTitle("Kontrabant")
dlg.setEnabled(True)
dlg.resize(1024 if goljufam else 544, 380)
dlg.setLayout(QtWidgets.QHBoxLayout())
vbox1 = QtWidgets.QWidget()
vbox1.setFixedWidth(350)
vbox1.setLayout(QtWidgets.QVBoxLayout())
dlg.layout().addWidget(vbox1)
self.izpis = QtWidgets.QTextEdit()
self.izpis.setReadOnly(True)
self.izpis.setMinimumHeight(290)
self.izpis.setFocusPolicy(QtCore.Qt.NoFocus)
self.izpis.setStyleSheet(
"font-family: Arial; font-size: 14; color: white; background: blue")
self.izpisano = ""
self.ukazna = QtWidgets.QLineEdit()
self.ukazna.setFocus()
self.ukazna.returnPressed.connect(self.user_command)
vbox1.layout().addWidget(self.izpis)
vbox1.layout().addWidget(self.ukazna)
dlg.show()
tabs = QtWidgets.QTabWidget()
tabs.setMinimumSize(350, 290)
dlg.layout().addWidget(tabs)
self.g_lokacija = QtWidgets.QTreeWidget()
tabs.addTab(self.g_lokacija, "Lokacija")
self.g_lokacija.setHeaderHidden(True)
self.g_predmeti = QtWidgets.QTreeWidget()
tabs.addTab(self.g_predmeti, "Predmeti")
self.g_predmeti.setColumnCount(3)
# GPredmeti->setColumnAlignment(1, AlignHCenter);
# GPredmeti->setColumnAlignment(2, AlignHCenter);
self.g_predmeti.setColumnWidth(0, 340)
# self.g_predmeti.setColumnWidthMode(0, QListView::Manual);
self.g_predmeti.setSortingEnabled(True)
self.g_dogodki = QtWidgets.QTreeWidget()
tabs.addTab(self.g_dogodki, "Dogodki")
self.g_dogodki.setColumnCount(1)
self.g_dogodki.setHeaderHidden(True)
self.g_lokacije = QtWidgets.QTreeWidget()
tabs.addTab(self.g_lokacije, "Lokacije")
self.g_dogodki.setHeaderHidden(True)
self.g_zastavice = QtWidgets.QTreeWidget()
tabs.addTab(self.g_zastavice, "Zastavice")
self.g_zastavice.setColumnCount(1)
self.g_zastavice.setHeaderHidden(True)
self.g_sporocila = QtWidgets.QTreeWidget()
tabs.addTab(self.g_sporocila, "Ukazi")
self.g_sporocila.setColumnCount(1)
self.g_predmeti.setColumnWidth(0, 100)
self.g_sporocila.setHeaderHidden(True)
#######################################
# Controller
def process_events(self, table, system, word1=None, word2=None):
match = 0
for event in table:
res = event(self, system, word1, word2)
if res in [-1, 1]:
return res
elif res:
match = 1
return match
def user_command(self):
command = self.ukazna.text().upper()
if not command:
return
self.ukazna.setText("")
self.printout('<font color="yellow">> %s</font>' % command)
self.turns += 1
commsplit = command.split()
if commsplit and (commsplit[0] in ["SHRA", "SAVE"]):
self.save()
return
if commsplit and (commsplit[0] in ["NALO", "LOAD"]):
self.load()
self.goljufija()
return
trans = []
for w in commsplit:
t = self.vocabulary.get(w[:4], None)
if t:
trans.append(t)
if not len(trans):
self.printout("Tega sploh ne razumem. "
"Poskusi povedati kako drugače.")
elif len(trans) == 1 and trans[0] in self.location.connections:
self.flags[2] = self.location_no = \
self.location.connections[trans[0]]
self.location = self.locations[self.location_no]
self.update_location()
else:
if len(trans) == 1:
m = self.process_events(self.responses, 0, trans[0])
else:
m = self.process_events(self.responses, 0, trans[0], trans[1])
if m == 0:
if len(trans) == 1 and trans[0] < 16:
self.printout("Mar ne vidiš, da v to smer ni poti?")
else:
self.printout("Tega pa ne morem.")
self.process_events(self.process, 1)
self.goljufija()
def save_position(self, fname):
f = open(fname, "wb")
pickle.dump(self.flags, f, 1)
pickle.dump([o.location for o in self.objects], f, 1)
def load_position(self, fname):
f = open(fname, "rb")
self.flags = pickle.load(f)
object_locations = pickle.load(f)
self.location_no = self.flags[2]
self.location = self.locations[self.location_no]
for r in range(len(object_locations)):
self.objects[r].location = object_locations[r]
self.update_location()
def printout(self, msg):
self.izpisano += msg + "<br>"
self.izpis.setHtml(self.izpisano)
self.izpis.scrollContentsBy(0, 30000)
def anykey(self):
        # disabled for now: the early return skips the modal prompt below
        return
QtWidgets.QMessageBox.information(
None, "Čakam...", "Pritisni OK, pa bova nadaljevala")
def set_location_description(self, msg, dirs):
self.printout(msg)
#######################################
# Cheating
def ldesc(self, n):
return self.locations[n].description[:40]
def ldesci(self, n):
return self.ldesc(n), n
def lidesc(self, n):
return n, self.ldesc(n)
def repr_action(self, event, system, skipat=0, adddict=""):
ldesci = self.ldesci
lidesc = self.lidesc
if not system:
if event.word2 != 255:
tc = " ".join((self.index_to_word[event.word1],
self.index_to_word[event.word2], adddict))
elif event.word1 != 255:
tc = " ".join((self.index_to_word[event.word1], adddict))
else:
tc = adddict
else:
tc = adddict
ta = []
for op, param1, param2 in event.conditions:
if self.Event.cond_ops[op][0] == "AT":
if skipat:
continue
else:
if tc:
tc += " [AT %s (%i)]" % ldesci(param1)
else:
tc = "AT %s (%i)" % ldesci(param1)
else:
s = "--> %s " % self.Event.cond_ops[op][0]
if param1:
if op < 4:
s += "%i (%s...) " % lidesc(param1)
elif op < 10:
s += "%i (%s) " % (param1,
self.objects[param1].description)
elif op < 13:
s += "%i " % param1
else:
s += "%i %i " % (param1, param2)
ta.append(s)
for action in event.actions:
tt = event.act_ops[action[0]]
atype = event.types[action[0]]
param1, param2 = (action[1] + (None, None))[:2]
if atype == self.Event.LOC:
tt += " %i (%s...)" % lidesc(param1)
elif atype == self.Event.MSG:
tt += " '%s'" % self.messages[param1]
elif atype == self.Event.OBJ:
tt += " '%s' (%i)" % (
self.objects[param1].description, param1)
elif atype == self.Event.SWAP:
tt += " '%s' (%i) '%s' (%i)" % (
self.objects[param1].description, param1,
self.objects[param2].description, param2)
elif event.nparams[action[0]] == 1:
tt += " %i" % param1
elif event.nparams[action[0]] == 2:
tt += " %i %i" % (param1, param2)
ta.append(tt)
return tc, ta, not tc
@staticmethod
def parse_tree(tree_widget, tree):
tree_widget.clear()
for state, events in tree:
it = QtWidgets.QTreeWidgetItem(state)
tree_widget.addTopLevelItem(it)
for event in events:
text, subnodes, is_open = (event + (None, None))[:3]
if isinstance(text, str):
it2 = QtWidgets.QTreeWidgetItem([text])
it.addChild(it2)
if subnodes:
it2.addChildren([QtWidgets.QTreeWidgetItem([i])
for i in subnodes])
it2.setExpanded(True)
else:
it.addChildren(QtWidgets.QTreeWidgetItem([i]) for i in text)
def goljufija_const(self):
repr_act = self.repr_action
ldesci = self.ldesci
def getlocations():
def process_events(loc, table, system):
acts, spec_exits, spec_approaches = [], [], []
for event in table:
for op, param1, param2 in event.conditions:
if op <= 1 and param1 == loc:
for action in event.actions:
if event.act_ops[action[0]] == "GOTO":
if action[1][0] != loc:
spec_exits.append(
repr_act(event, system, 1,
"-> %s (%i)"
% ldesci(action[1][0])))
else:
spec_approaches.append(
repr_act(event, system, 1,
"<- %s (%i)"
% ldesci(param1)))
break
else:
# It is not an exit
acts.append(repr_act(event, system, 0))
break
else:
# There is no 'AT location';
# check whether this can be a special approach
for action in event.actions:
if event.act_ops[action[0]] == "GOTO" and \
action[1][0] == loc:
spec_approaches.append(repr_act(event, system))
break
# There is an 'AT location';
# check whether this is an exit event
return acts, spec_exits, spec_approaches
def process_exits(loc):
return ["%s -> %s (%i)" %
((self.index_to_word[d],) + ldesci(n))
for d, n in self.locations[loc].connections.items()]
def process_approaches(loc):
app = []
for src, location in enumerate(self.locations):
if loc in list(location.connections.values()):
for d, n in location.connections.items():
if n == loc:
app.append("%s (%i) -> %s" %
(ldesci(src) +
(self.index_to_word[d], )))
return app
self.cheat_locations = {}
for i in range(len(self.locations)):
exits = process_exits(i)
approaches = process_approaches(i)
responses, se, sa = process_events(i, self.responses, 0)
exits += se
approaches += sa
processes, se, sa = process_events(i, self.process, 1)
exits += se
approaches += sa
self.cheat_locations[i] = (responses, processes)
it = QtWidgets.QTreeWidgetItem(
["%s (%i)" % (self.locations[i].description, i)])
self.g_lokacije.addTopLevelItem(it)
for name, content in (
("Vhodi", approaches), ("Izhodi", exits),
("Ukazi", responses), ("Dogodki", processes)):
if not content:
continue
it2 = QtWidgets.QTreeWidgetItem([name])
it.addChild(it2)
for con in content:
if isinstance(con, str):
it3 = QtWidgets.QTreeWidgetItem([con])
else:
it3 = QtWidgets.QTreeWidgetItem([con[0]])
it3.addChildren([QtWidgets.QTreeWidgetItem([i])
for i in con[1]])
it3.setExpanded(True)
it2.addChild(it3)
it2.setExpanded(True)
def getmessages():
def process_events(msg_no, table, system):
acts = []
for event in table:
for action in event.actions:
if event.act_ops[action[0]][:3] == "MES" and \
action[1][0] == msg_no:
break
else:
continue
acts.append(repr_act(event, system))
return acts
return [("%s (%i)" % (self.messages[i], i),
process_events(i, self.responses, 0) +
process_events(i, self.process, 1))
for i in range(len(self.messages))]
def add_event_to_tree(tree, event, skip_at=0):
tc, ta, isopen = repr_act(event, skip_at)
it = QtWidgets.QTreeWidgetItem([tc])
tree.addTopLevelItem(it)
it.addChildren([QtWidgets.QTreeWidgetItem([i]) for i in ta])
def get_responses():
acts = []
trivial = {self.vocabulary["DAJ"]: "DROP",
self.vocabulary["VZEM"]: "GET",
self.vocabulary["OBLE"]: "WEAR",
self.vocabulary["SLEC"]: "REMOVE"}
for event in self.responses:
if (not event.conditions and len(event.actions) == 2 and
event.act_ops[event.actions[1][0]] in ["OK", "DONE"] and
trivial.get(event.word1, None) ==
event.act_ops[event.actions[0][0]]):
continue
if event.word1 < 16:
for op, param1, param2 in event.conditions:
if not op:
break
else:
self.g_sporocila.addTopLevelItem(
QtWidgets.QTreeWidgetItem([repr_act(event, 0)]))
continue
add_event_to_tree(self.g_sporocila, event)
def get_process():
for event in self.process:
add_event_to_tree(self.g_dogodki, event, 1)
return (getlocations(), getmessages(),
get_responses(), get_process(), None)
def goljufija(self):
repr_act = self.repr_action
def getlocation():
self.g_lokacija.clear()
conn = list(self.location.connections.items())
if conn:
it = QtWidgets.QTreeWidgetItem(["Izhodi"])
self.g_lokacija.addTopLevelItem(it)
it.addChildren([QtWidgets.QTreeWidgetItem(
["%s: %s (%i)" % (
self.index_to_word[dire],
self.locations[loc].description[:40], loc)])
for dire, loc in conn])
it.setExpanded(True)
responses, processes = self.cheat_locations[self.location_no]
if responses:
it = QtWidgets.QTreeWidgetItem(["Ukazi"])
self.g_lokacija.addTopLevelItem(it)
for content in responses:
it2 = QtWidgets.QTreeWidgetItem([content[0]])
it.addChild(it2)
it2.addChildren([QtWidgets.QTreeWidgetItem([i])
for i in content[1]])
it2.setExpanded(True)
it.setExpanded(True)
if processes:
it = QtWidgets.QTreeWidgetItem(["Dogodki"])
self.g_lokacija.addTopLevelItem(it)
for content in processes:
it2 = QtWidgets.QTreeWidgetItem([content[0]])
it.addChild(it2)
it2.addChildren([QtWidgets.QTreeWidgetItem([i])
for i in content[1]])
it2.setExpanded(True)
it.setExpanded(True)
objlocs = {self.Object.CARRIED: "imam",
self.Object.WORN: "nosim",
self.Object.NOT_CREATED: "ne obstaja",
self.Object.INVALID: "ne obstaja"}
def getobjects():
def process_events(object_no, table, system):
acts = []
trivial = {self.vocabulary["DAJ"]: "DROP",
self.vocabulary["VZEM"]: "GET",
self.vocabulary["OBLE"]: "WEAR",
self.vocabulary["SLEC"]: "REMOVE"}
for event in table:
if not system and not event.conditions and \
len(event.actions) == 2 and \
event.act_ops[event.actions[1][0]] in ["OK",
"DONE"] \
and trivial.get(event.word1, None) == \
event.act_ops[event.actions[0][0]]:
continue
for op, param1, param2 in event.conditions:
if 4 <= op <= 9 and param1 == object_no:
break
else:
for action in event.actions:
atype = event.types[action[0]]
if (atype in [event.OBJ, event.SWAP] and
action[1][0] == object_no or
atype == self.Event.SWAP and
action[1][1] == object_no):
break
else:
                            continue  # not interesting: does not mention
                            # object_no in either conditions or actions
acts.append(repr_act(event, system))
return acts
def objloc(objno):
loc = self.objects[objno].location
if loc < 0xfc:
return str(loc)
else:
return objlocs[loc]
if not hasattr(self, "cheatobjects"):
self.cheatobjects = [([self.objects[i].description, str(i),
objloc(i)],
process_events(i, self.responses, 0) +
process_events(i, self.process, 1))
for i in range(len(self.objects))]
else:
for i in range(len(self.objects)):
self.cheatobjects[i][0][2] = objloc(i)
return self.cheatobjects
def getflags():
flops = [Quill.Event.ptas[0][0].index(x)
for x in ["PLUS", "MINUS", "SET", "CLEAR", "LET"]]
def process_events(flag_no, table, system):
acts = []
for event in table:
for op, param1, param2 in event.conditions:
if op >= 11 and param1 == flag_no:
break
else:
for action in event.actions:
if action[0] in flops and flag_no == action[1][0]:
break
else:
                            continue  # not interesting: does not mention the
                            # flag in either conditions or actions
acts.append(repr_act(event, system))
return acts
if not hasattr(self, "cheatflags"):
self.cheatflags = [(["%i = %i" % (i, self.flags[i])],
process_events(i, self.responses, 0) +
process_events(i, self.process, 1))
for i in range(len(self.flags))]
else:
self.cheatflags = [(["%i = %i" % (i, self.flags[i])],
self.cheatflags[i][1])
for i in range(len(self.flags))]
return self.cheatflags[:3] + [x for x in self.cheatflags[3:]
if x[1]]
getlocation()
self.parse_tree(self.g_zastavice, getflags())
self.parse_tree(self.g_predmeti, getobjects())
app = QtWidgets.QApplication([])
q = Quill()
app.exec()
| janezd/kontrabant | kontrabant.py | Python | gpl-3.0 | 40,758 |
#!/usr/bin/env python
import unittest
from app.md5py import MD5
class TddInPythonExample(unittest.TestCase):
def test_object_program(self):
m = MD5()
m.update("1234")
hexdigest = m.hexdigest()
self.assertEqual("81dc9bdb52d04dc20036dbd8313ed055", hexdigest)
if __name__ == '__main__':
unittest.main()
| davidam/python-examples | security/md5/test/test_md5py.py | Python | gpl-3.0 | 352 |
from django.db.models import Q
from django.forms.fields import CharField, MultiValueField
from django.forms.widgets import MultiWidget, TextInput
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_filters.filters import DateFilter, MethodFilter, ModelChoiceFilter
from rest_framework import serializers
from rest_framework.compat import django_filters
from rest_framework.filters import FilterSet
from sapl.base.models import Autor, TipoAutor
from sapl.parlamentares.models import Legislatura
from sapl.utils import generic_relations_for_model
class SaplGenericRelationSearchFilterSet(FilterSet):
q = MethodFilter()
def filter_q(self, queryset, value):
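        # split the free-text query into words; every word must match (AND),
        # and each word may match either the name field or any searchable
        # field reachable through the model's generic relations (OR)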
query = value.split(' ')
if query:
q = Q()
for qtext in query:
if not qtext:
continue
q_fs = Q(nome__icontains=qtext)
order_by = []
for gr in generic_relations_for_model(self._meta.model):
sgr = gr[1]
for item in sgr:
if item.related_model != self._meta.model:
continue
flag_order_by = True
for field in item.fields_search:
if flag_order_by:
flag_order_by = False
order_by.append('%s__%s' % (
item.related_query_name(),
field[0])
)
# if len(field) == 3 and field[2](qtext) is not
# None:
q_fs = q_fs | Q(**{'%s__%s%s' % (
item.related_query_name(),
field[0],
field[1]): qtext if len(field) == 2
else field[2](qtext)})
q = q & q_fs
if q:
queryset = queryset.filter(q).order_by(*order_by)
return queryset
class SearchForFieldWidget(MultiWidget):
def decompress(self, value):
if value is None:
return [None, None]
return value
def __init__(self, attrs=None):
widgets = (TextInput, TextInput)
MultiWidget.__init__(self, widgets, attrs)
class SearchForFieldField(MultiValueField):
widget = SearchForFieldWidget
def __init__(self, *args, **kwargs):
fields = (
CharField(),
CharField())
super(SearchForFieldField, self).__init__(fields, *args, **kwargs)
def compress(self, parameters):
if parameters:
return parameters
return None
class SearchForFieldFilter(django_filters.filters.MethodFilter):
field_class = SearchForFieldField
class AutorChoiceFilterSet(SaplGenericRelationSearchFilterSet):
q = MethodFilter()
tipo = ModelChoiceFilter(queryset=TipoAutor.objects.all())
class Meta:
model = Autor
fields = ['q',
'tipo',
'nome', ]
def filter_q(self, queryset, value):
return SaplGenericRelationSearchFilterSet.filter_q(
self, queryset, value).distinct('nome').order_by('nome')
class AutorSearchForFieldFilterSet(AutorChoiceFilterSet):
q = SearchForFieldFilter()
class Meta(AutorChoiceFilterSet.Meta):
pass
def filter_q(self, queryset, value):
value[0] = value[0].split(',')
value[1] = value[1].split(',')
params = {}
for key, v in list(zip(value[0], value[1])):
if v in ['True', 'False']:
v = '1' if v == 'True' else '0'
params[key] = v
return queryset.filter(**params).distinct('nome').order_by('nome')
class AutoresPossiveisFilterSet(FilterSet):
data_relativa = DateFilter(method='filter_data_relativa')
tipo = MethodFilter()
class Meta:
model = Autor
fields = ['data_relativa', 'tipo', ]
def filter_data_relativa(self, queryset, name, value):
return queryset
def filter_tipo(self, queryset, value):
try:
tipo = TipoAutor.objects.get(pk=value)
        except (TipoAutor.DoesNotExist, ValueError):  # pk missing or malformed
raise serializers.ValidationError(_('Tipo de Autor inexistente.'))
qs = queryset.filter(tipo=tipo)
return qs
@property
def qs(self):
qs = super().qs
data_relativa = self.form.cleaned_data['data_relativa'] \
if 'data_relativa' in self.form.cleaned_data else None
tipo = self.form.cleaned_data['tipo'] \
if 'tipo' in self.form.cleaned_data else None
if not tipo and not data_relativa:
return qs
if tipo:
            # no try/except needed, it was already validated in filter_tipo
tipo = TipoAutor.objects.get(pk=tipo)
if not tipo.content_type:
return qs
filter_for_model = 'filter_%s' % tipo.content_type.model
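            # dynamic dispatch: a TipoAutor backed by, say, Parlamentar is
            # routed to self.filter_parlamentar(qs, data_relativa) below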
if not hasattr(self, filter_for_model):
return qs
if not data_relativa:
data_relativa = timezone.now()
return getattr(self, filter_for_model)(qs, data_relativa).distinct()
def filter_parlamentar(self, queryset, data_relativa):
        # does not take leaves of absence into account
legislatura_relativa = Legislatura.objects.filter(
data_inicio__lte=data_relativa,
data_fim__gte=data_relativa).first()
q = Q(
parlamentar_set__mandato__data_inicio_mandato__lte=data_relativa,
parlamentar_set__mandato__data_fim_mandato__isnull=True) | Q(
parlamentar_set__mandato__data_inicio_mandato__lte=data_relativa,
parlamentar_set__mandato__data_fim_mandato__gte=data_relativa)
        if legislatura_relativa and legislatura_relativa.atual():
q = q & Q(parlamentar_set__ativo=True)
return queryset.filter(q)
def filter_comissao(self, queryset, data_relativa):
        return queryset.filter(
            Q(comissao_set__data_extincao__isnull=True,
              comissao_set__data_fim_comissao__isnull=True) |
            Q(comissao_set__data_extincao__gte=data_relativa,
              comissao_set__data_fim_comissao__isnull=True) |
            Q(comissao_set__data_extincao__isnull=True,
              comissao_set__data_fim_comissao__gte=data_relativa) |
            Q(comissao_set__data_extincao__gte=data_relativa,
              comissao_set__data_fim_comissao__gte=data_relativa),
            comissao_set__data_criacao__lte=data_relativa)
def filter_frente(self, queryset, data_relativa):
return queryset.filter(
Q(frente_set__data_extincao__isnull=True) |
Q(frente_set__data_extincao__gte=data_relativa),
frente_set__data_criacao__lte=data_relativa)
def filter_bancada(self, queryset, data_relativa):
return queryset.filter(
Q(bancada_set__data_extincao__isnull=True) |
Q(bancada_set__data_extincao__gte=data_relativa),
bancada_set__data_criacao__lte=data_relativa)
def filter_bloco(self, queryset, data_relativa):
return queryset.filter(
Q(bloco_set__data_extincao__isnull=True) |
Q(bloco_set__data_extincao__gte=data_relativa),
bloco_set__data_criacao__lte=data_relativa)
def filter_orgao(self, queryset, data_relativa):
        # in the original implementation there were no rules for orgao
return queryset
| vitalti/sapl | sapl/api/forms.py | Python | gpl-3.0 | 7,717 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
PYPI_RST_FILTERS = (
# Replace code-blocks
(r'\.\.\s? code-block::\s*(\w|\+)+', '::'),
# Replace image
(r'\.\.\s? image::.*', ''),
# Remove travis ci badge
(r'.*travis-ci\.org/.*', ''),
# Remove pypip.in badges
(r'.*pypip\.in/.*', ''),
(r'.*crate\.io/.*', ''),
(r'.*coveralls\.io/.*', ''),
)
def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
'''
content = open(filename).read()
for regex, replacement in PYPI_RST_FILTERS:
content = re.sub(regex, replacement, content)
return content
def required(filename):
with open(filename) as f:
packages = f.read().splitlines()
return packages
setup(
name="serialkiller-plugins",
version="0.0.2",
description="Plugins for serialkiller project",
long_description=rst('README.rst') + rst('CHANGELOG.txt'),
author="Bruno Adelé",
author_email="Bruno Adelé <[email protected]>",
url="https://github.com/badele/serialkiller-plugins",
license="GPL",
install_requires=required('requirements/base.txt'),
setup_requires=[],
tests_require=[
'pep8',
'coveralls'
],
test_suite='tests',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
scripts=[],
classifiers=[
'Programming Language :: Python',
],
)
| badele/serialkiller-plugins | setup.py | Python | gpl-3.0 | 1,664 |
T = input()
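# alternating sum -1 + 2 - 3 + ... +/- T: T/2 for even T, -(T+1)/2 for odd T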
if T%2==0:
print T/2
else:
print ((T-1)/2)-T
| Dawny33/Code | Code_Forces/Contest 277/A.py | Python | gpl-3.0 | 66 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 by Erwin Marsi and TST-Centrale
#
# This file is part of the DAESO Framework.
#
# The DAESO Framework is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# The DAESO Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
distutils setup script for distributing Timbl Tools
"""
# TODO:
# - docs, data and test are not installed when using bdist_wininst...
__authors__ = "Erwin Marsi <[email protected]>"
from distutils.core import setup
from glob import glob
from os import walk, path, remove
from os.path import basename, isdir, join, exists
from shutil import rmtree
if exists('MANIFEST'): remove('MANIFEST')
if exists("build"): rmtree("build")
name = "timbl-tools"
version = "0.5.0"
description = """Timbl Tools is a collection of Python modules and scripts for
working with TiMBL, the Tilburg Memory-based Learner."""
long_description = """
Timbl Tools is a collection of Python modules and scripts for working with
TiMBL, the Tilburg Memory-based Learner. It provides support for:
* creating Timbl servers and clients
* running (cross-validated) experiments
* lazy parsing of verbose Timbl ouput (e.g. NN distributions)
* down-sampling of instances
* writing ascii graphs of the feature weights
"""
packages = [ root[4:]
for (root, dirs, files) in walk("lib")
if not ".svn" in root ]
def get_data_files(data_dir_prefix, dir):
# data_files specifies a sequence of (directory, files) pairs
# Each (directory, files) pair in the sequence specifies the installation directory
# and the files to install there.
data_files = []
for base, subdirs, files in walk(dir):
install_dir = join(data_dir_prefix, base)
files = [ join(base, f) for f in files
if not f.endswith(".pyc") and not f.endswith("~") ]
data_files.append((install_dir, files))
if '.svn' in subdirs:
subdirs.remove('.svn') # ignore svn directories
return data_files
# data files are installed under sys.prefix/share/pycornetto-%(version)
data_dir = join("share", "%s-%s" % (name, version))
data_files = [(data_dir, ['CHANGES', 'COPYING', 'INSTALL', 'README'])]
data_files += get_data_files(data_dir, "doc")
data_files += get_data_files(data_dir, "data")
sdist_options = dict(
formats=["zip","gztar","bztar"])
setup(
name = name,
version = version,
description = description,
long_description = long_description,
license = "GNU Public License v3",
author = "Erwin Marsi",
author_email = "[email protected]",
url = "https://github.com/emsrc/timbl-tools",
requires = ["networkx"],
provides = ["tt (%s)" % version],
package_dir = {"": "lib"},
packages = packages,
scripts = glob(join("bin","*.py")),
data_files = data_files,
platforms = "POSIX, Mac OS X, MS Windows",
keywords = [
"TiMBL"],
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: GNU Public License (GPL)",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Natural Language :: English"
],
options = dict(sdist=sdist_options)
)
| emsrc/timbl-tools | setup.py | Python | gpl-3.0 | 3,905 |
from setuptools import setup, find_packages
import sys, os
version = '1.3'
long_description = """The raisin.restyler package is a part of Raisin, the web application
used for publishing the summary statistics of Grape, a pipeline used for processing and
analyzing RNA-Seq data."""
setup(name='raisin.restyler',
version=version,
description="A package used in the Raisin web application",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: POSIX :: Linux'],
keywords='RNA-Seq pipeline ngs transcriptome bioinformatics ETL',
author='Maik Roder',
author_email='[email protected]',
url='http://big.crg.cat/services/grape',
license='GPL',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages = ['raisin'],
package_data = {'raisin.restyler':['templates/*.pt']},
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'configobj',
'zope.pagetemplate'
],
entry_points="""
# -*- Entry points: -*-
""",
)
| rna-seq/raisin.restyler | setup.py | Python | gpl-3.0 | 1,531 |
#
# Utility functions
#
import sys
from functools import partial
from uuid import UUID
from hashlib import sha1
from os import path, listdir
from zipfile import ZipFile
from subprocess import Popen, TimeoutExpired
import nacl.utils
import nacl.secret
def isValidUUID(uid):
"""
Validate UUID
@param uid: UUID value to be verfied, can be bytes or str
@return: True if UUID valid, else False
"""
try:
# attempt convertion from bytes to str
uid = uid.decode('ascii')
except AttributeError:
# is already bytes object
pass
except UnicodeDecodeError:
# uid contains non-ascii characters, invalid UUID
return False
try:
out = UUID(uid, version=4)
except ValueError:
return False
# check converted value from UUID equals original value. UUID class is not strict on input
return str(out) == uid
def encrypt(safe, *args):
"""
Encrypt all provided data
@param safe: encryption class
@param args: data to be encrypted
@return: encryption output iterable
"""
return (safe.encrypt(a, nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)) for a in args)
def sha1sum(filePath, blocksize=1024):
"""
Calculate SHA1 hash of file
@param filePath: Path to hashable file
@param blocksize: Amount of bytes to read into memory before hashing
@return: SHA1 hash value (bytes)
"""
with open(filePath, mode='rb') as f:
out = sha1()
for buf in iter(partial(f.read, blocksize), b''):
out.update(buf)
return bytes(out.hexdigest(), encoding='ascii')
def checkCerts():
"""
Checks to see if required TLS certificates exist in Resources directory. Attempts to generate certificates if not found
@returns: Boolean value based on success
"""
resDir = absolutePath('Resources')
command = None
success = False
# check to see if required certificates exist
    if not all(path.isfile(path.join(resDir, cert)) for cert in ('server.crt', 'server.key.orig')):
############
# Check OS
############
if sys.platform in ('linux', 'darwin'):
# bash script run
command = 'sh {}'.format('create_certs_linux.sh')
elif sys.platform == 'win32':
hasOpenSSL = False
# check for openssl requirement (downloaded during installer run)
            files = sorted((path.isdir(path.join(resDir, f)), f) for f in listdir(resDir) if f.lower().startswith('openssl-'))
# check for expanded directory and executable
for isDir, ofile in files:
if isDir and path.isfile(path.join(resDir, ofile, 'openssl.exe')):
hasOpenSSL = True
newDir = ofile
break
if not hasOpenSSL and files:
                # sort the archive filenames and try them in order
for ofile in sorted(f for isDir, f in files if not isDir and path.splitext(f)[1] == '.zip'):
# extract archive
with ZipFile(path.join(resDir, ofile), 'r') as ozip:
newDir = path.join(resDir, path.splitext(ofile)[0])
ozip.extractall(path=newDir)
# verify openssl.exe exists in directory
if path.isfile(path.join(newDir, 'openssl.exe')):
hasOpenSSL = True
break
if hasOpenSSL:
# write openssl directory to config file
with open(path.join(resDir, 'openssl.cfg'), 'w') as config:
config.writelines([newDir])
# windows bat command file
command = r'cmd /c {}'.format('create_certs_windows.bat')
if command:
proc = Popen([command], cwd=resDir, shell=True)
try:
proc.wait(180)
except TimeoutExpired:
proc.kill()
# check command has generated correct files
            if all(path.isfile(path.join(resDir, cert)) for cert in ('server.crt', 'server.key.orig')):
success = True
else:
success = True
return success
def absolutePath(pathname):
"""
Return the absolute path of the given file or directory
@return: absolute path
"""
if getattr(sys, 'frozen', False):
# Frozen application denotes packaged application, modules are moved into a zip
datadir = path.dirname(sys.executable)
else:
# Source based installation, use parent directory of this module's directory
datadir = path.join(path.dirname(__file__), path.pardir)
return path.abspath(path.join(datadir, pathname))
| cbrunker/quip | lib/Utils.py | Python | gpl-3.0 | 4,795 |
'''
' configurationGui.py
' Author: Iker Pedrosa
'
' License:
' This file is part of orderedFileCopy.
'
' orderedFileCopy is free software: you can redistribute it and/or modify
' it under the terms of the GNU General Public License as published by
' the Free Software Foundation, either version 3 of the License, or
' (at your option) any later version.
'
' orderedFileCopy is distributed in the hope that it will be useful,
' but WITHOUT ANY WARRANTY; without even the implied warranty of
' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
' GNU General Public License for more details.
'
' You should have received a copy of the GNU General Public License
' along with orderedFileCopy. If not, see <http://www.gnu.org/licenses/>.
'
'''
#Imported modules
from Tkinter import *
from fileManager import *
import tkFileDialog
import globals
#Global variables
class configurationGUI:
def __init__(self, master):
master.grab_set()
#The contrary is master.grab_release()
#Window title
self.master = master
master.title("Configuration menu")
#Window position and size
windowWidth = 600
windowHeight = 150
screenWidth = master.winfo_screenwidth()
screenHeight = master.winfo_screenheight()
print("configurationGui: screenWidth %d" % screenWidth)
print("configurationGui: screenHeight %d" % screenHeight)
windowWidthPosition = (screenWidth - windowWidth) / 2
windowHeightPosition = ((screenHeight - windowHeight) / 2) - windowHeight
print("configurationGui: windowWidthPosition %d" % windowWidthPosition)
print("configurationGui: windowHeightPosition %d" % windowHeightPosition)
master.geometry("%dx%d+%d+%d" % (windowWidth, windowHeight, windowWidthPosition, windowHeightPosition))
#Create layouts
top_frame = Frame(master, width = 600, height = 50)
centre_frame = Frame(master, width = 600, height = 50)
below_frame = Frame(master, width = 600, height = 50)
bottom_frame = Frame(master, width = 600, height = 50)
top_frame.grid(row = 0)
centre_frame.grid(row = 1)
below_frame.grid(row = 2)
bottom_frame.grid(row = 3)
#Extension information
self.labelExtension = Label(top_frame, height = 1, width = 30, font = ("Helvetica", 11), text = "File extension to copy:")
self.labelExtension.grid(row = 0, column = 0)
self.textExtension = Text(top_frame, height = 1, width = 5, font = ("Helvetica", 11))
self.textExtension.grid(row = 0, column = 1)
self.textExtension.insert(END, globals.extension)
#Default origin information
globals.windowDefaultOrigin = StringVar()
globals.windowDefaultOrigin.set(globals.selectedDefaultOrigin)
self.textDefaultOriginPath = Entry(centre_frame, width = 55, font = ("Helvetica", 11), textvariable = globals.windowDefaultOrigin)
self.textDefaultOriginPath.grid(row = 1, column = 0)
self.buttonDefaultOriginPath = Button(centre_frame, text = "...", command = self.defaultOriginFileChooser)
self.buttonDefaultOriginPath.grid(row = 1, column = 1, padx = 10)
#Destination by USB information
self.labelUsb = Label(below_frame, width = 15, font = ("Helvetica", 11), text = "Destination by USB")
self.labelUsb.grid(row = 0, column = 0)
self.localUsbState = IntVar()
self.localUsbState.set(globals.selectedUsbState)
self.checkboxUsb = Checkbutton(below_frame, command = self.activateUsbName, variable = self.localUsbState, onvalue=1, offvalue=0)
self.checkboxUsb.grid(row = 0, column = 1)
self.textUsb = Text(below_frame, height = 1, width = 25, font = ("Helvetica", 11), state = "disabled")
self.textUsb.grid(row = 0, column = 2)
if globals.selectedUsbState == 1:
self.textUsb.configure(state = "normal")
else:
self.textUsb.configure(state = "disabled")
self.textUsb.insert(END, globals.selectedUsbName)
#Buttons
self.buttonAccept = Button(bottom_frame, text = "Accept", command = self.accept)
self.buttonAccept.grid(row = 2, column = 0, padx = 25, pady = 20)
self.buttonCancel = Button(bottom_frame, text = "Cancel", command = self.cancel)
self.buttonCancel.grid(row = 2, column = 1, padx = 25, pady = 20)
#Finished __init__
def defaultOriginFileChooser(self):
resultPath = tkFileDialog.askdirectory(initialdir = globals.selectedDefaultOrigin) + "/"
if resultPath != "/" and resultPath != "":
globals.selectedDefaultOrigin = resultPath.encode("utf-8")
globals.windowDefaultOrigin.set(globals.selectedDefaultOrigin)
#Finished originFileChooser
def accept(self):
globals.extension = self.textExtension.get("1.0", "end-1c")
globals.selectedUsbName = self.textUsb.get("1.0", "end-1c")
writeConfiguration()
print("accept: globals.selectedDefaultOrigin '%s'" % globals.selectedDefaultOrigin)
print("accept: globals.extension '%s'" % globals.extension)
self.master.destroy()
#Finished accept
def activateUsbName(self):
if self.localUsbState.get() == 1:
globals.selectedUsbState = 1
self.textUsb.configure(state = "normal")
self.textUsb.insert(END, globals.selectedUsbName)
else:
globals.selectedUsbState = 0
self.textUsb.delete("1.0", END)
self.textUsb.configure(state = "disabled")
#Finished activateUsbName
def cancel(self):
self.master.destroy()
#Finished cancel
#Finished configurationGUI
| ikerexxe/orderedFileCopy | configurationGui.py | Python | gpl-3.0 | 5,243 |
# -*- coding:utf-8 -*-
# @auth ivan
# @time 2016-10-14 16:36:15
# @goal test Abstract Factory Pattern
class Shape:
def __init__(self):
return
def draw(self):
return
# Circle Rectangle Square
class Circle(Shape):
def draw(self):
print("Inside Circle::draw() method.")
class Rectangle(Shape):
def draw(self):
print("Inside Rectangle::draw() method.")
class Square(Shape):
def draw(self):
print("Inside Square::draw() method.")
class Color:
def __init__(self):
return
def fill(self):
return
# Blue Green Red
class Blue(Color):
def fill(self):
print("Inside Blue::fill() method.")
class Green(Color):
def fill(self):
print("Inside Green::fill() method.")
class Red(Color):
def fill(self):
print("Inside Red::fill() method.")
class AbstractFactory:
def __init__(self):
return
def getShape(self, shapeType):
return
def getColor(self, colorType):
return
# ShapeFactory ColorFactory
class ColorFactory(AbstractFactory):
def getColor(self, colorType):
if not colorType:
return
elif colorType == 'BLUE':
return Blue()
elif colorType == 'GREEN':
return Green()
elif colorType == 'RED':
return Red()
return
def getShape(self, shapeType):
return
class ShapeFactory(AbstractFactory):
def getShape(self, shapeType):
if not shapeType:
return
elif shapeType == 'CIRCLE':
return Circle()
elif shapeType == 'RECTANGLE':
return Rectangle()
elif shapeType == 'SQUARE':
return Square()
return
def getColor(self, colorType):
return
class FactoryProducer:
def getFactory(self, choice):
if choice == 'SHAPE':
return ShapeFactory()
elif choice == 'COLOR':
return ColorFactory()
return
class AbstractFactoryPatternDemo:
def __init__(self):
self.shapeFactory = FactoryProducer().getFactory("SHAPE")
self.colorFactory = FactoryProducer().getFactory("COLOR")
self.shape_list = ["CIRCLE", "RECTANGLE", "SQUARE"]
self.color_list = ["BLUE", "GREEN", "RED"]
def run(self):
for i in self.shape_list:
shape = self.shapeFactory.getShape(i)
shape.draw()
for i in self.color_list:
color1 = self.colorFactory.getColor(i)
color1.fill()
A = AbstractFactoryPatternDemo()
A.run()
| IvanaXu/Test_Class_GOF | tPatterns/Creational_Patterns/test_Abstract_Factory_Pattern.py | Python | gpl-3.0 | 2,597 |
import mutagen
import os
import re
import sys
from optparse import OptionParser
music_file_exts = ['.mp3', '.wav', '.ogg']
seconds_re = re.compile(r'(\d+)(\.\d+)? seconds')
def main(argv):
(options, args) = build_parser().parse_args(argv)
validate_options(options)
print('playlist(s) will be written to ', options.outdir)
if not options.contains and not options.regex:
playlists = build_top_10_playlists(options.start_at, [], options.extended,
options.absolute, options.depth)
else:
predicates = build_match_predicates(options.contains, options.regex)
playlists = [build_playlist(options.start_at, predicates, options.extended,
options.absolute, options.depth, options.name)]
outdir = options.outdir.rstrip(os.path.sep)
write_playlists(playlists, outdir)
def build_match_predicates(contains, regex):
predicates = []
if contains:
c = contains.lower()
predicates.append(
lambda x: c in os.path.basename(x['path']).lower() or c in x['title'].lower() or c in x['artist'].lower()
)
if regex:
r = re.compile(regex)
predicates.append(
lambda x: re.search(r, os.path.basename(x['path'])) or re.search(r, x['title']) or re.search(r, x['artist'])
)
return predicates
def build_parser():
parser = OptionParser()
parser.add_option('-n', '--name', dest='name', default=os.path.basename(os.getcwd()),
help='NAME of playlist', metavar='NAME')
parser.add_option('-s', '--start-at', dest='start_at', default=os.getcwd(),
help='DIR location to start media file search from (default is current DIR)',
metavar='DIR')
parser.add_option('-e', '--extended', dest='extended',
action='store_true', default=False,
help='use m3u extended format (has additional media metadata)')
parser.add_option('-a', '--absolute', dest='absolute',
action='store_true', default=False,
help='use absolute file paths (default is relative paths)')
parser.add_option('-d', '--depth', dest='depth', type="int", default=-1,
help='depth to search, 0 for target dir only (default is fully recursive)')
parser.add_option('-o', '--outdir', dest='outdir', default=os.getcwd(),
help='DIR location of output file(s) (default is current DIR)',
metavar='DIR')
parser.add_option('-c', '--contains', dest='contains', default=None,
help='case insensitive match on given string, i.e. "string contains SUBSTR". ' +
'Checks file names and metadata.', metavar='SUBSTR')
parser.add_option('-r', '--regex', dest='regex', default=None,
help='regex match. checks file name and metadata',
metavar='EXP')
parser.add_option('-f', '--force', dest='force', default=False,
action='store_true', help='force execution through warnings')
return parser
def validate_options(options):
if not os.path.isdir(options.outdir):
print('output directory does not exist!')
sys.exit(1)
if not os.path.isdir(options.start_at):
print('starting directory does not exist!')
sys.exit(1)
    if options.depth < -1:
print('invalid depth: ' + str(options.depth))
sys.exit(1)
if os.path.exists(
os.path.join(options.outdir,
options.name if options.name.endswith('.m3u') else options.name + '.m3u')):
if options.force:
print('overwriting playlist: ' + options.name)
else:
print('playlist already exists with name: ' + options.name)
print('run with option -f to overwrite existing playlist')
sys.exit(1)
class Playlist:
def __init__(self, path, extended, absolute, name):
self.items = []
self.predicates = []
self.path = path
self.isExtended = extended
self.isAbsolute = absolute
self.name = name if name.endswith('.m3u') else name + '.m3u'
def __str__(self):
return self.name + ' items: ' + str(len(self.items))
def get_out_str(self, item, outdir):
x = 0
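        # for relative playlists, measure the common path prefix shared by
        # the output dir and the item path, then strip it when writing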
if not self.isAbsolute:
while x < len(outdir) and x < len(item['path']) \
and outdir[x] == item['path'][x]:
x += 1
if x == 0:
x = -1
if self.isExtended:
return '\n' + '#EXTINF:' + item['seconds'] + ', ' + item['artist'] + ' - ' + item['title'] \
+ '\n' + item['path'][x + 1:]
else:
return '\n' + item['path'][x + 1:]
def write_playlists(playlists, outdir):
for p in playlists:
print('writing playlist: ' + str(p))
with open(os.path.join(outdir, p.name), mode='w') as p_out:
if p.isExtended:
p_out.write('#EXTM3U')
else:
p_out.write('#STDM3U')
for i in p.items:
p_out.write(p.get_out_str(i, outdir))
def all_pass(x, predicates):
for p in predicates:
if not p(x):
return False
return True
def extract_metadata(path, extended=False):
meta = {'path': path, 'title': '', 'artist': '', 'seconds': '0'}
if extended:
f = mutagen.File(path)
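        # mutagen.File() returns None when it cannot identify the file type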
if f:
match = re.search(seconds_re, f.info.pprint())
meta['seconds'] = match.group(1) if match else '0'
else:
f = {}
meta['title'] = f.get('title',
[os.path.basename(path)])[0]
meta['artist'] = f.get('artist',
[path.split(os.path.sep)[-2]])[0]
return meta
def build_top_10_playlists(root_path, predicates, extended, absolute, depth):
playlists = []
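    # year directories (e.g. "2015") hold files named "NN_MM Title.ext"; the
    # second two-digit field is read as the chart position, keeping 01-10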
predicates.append(
        lambda x: re.search(r'^\d{2}_\d{2} ', os.path.basename(x['path']))
)
predicates.append(
lambda x: int(os.path.basename(x['path'])[3:5]) < 11
)
for d in os.listdir(root_path):
dpath = os.path.join(root_path, d)
if os.path.isdir(dpath) \
                and re.search(r'^\d{4}$', d) \
and 2100 > int(d) > 1900:
playlists.append(build_playlist(dpath, predicates,
extended, absolute,
0, os.path.basename(dpath)))
return playlists
def build_playlist(root_path, predicates, extended, absolute, depth, name):
playlist = Playlist(root_path, extended, absolute, name)
    for root, dirs, files in os.walk(root_path):
        if depth != -1:
            # honour the depth option: 0 searches the target dir only
            rel = os.path.relpath(root, root_path)
            cur_depth = 0 if rel == '.' else rel.count(os.path.sep) + 1
            if cur_depth >= depth:
                dirs[:] = []  # prune deeper directories from the walk
for f in files:
path = os.path.join(root, f)
if os.path.splitext(path)[1].lower() in music_file_exts:
item = extract_metadata(path, extended)
if all_pass(item, predicates):
playlist.items.append(item)
return playlist
if __name__ == "__main__":
main(sys.argv[1:])
| mangosmoothie/dnla-playlists | dnla-playlists/playlists.py | Python | gpl-3.0 | 7,175 |
"""Configuration for a load testing using Locust.
To start load testing, run `make server` and `make test-load`.
"""
import random
from json.decoder import JSONDecodeError
from django.urls import reverse
from locust import HttpLocust, TaskSet, task
class SolvingTaskBehavior(TaskSet):
"""Describes interaction of a simulated user with a single task.
The users requests a randomly choosen task,
then she does random number of edits and unsuccessful executions,
and finally she solves the task.
"""
SOLVE_PROBABILITY = 0.3
def on_start(self):
selected_task = random.choice(self.parent.task_names)
self.start_task(selected_task)
def start_task(self, task_name):
url = self.parent.action_urls['start_task']
data = {'task': task_name}
response = self.parent.post_with_cookies(url, data)
self.task_session_id = response.json()['task_session_id']
self.edit_program()
@task(1)
def run_program(self):
url = self.parent.action_urls['run_program']
solved = random.random() < self.SOLVE_PROBABILITY
data = {
'task-session-id': self.task_session_id,
'program': 'f',
'correct': solved}
self.parent.post_with_cookies(url, data)
if solved:
self.interrupt()
@task(5)
def edit_program(self):
url = self.parent.action_urls['edit_program']
data = {
'task-session-id': self.task_session_id,
'program': 'f'}
self.parent.post_with_cookies(url, data)
class UserBehavior(TaskSet):
"""Describes interaction of a simulated user with the server.
"""
tasks = [SolvingTaskBehavior]
def __init__(self, parent):
super().__init__(parent)
self.cookies = {}
self.action_urls = {}
self.task_names = None
def on_start(self):
"""Fill in cookies so that post request can be made later.
"""
response = self.visit_homepage()
self.save_cookies(response)
self.save_tasks()
self.save_action_urls()
def visit_homepage(self):
response = self.client.get('/')
return response
def save_tasks(self):
response = self.client.get('/learn/api/tasks/')
self.save_cookies(response)
self.task_names = [task['name'] for task in response.json()]
def save_action_urls(self):
"""The session and lazy user is created. Now tasks can be solved.
"""
user_response = self.client.get('/learn/api/users/current')
self.save_cookies(user_response)
student_url = user_response.json()['student']
student_response = self.client.get(student_url)
self.save_cookies(student_response)
self.action_urls['start_task'] = student_response.json()['start_task']
self.action_urls['edit_program'] = student_response.json()['edit_program']
self.action_urls['run_program'] = student_response.json()['run_program']
def save_cookies(self, response):
"""Stores cookies for later usage.
"""
self.cookies.update(response.cookies.get_dict())
def post_with_cookies(self, url, data):
"""Post request with correctly set cookies and headers.
"""
csrf_token = self.cookies['csrftoken']
data['csrfmiddlewaretoken'] = csrf_token
headers = {'X-CSRFToken': csrf_token, 'Referer': self.client.base_url}
response = self.client.post(url, data, headers=headers, cookies=self.cookies)
self.save_cookies(response)
self.log_errors(response)
return response
@staticmethod
def log_errors(response):
if not response.ok:
with open('request_errors.log', 'a') as f:
f.writelines(response.text)
class WebsiteUser(HttpLocust):
task_set = UserBehavior
min_wait = 500
max_wait = 5000
| adaptive-learning/robomission | backend/learn/tests/locustfile.py | Python | gpl-3.0 | 3,918 |
# -*- coding: utf-8 -*-
# Copyright (c) 2004 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing templates for the documentation generator (lists style).
"""
from __future__ import unicode_literals
#################################################
## Common templates for index and docu files ##
#################################################
headerTemplate = \
'''<!DOCTYPE html>
<html><head>
<title>{{Title}}</title>
<meta charset="UTF-8">
</head>
<body style="background-color:{BodyBgColor};color:{BodyColor}">'''
footerTemplate = '''
</body></html>'''
#########################################
## Templates for documentation files ##
#########################################
moduleTemplate = \
'''<a NAME="top" ID="top"></a>
<h1 style="background-color:{Level1HeaderBgColor};color:{Level1HeaderColor}">
{{Module}}</h1>
{{ModuleDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Global Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Classes</h3>
{{ClassList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Functions</h3>
{{FunctionList}}
<hr />'''
rbFileTemplate = \
'''<a NAME="top" ID="top"></a>
<h1 style="background-color:{Level1HeaderBgColor};color:{Level1HeaderColor}">
{{Module}}</h1>
{{ModuleDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Global Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Classes</h3>
{{ClassList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Modules</h3>
{{RbModulesList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Functions</h3>
{{FunctionList}}
<hr />'''
classTemplate = \
'''<hr />
<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Class}}</h2>
{{ClassDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Derived from</h3>
{{ClassSuper}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Class Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Class Methods</h3>
{{ClassMethodList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Methods</h3>
{{MethodList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Static Methods</h3>
{{StaticMethodList}}
{{MethodDetails}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
methodTemplate = \
'''<a NAME="{{Anchor}}.{{Method}}" ID="{{Anchor}}.{{Method}}"></a>
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
{{Class}}.{{Method}}{{MethodClassifier}}</h3>
<b>{{Method}}</b>(<i>{{Params}}</i>)
{{MethodDescription}}'''
constructorTemplate = \
'''<a NAME="{{Anchor}}.{{Method}}" ID="{{Anchor}}.{{Method}}"></a>
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
{{Class}} (Constructor)</h3>
<b>{{Class}}</b>(<i>{{Params}}</i>)
{{MethodDescription}}'''
rbModuleTemplate = \
'''<hr />
<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Module}}</h2>
{{ModuleDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Module Attributes</h3>
{{GlobalsList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Classes</h3>
{{ClassesList}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Functions</h3>
{{FunctionsList}}
<hr />
{{ClassesDetails}}
{{FunctionsDetails}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
rbModulesClassTemplate = \
'''<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Class}}</h2>
{{ClassDescription}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Derived from</h3>
{{ClassSuper}}
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Methods</h3>
{{MethodList}}
{{MethodDetails}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
functionTemplate = \
'''<hr />
<a NAME="{{Anchor}}" ID="{{Anchor}}"></a>
<h2 style="background-color:{CFBgColor};color:{CFColor}">{{Function}}</h2>
<b>{{Function}}</b>(<i>{{Params}}</i>)
{{FunctionDescription}}
<div align="right"><a style="color:{LinkColor}" href="#top">Up</a></div>
<hr />'''
listTemplate = \
'''<table>
{{Entries}}
</table>'''
listEntryTemplate = \
'''<tr>
<td><a style="color:{LinkColor}" href="#{{Link}}">{{Name}}</a></td>
<td>{{Deprecated}}{{Description}}</td>
</tr>'''
listEntryNoneTemplate = '''<tr><td>None</td></tr>'''
listEntryDeprecatedTemplate = '''<b>Deprecated.</b>'''
listEntrySimpleTemplate = '''<tr><td>{{Name}}</td></tr>'''
paragraphTemplate = \
'''<p>
{{Lines}}
</p>'''
parametersListTemplate = \
'''<dl>
{{Parameters}}
</dl>'''
parametersListEntryTemplate = \
'''<dt><i>{{Name}}</i></dt>
<dd>
{{Description}}
</dd>'''
parameterTypesListEntryTemplate = \
'''<dt><i>{{Name}}</i> ({{Type}})</dt>
<dd>
{{Description}}
</dd>'''
returnsTemplate = \
'''<dl>
<dt>Returns:</dt>
<dd>
{{0}}
</dd>
</dl>'''
returnTypesTemplate = \
'''<dl>
<dt>Return Type:</dt>
<dd>
{{0}}
</dd>
</dl>'''
exceptionsListTemplate = \
'''<dl>
{{Exceptions}}
</dl>'''
exceptionsListEntryTemplate = \
'''<dt>Raises <b>{{Name}}</b>:</dt>
<dd>
{{Description}}
</dd>'''
signalsListTemplate = \
'''<h4>Signals</h4>
<dl>
{{Signals}}
</dl>'''
signalsListEntryTemplate = \
'''<dt>{{Name}}</dt>
<dd>
{{Description}}
</dd>'''
eventsListTemplate = \
'''<h4>Events</h4>
<dl>
{{Events}}
</dl>'''
eventsListEntryTemplate = \
'''<dt>{{Name}}</dt>
<dd>
{{Description}}
</dd>'''
deprecatedTemplate = \
'''<p>
<b>Deprecated.</b>
{{Lines}}
</p>'''
authorInfoTemplate = \
'''<p>
<i>Author(s)</i>:
{{Authors}}
</p>'''
seeListTemplate = \
'''<dl>
<dt><b>See Also:</b></dt>
{{Links}}
</dl>'''
seeListEntryTemplate = \
'''<dd>
{{Link}}
</dd>'''
seeLinkTemplate = '''<a style="color:{LinkColor}" {{Link}}'''
sinceInfoTemplate = \
'''<p>
<b>since</b> {{Info}}
</p>'''
#################################
## Templates for index files ##
#################################
indexBodyTemplate = '''
<h1 style="background-color:{Level1HeaderBgColor};color:{Level1HeaderColor}">
{{Title}}</h1>
{{Description}}
{{Subpackages}}
{{Modules}}'''
indexListPackagesTemplate = '''
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Packages</h3>
<table>
{{Entries}}
</table>'''
indexListModulesTemplate = '''
<h3 style="background-color:{Level2HeaderBgColor};color:{Level2HeaderColor}">
Modules</h3>
<table>
{{Entries}}
</table>'''
indexListEntryTemplate = \
'''<tr>
<td><a style="color:{LinkColor}" href="{{Link}}">{{Name}}</a></td>
<td>{{Description}}</td>
</tr>'''
| testmana2/test | DocumentationTools/TemplatesListsStyle.py | Python | gpl-3.0 | 7,017 |
from flask import render_template
from app import app, db, models
import json
@app.route('/')
@app.route('/index')
def index():
# obtain today's words
# words = models.Words.query.all()
# words = list((str(word[0]), word[1]) for word in db.session.query(models.Words, db.func.count(models.Words.id).label("total")).group_by(models.Words.word).order_by("total DESC"))
data = db.session.query(models.Words, db.func.count(models.Words.id).label("total")).group_by(models.Words.word).order_by("total DESC").all()[:50]
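    # split the (word, count) rows into parallel lists for the template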
words = [_[0].word for _ in data]
count = [_[1] for _ in data]
    return render_template('index.html', words=words, count=count)
| matbra/radio_fearit | app/views.py | Python | gpl-3.0 | 670 |
'''
Copyright 2015 Travel Modelling Group, Department of Civil Engineering, University of Toronto
This file is part of the TMG Toolbox.
The TMG Toolbox is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The TMG Toolbox is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the TMG Toolbox. If not, see <http://www.gnu.org/licenses/>.
'''
#---METADATA---------------------
'''
Export Count Station Link Correspondence File
Authors: David King
Latest revision by:
[Description]
'''
#---VERSION HISTORY
'''
0.0.1 Created
0.1.1 Created on 2015-03-13 by David King
'''
import inro.modeller as _m
import csv
import traceback as _traceback
from contextlib import contextmanager
from contextlib import nested
_mm = _m.Modeller()
_util = _mm.module('tmg.common.utilities')
_tmgTPB = _mm.module('tmg.common.TMG_tool_page_builder')
class ExportCountStationLocation(_m.Tool()):
version = '0.1.1'
tool_run_msg = ""
number_of_tasks = 1
Scenario = _m.Attribute(_m.InstanceType)
CordonExportFile = _m.Attribute(str)
def __init__(self):
#---Init internal variables
self.TRACKER = _util.ProgressTracker(self.number_of_tasks) #init the ProgressTracker
#---Set the defaults of parameters used by Modeller
self.Scenario = _mm.scenario #Default is primary scenario
def page(self):
pb = _tmgTPB.TmgToolPageBuilder(self, title="Export Count Station-Link Correspondence File v%s" %self.version,
description="Exports a link and countpost correspondence file.\
                        Contained within is the link on which each countpost is found.\
Assumes that count stations are defined by '@stn1'.",
branding_text="- TMG Toolbox")
if self.tool_run_msg != "": # to display messages in the page
pb.tool_run_status(self.tool_run_msg_status)
pb.add_header("EXPORT CORDON DATA FILE")
pb.add_select_file(tool_attribute_name='CordonExportFile',
window_type='save_file', file_filter='*.csv',
title="Cordon Count File",
note="Select Export Location:\
<ul><li>countpost_id</li>\
<li>link id (inode-jnode)</li>\
</ul>")
return pb.render()
def __call__(self, Scen, TruthTable):
self.tool_run_msg = ""
self.TRACKER.reset()
self.Scenario = Scen
self.CordonTruthTable = TruthTable
try:
self._Execute()
except Exception as e:
self.tool_run_msg = _m.PageBuilder.format_exception(
e, _traceback.format_exc())
raise
self.tool_run_msg = _m.PageBuilder.format_info("Done.")
def run(self):
self.tool_run_msg = ""
self.TRACKER.reset()
try:
self._Execute()
except Exception as e:
self.tool_run_msg = _m.PageBuilder.format_exception(
e, _traceback.format_exc())
raise
self.tool_run_msg = _m.PageBuilder.format_info("Done.")
def _Execute(self):
with _m.logbook_trace(name="{classname} v{version}".format(classname=(self.__class__.__name__), version=self.version),
attributes=self._GetAtts()):
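            # collect a (countpost id, link id) pair for every link flagged
            # with a count station (@stn1 > 0)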
            network = self.Scenario.get_network()
            lines = []
            for link in network.links():
if int(link['@stn1']) > 0:
lines.append((link['@stn1'],link.id))
with open(self.CordonExportFile, 'w') as writer:
writer.write("Countpost ID ,Link (i-node j-node)")
for line in lines:
line = [str(c) for c in line]
writer.write("\n" + ','.join(line))
#----SUB FUNCTIONS---------------------------------------------------------------------------------
def _GetAtts(self):
atts = {
"Scenario" : str(self.Scenario.id),
"Version": self.version,
"self": self.__MODELLER_NAMESPACE__}
return atts
@_m.method(return_type=_m.TupleType)
def percent_completed(self):
return self.TRACKER.getProgress()
@_m.method(return_type=unicode)
def tool_run_msg_status(self):
return self.tool_run_msg | TravelModellingGroup/TMGToolbox | TMGToolbox/src/analysis/traffic/Export_Count_Station_Location.py | Python | gpl-3.0 | 5,176 |
"""
Compatibility module.
This module contains duplicated code from Python itself or 3rd party
extensions, which may be included for the following reasons:
* compatibility
* we may only need a small subset of the copied library/module
"""
import _inspect
import py3k
from _inspect import getargspec, formatargspec
from py3k import *
__all__ = []
__all__.extend(_inspect.__all__)
__all__.extend(py3k.__all__)
| beiko-lab/gengis | bin/Lib/site-packages/numpy/compat/__init__.py | Python | gpl-3.0 | 434 |
#!/bin/python
# -*- coding: utf-8 -*-
# ####################################################################
# gofed-ng - Golang system
# Copyright (C) 2016 Fridolin Pokorny, [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# ####################################################################
import sys
import traceback
from tqdm import tqdm
from common.helpers.output import log
from scenario import Scenario, SwitchAttr, Flag
class GolangDepsUpdate(Scenario):
''' update dependencies of Golang projects packaged in Fedora '''
max_depth = SwitchAttr("--max-depth", int, default=None,
help="specify commit depth limit")
since_date = SwitchAttr("--since-date", str, default=None,
help="specify since date")
skip_errors = Flag("--skip-errors",
help="errors will be reported, but the computation will not be interrupted")
def main(self):
with self.get_system() as system:
golang_pkgs = system.async_call.goland_package_listing()
stored_projects = system.async_call.deps_project_listing()
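            # for each packaged project: resolve the upstream repo, walk its
            # scm log, and analyse and store any commits not seen before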
for pkg in golang_pkgs.result:
if not pkg['name'].startswith('golang-github-'):
log.warning("Skipping %s" % pkg['name'])
# TODO: remove once support for mercurial and full package->upstream translation will be available
continue
try:
raise ValueError("value error")
print("Inspecting '%s'" % pkg['name'])
upstream_url = system.async_call.golang_package2upstream(pkg['name'])
if pkg['name'] in stored_projects.result:
stored_commits = system.async_call.deps_project_commit_listing(pkg['name'])
else:
stored_commits = None
scm_log = system.async_call.scm_log(upstream_url.result,
max_depth=self.max_depth,
since_date=self.since_date)
for commit in tqdm(scm_log.result):
log.debug("Commit %s project %s" % (commit['hash'], pkg['name']))
if not stored_commits or commit not in stored_commits.result:
file_id = system.async_call.scm_store(upstream_url.result, commit['hash'])
deps = system.async_call.deps_analysis(file_id.result)
system.async_call.deps_store_project(pkg['name'], commit['hash'], commit['time'],
deps.result, deps.meta)
                except Exception:
                    if self.skip_errors:
                        log.error(traceback.format_exc())
                    else:
                        raise
if __name__ == '__main__':
sys.exit(1)
| gofed/gofed-ng | scenarios/golangDepsUpdate.py | Python | gpl-3.0 | 3,711 |
"""
matstat docstrings
"""
# from supergame import supergame
# from supergametools import supergame
| btengels/supergametools | __init__.py | Python | gpl-3.0 | 101 |
# log moments (mean, variance, skewness, kurtosis) and quantiles
# why am I spending time creating a complex quantile and histogram
# estimator when I only need average, so far
from math import sqrt
from bisect import bisect_left
import scipy.stats as st
maxlong = 9223372036854775807
class RunningStat(object):
'''Gather single-pass statistical data from an iterable'''
    __slots__ = ('count', 'moments', 'min', 'max', 'vk', '_mean', 'accumulated')
    def __init__(self, moments=1, buckets=1, sorted=False):
        self.count = 0
        self.moments = [0] * moments # statistical moments
        #self.buckets = [0] * buckets # count of items in each bucket
        #self.percentiles = [0] * (buckets + 1) # border values between buckets
        self.vk = 0 # Welford's running sum of squared deviations
        self._mean = 0
        self.accumulated = 0
        self.min = None
        self.max = None
    def __call__(self, iterable, quantifier=float):
        '''Wrap an iterable, gathering stats as items are consumed'''
        for item in iterable:
            self.count += 1
            num = quantifier(item)
            if self.min is None or num < self.min: self.min = num
            if self.max is None or num > self.max: self.max = num
            self.add_to_moments(num)
            #index = bisect_left(self.percentiles, num)
            #self.bucket[index] += 1
            yield item
    def add_to_moments(self, num):
        oldmean = self.moments[0]
        try: newmean = oldmean + (num - oldmean) / self.count
        except ZeroDivisionError: newmean = num
        self.vk = self.vk + (num - oldmean) * (num - newmean) # Welford update
        self.moments[0] = newmean
        if len(self.moments) > 1:
            self.moments[1] = self.vk / self.count # population variance
def __len__(self):
return self.count
    def __iadd__(self, other):
        if type(other) is str:
            self._addstr(other)
        else:
            for string in other: self._addstr(string)
        return self
    #def __enter__(self): pass
    def __exit__(self, *exc): self._mean = float(self._mean / self.count)
    def _addstr(self, string):
        words = string.split()
        self.count += len(words)
        for w in words: self._mean += len(w)
    def _mean_(self):
        if type(self._mean) is int: self.__exit__()
        return self._mean
def append(self, other):
self.count += 1
self.accumulated += len(other)
@property
def mean(self): return self.moments[0]
@property
def variance(self): return self.moments[1]
    @property
    def skewness(self): return self.moments[2] # third moment, per the docstring order
class Gen(object):
__slots__ = ('inner')
def __init__(self, inner): self.inner = inner
def __iter__(self): return Iter(self, self.inner)
def __len__(self): return len(self.inner)
class Iter(object):
    __slots__ = ('generator', 'count', 'inner', 'actions')
def __new__(cls, gen, iterable, action=None):
if isinstance(iterable, cls):
return iterable
        return super().__new__(cls) # object.__new__() takes no extra args
def __init__(self, gen, iterable, action=None):
self.generator = gen
self.count = 0
self.actions = [] if action is None else [action]
self.inner = iterable \
if hasattr(iterable, '__next__') \
else iterable.__iter__()
def __iter__(self): return self
def __next__(self):
r = self.inner.__next__()
for a in self.actions: r = a(r)
self.count += 1
return r
def __len__(self): return self.generator.__len__() - self.count
z_score = st.norm.ppf((1+.95)/2)
z_sqr = z_score*z_score
def wilson_score(positive, n):
'''returns lower bound of Wilson score confidence interval for a Bernoulli
parameter
resource: http://www.evanmiller.org/how-not-to-sort-by-average-rating.html'''
assert positive <= n
    if n == 0: return float('NaN')
    p = positive / n
    zz_n = z_sqr / n
    return (p + zz_n/2 - z_score * sqrt((p * (1 - p) + zz_n/4) / n)) \
        / (1 + zz_n)
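# e.g. wilson_score(85, 100) is roughly 0.77: with 95% confidence, an item
# rated positively 85 times out of 100 has a true rate of at least that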
# trying using closure instead
def stats(gen, moments=2, readers=None):
    '''left unfinished in the original; a minimal completion sketch: wrap
    `gen` with a RunningStat collector, the closure counterpart of the class'''
    rs = RunningStat(moments)
    def generator():
        for item in rs(iter(gen)):
            yield item
    return rs, generator()
| PillowLounge/lolibot | hardcoded/statistics.py | Python | gpl-3.0 | 3,782 |
#!/usr/bin/env python
#
# Author: Pablo Iranzo Gomez ([email protected])
#
# Description: Script for monitoring host Memory status and VM's rhevm-sdk
# api and produce NAGIOS valid output
#
# Requires rhevm-sdk to work
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import sys
import optparse
from ovirtsdk.xml import params
description = """
RHEV-nagios-table-host-mem is a script for querying RHEV-M via the API to get host status.
Its goal is to output a table of host/vm status for simple monitoring via external utilities.
"""
# Option parsing
p = optparse.OptionParser("rhev-nagios-table-host-mem.py [arguments]", description=description)
p.add_option('-v', "--verbosity", dest="verbosity", help="Show messages while running", metavar='[0-n]', default=0,
type='int')
p.add_option("--host", dest="host", help="Show messages while running", metavar='host')
p.add_option("-t", "--table", dest="table", help="Input file in CSV format", metavar='table')
(options, args) = p.parse_args()
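# Example invocation (host and file names are illustrative):
#   ./rhev-nagios-table-host-mem.py --host rhev01.lab.local --table /tmp/rhev-status.csv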
# MAIN PROGRAM
if not options.host:
print("Host not defined, exiting")
sys.exit(1)
if not options.table:
print("CSV table not defined, exiting")
sys.exit(1)
try:
    f = open(options.table)  # file to process
except:
print("Problem opening the file %s" % options.table)
sys.exit(1)
# NAGIOS PRIOS:
# 0 -> ok
# 1 -> warning
# 2 -> critical
# 3 -> unknown
# By default, return unknown
# TYPE;HOST;STATE;CPU;MEM
# host;rhev01.lab.local;up;16;0.0
for line in f:
if line.split(";")[0] == "host":
if line.split(";")[1] == options.host:
            # MEM is a float like "0.0" in the sample line above
            usage = int(float(line.split(";")[4]))
            retorno = 3
            if usage >= 95:
                retorno = 2
            elif usage >= 90:
                retorno = 1
            else:
                retorno = 0
print(usage)
            sys.exit(retorno)
| DragonRoman/rhevm-utils | monitoring/rhev-nagios-table-host-mem.py | Python | gpl-3.0 | 2,283 |
def get_perm_argparser(self, args):
args = args.split(" ")
if args[0] == "nick":
self.conman.gen_send("Permission level for %s: %s" % (args[1], self.permsman.get_nick_perms(args[1])))
elif args[0] == "cmd":
if args[1].startswith("."):
args[1] = args[1][1:]
self.conman.gen_send("Permission level for %s: %s" % (args[1], self.permsman.get_cmd_perms(args[1])))
elif args[0] == "msg":
self.conman.gen_send("Message permissions for %s: %s" % (args[1], self.permsman.get_msg_perms(args[1])))
def set_perm_argparser(self, args):
args = args.split(" ")
if args[0] == "nick":
self.conman.gen_send("Setting permission level for %s: %s" % (args[1], args[2]))
self.permsman.set_nick_perms(args[1], args[2])
elif args[0] == "cmd":
if args[1].startswith("."):
args[1] = args[1][1:]
self.conman.gen_send("Setting permission level for %s: %s" % (args[1], args[2]))
self.permsman.set_cmd_perms(args[1], args[2])
elif args[0] == "msg":
args[2] = args[2].lower() == "true" or args[2] == "1"
self.conman.gen_send("Setting message permissions for %s: %s" % (args[1], args[2]))
self.permsman.set_msg_perms(args[1], args[2])
self._map("command", "getperm", get_perm_argparser)
self._map("command", "setperm", set_perm_argparser)
| vsquare95/JiyuuBot | modules/permissions.py | Python | gpl-3.0 | 1,361 |
from .base import BaseInterface
import eventlet
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from flask import Flask, render_template, session, request, send_from_directory
from flask_socketio import SocketIO, emit, join_room, leave_room, close_room, rooms, disconnect
from werkzeug.utils import secure_filename
import threading, os, time, queue
import logging, sys, json
from ..engine.network import get_allip, get_hostname
import socket
from zeroconf import ServiceInfo, Zeroconf
thread = None
thread_lock = threading.Lock()
REGIE_PATH1 = '/opt/RPi-Regie'
REGIE_PATH2 = '/data/RPi-Regie'
class RegieInterface (BaseInterface):
def __init__(self, hplayer, port, datapath):
super(RegieInterface, self).__init__(hplayer, "Regie")
self._port = port
self._datapath = datapath
self._server = None
# HTTP receiver THREAD
def listen(self):
# Advertize on ZeroConf
zeroconf = Zeroconf()
info = ServiceInfo(
"_http._tcp.local.",
"Regie._"+get_hostname()+"._http._tcp.local.",
addresses=[socket.inet_aton(ip) for ip in get_allip()],
port=self._port,
properties={},
server=get_hostname()+".local.",
)
zeroconf.register_service(info)
# Start server
self.log( "regie interface on port", self._port)
with ThreadedHTTPServer(self, self._port) as server:
self._server = server
self.stopped.wait()
self._server = None
# Unregister ZeroConf
zeroconf.unregister_service(info)
zeroconf.close()
def projectPath(self):
return os.path.join(self._datapath, 'project.json')
def projectRaw(self):
project = '{"pool":[], "project":[[]]}'
if os.path.isfile(self.projectPath()):
with open( self.projectPath(), 'r') as file:
project = file.read()
return project
# parse locally for programatic execution
def reload(self):
try:
self._project = json.loads(self.projectRaw())
except:
self._project = None
self.log("Error while parsing project..")
# print(self._project)
return self._project
# play sequence
def playseq(self, sceneIndex, seqIndex):
self.log("PLAYSEQ")
try:
# self.log('PLAYSEQ', seqIndex, sceneIndex, boxes)
orderz = []
boxes = [b for b in self._project["project"][0][sceneIndex]["allMedias"] if b["y"] == seqIndex]
for b in boxes:
peerName = self._project["pool"][ b["x"] ]["name"]
# MEDIA
order = { 'peer': peerName, 'synchro': True}
if b["media"] in ['stop', 'pause', 'unfade'] :
order["event"] = b["media"]
elif b["media"] == '...':
order["event"] = 'continue'
elif b["media"].startswith('fade'):
order["event"] = 'fade'
order["data"] = b["media"].split('fade ')[1]
else:
order["event"] = 'playthen'
order["data"] = [ self._project["project"][0][sceneIndex]["name"] + '/' + b["media"] ]
# ON MEDIA END
if 'onend' in b:
if b['onend'] == 'next':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex+1] } )
elif b['onend'] == 'prev':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex-1] } )
elif b['onend'] == 'replay':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex] } )
orderz.append(order)
# LOOP
if b["loop"] == 'loop':
orderz.append( { 'peer': peerName, 'event': 'loop', 'data': 1} )
elif b["loop"] == 'unloop':
orderz.append( { 'peer': peerName, 'event': 'unloop'} )
# LIGHT
if b["light"] and b["light"] != '...':
order = { 'peer': peerName, 'synchro': True, 'event': 'esp'}
if b["light"].startswith('light'):
order["data"] = {
'topic': 'leds/all',
'data': b["light"].split('light ')[1]
}
elif b["light"].startswith('preset'):
order["data"] = {
'topic': 'leds/mem',
'data': b["light"].split('preset ')[1]
}
elif b["light"].startswith('off'):
order["data"] = {
'topic': 'leds/stop',
'data': ''
}
orderz.append(order)
self.emit('playingseq', sceneIndex, seqIndex)
self.emit('peers.triggers', orderz, 437)
except:
self.log('Error playing Scene', sceneIndex, 'Seq', seqIndex)
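    # Shape of a single trigger order assembled above (values illustrative):
    #   {'peer': 'player1', 'synchro': True, 'event': 'playthen',
    #    'data': ['scene1/intro.mp4', {'event': 'do-playseq', 'data': [0, 2]}]}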
#
# Threaded HTTP Server
#
class ThreadedHTTPServer(object):
def __init__(self, regieinterface, port):
self.regieinterface = regieinterface
interface_path = os.path.dirname(os.path.realpath(__file__))
if os.path.isdir(REGIE_PATH1):
www_path = os.path.join(REGIE_PATH1, 'web')
elif os.path.isdir(REGIE_PATH2):
www_path = os.path.join(REGIE_PATH2, 'web')
else:
www_path = os.path.join(interface_path, 'regie')
app = Flask(__name__, template_folder=www_path)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, cors_allowed_origins="*")
#
# FLASK Routing Static
#
@app.route('/')
def index():
# self.regieinterface.log('requesting index')
return send_from_directory(www_path, 'index.html')
@app.route('/<path:path>')
def send_static(path):
# self.regieinterface.log('requesting '+path)
return send_from_directory(www_path, path)
#
# FLASK Routing API
#
# @app.route('/<path:path>')
# def send_static(path):
# # self.regieinterface.log('requesting '+path)
# return send_from_directory(www_path, path)
#
# SOCKETIO Routing
#
self.sendBuffer = queue.Queue()
def background_thread():
while True:
try:
task = self.sendBuffer.get_nowait()
if len(task) > 1: socketio.emit(task[0], task[1])
else: socketio.emit(task[0], None)
self.sendBuffer.task_done()
except queue.Empty:
socketio.sleep(0.1)
@self.regieinterface.hplayer.on('files.dirlist-updated')
def filetree_send(ev, *args):
self.sendBuffer.put( ('data', {'fileTree': self.regieinterface.hplayer.files()}) )
@self.regieinterface.hplayer.on('files.activedir-updated')
def activedir_send(ev, *args):
self.sendBuffer.put( ('data', {'scene': args[1]}) )
@self.regieinterface.hplayer.on('*.peer.*')
def peer_send(ev, *args):
event = ev.split('.')[-1]
if event == 'playingseq':
print(ev, args[0]['data'][1])
self.sendBuffer.put( ('data', {'sequence': args[0]['data'][1]}) )
else:
args[0].update({'type': event})
self.sendBuffer.put( ('peer', args[0]) )
        # !!! TODO: stop zyre monitoring when all clients are disconnected
@socketio.on('connect')
def client_connect():
self.regieinterface.log('New Remote Regie connected')
@socketio.on('save')
def save(data):
try:
json.loads(data)
with open( os.path.join(self.regieinterface._datapath, 'project.json'), 'w') as file:
file.write(data)
except:
e = str(sys.exc_info()[0])
self.regieinterface.log('fail to save project: '+e+' '+data)
@socketio.on('init')
def init(data):
# send project
emit('data', self.projectData())
# Start update broadcaster
global thread
with thread_lock:
if thread is None:
thread = socketio.start_background_task(target=background_thread)
@socketio.on('register')
def register(data):
# enable peer monitoring
self.regieinterface.emit('peers.getlink')
self.regieinterface.emit('peers.subscribe', ['status', 'settings', 'playingseq'])
@socketio.on('event')
def event(data):
self.regieinterface.emit('peers.triggers', data, 437)
# prepare sub-thread
self.server_thread = threading.Thread(target=lambda:socketio.run(app, host='0.0.0.0', port=port))
self.server_thread.daemon = True
# watchdog project.json
self.watcher()
# internal load project
self.regieinterface.reload()
def start(self):
self.server_thread.start()
def stop(self):
#self.server.stop()
pass
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def projectData(self):
data={
'fullproject': self.regieinterface.projectRaw(),
'fileTree': self.regieinterface.hplayer.files()
}
return data
def watcher(self):
def onchange(e):
self.regieinterface.log('project updated ! pushing it...')
self.regieinterface.reload()
self.sendBuffer.put( ('data', self.projectData()) )
        handler = PatternMatchingEventHandler(["*/project.json"], None, False, True)
handler.on_any_event = onchange
self.projectObserver = Observer()
self.projectObserver.schedule(handler, os.path.dirname(self.regieinterface.projectPath()))
try:
self.projectObserver.start()
except:
            self.regieinterface.log('project.json not found')
| Hemisphere-Project/HPlayer2 | core/interfaces/regie.py | Python | gpl-3.0 | 10,996 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from pyquery import PyQuery
from novel import serial, utils
BASE_URL = 'http://www.sto.cc/{}-1/'
PAGE_URL = 'http://www.sto.cc/{}-{}/'
class StoTool(utils.Tool):
def __init__(self):
super().__init__()
word_list = (
's思s兔s網s文s檔s下s載s與s在s線s閱s讀s',
's本s作s品s由s思s兔s網s提s供s下s載s與s在s線s閱s讀s',
's本s作s品s由s思s兔s在s線s閱s讀s網s友s整s理s上s傳s',
's思s兔s在s線s閱s讀s',
's思s兔s文s檔s共s享s與s在s線s閱s讀s',
)
symbol_list = (
'\^_\^', ':-\)', '\^o\^', '-_-!',
'││', '//', '\$\$',
)
symbols = '|'.join(symbol_list).join(('(.|', ')'))
pats = (symbols.join(w.split('s')) for w in word_list)
symbol_extras = ('',)
self.remove_extras.extend(
(re.compile(pat) for pat in pats)
)
self.remove_extras.extend(
(re.compile(pat) for pat in symbol_extras)
)
class Sto(serial.SerialNovel):
def __init__(self, tid):
super().__init__(utils.base_to_url(BASE_URL, tid), '#BookContent',
tid=tid)
self.tool = StoTool
def get_title_and_author(self):
st = self.doc('meta').filter(
lambda i, e: PyQuery(e).attr('name') == 'keywords'
).attr('content')
return re.match(r'(.*?),(.*?),.*', st).groups()
@property
def chapter_list(self):
st = re.search(
r'ANP_goToPage\("Page_select",(\d+),(\d+),1\);', self.doc.html())
if st.group(1) == self.tid:
page_num = int(st.group(2))
else:
raise Exception('Something strange may happened.')
return [(i + 1, PAGE_URL.format(self.tid, i + 1), '第{:d}頁'.format(i + 1))
for i in range(page_num)]
def get_intro(self):
intro = self.doc('meta').filter(
lambda i, e: PyQuery(e).attr('name') == 'description'
).attr('content')
return intro
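# Usage sketch (the tid below is a made-up example):
#   novel = Sto('123456')
#   title, author = novel.get_title_and_author()
#   pages = novel.chapter_list  # [(1, 'http://www.sto.cc/123456-1/', '第1頁'), ...]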
| wangjiezhe/FetchNovels | novel/sources/sto.py | Python | gpl-3.0 | 2,128 |
#!/afs/bx.psu.edu/project/pythons/py2.7-linux-x86_64-ucs4/bin/python2.7
"""
Convert wiggle data to a binned array. This assumes the input data is on a
single chromosome and does no sanity checks!
usage: %prog score_file out_file < wiggle_data
-c, --comp=type: compression type (none, zlib, lzo)
"""
from __future__ import division
import sys
import psyco_full
import bx.wiggle
from bx.binned_array import BinnedArray
from bx_extras.fpconst import isNaN
from bx.cookbook import doc_optparse
from bx import misc
def main():
# Parse command line
options, args = doc_optparse.parse( __doc__ )
try:
if options.comp:
comp_type = options.comp
else:
comp_type = None
score_fname = args[0]
out_fname = args[1]
except:
doc_optparse.exit()
scores = BinnedArray()
## last_chrom = None
for i, ( chrom, pos, val ) in enumerate( bx.wiggle.Reader( misc.open_compressed( score_fname ) ) ):
#if last_chrom is None:
# last_chrom = chrom
#else:
# assert chrom == last_chrom, "This script expects a 'wiggle' input on only one chromosome"
scores[pos] = val
# Status
if i % 10000 == 0: print i, "scores processed"
out = open( out_fname, "w" )
if comp_type:
scores.to_file( out, comp_type=comp_type )
else:
scores.to_file( out )
out.close()
if __name__ == "__main__": main()
| mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/bx_python-0.7.2-py2.7-linux-x86_64-ucs4.egg/EGG-INFO/scripts/wiggle_to_binned_array.py | Python | gpl-3.0 | 1,461 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-06-24 23:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problems', '0018_origintag_helptexts'),
]
operations = [
migrations.CreateModel(
name='AlgorithmTagProposal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='problems.Problem')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='problems.AlgorithmTag')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'algorithm tag proposal',
'verbose_name_plural': 'algorithm tag proposals',
},
),
migrations.CreateModel(
name='DifficultyProposal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('difficulty', models.CharField(max_length=10)),
('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='problems.Problem')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'difficulty proposal',
'verbose_name_plural': 'difficulty proposals',
},
),
]
| sio2project/oioioi | oioioi/problems/migrations/0019_algorithmtagproposal_difficultyproposal.py | Python | gpl-3.0 | 1,893 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-28 15:17
from __future__ import unicode_literals
import DjangoUeditor.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogadmin', '0006_auto_20170827_1142'),
]
operations = [
migrations.CreateModel(
name='BookReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
],
options={
'ordering': ['-update_time'],
'verbose_name': '\u4e66\u520a\u8bc4\u8bba',
'verbose_name_plural': '\u4e66\u520a\u8bc4\u8bba',
},
),
migrations.CreateModel(
name='Essay',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
],
options={
'ordering': ['-update_time'],
'verbose_name': '\u6742\u6587',
'verbose_name_plural': '\u6742\u6587',
},
),
migrations.CreateModel(
name='FilmReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(blank=True, max_length=128, null=True, verbose_name='\u6807\u9898')),
('tag', models.CharField(blank=True, max_length=32, null=True, verbose_name='\u6807\u7b7e')),
('pub_time', models.DateTimeField(auto_now_add=True, verbose_name='\u53d1\u5e03\u65f6\u95f4')),
('update_time', models.DateTimeField(auto_now=True, null=True, verbose_name='\u66f4\u65b0\u65f6\u95f4')),
('content', DjangoUeditor.models.UEditorField(blank=True, default='', verbose_name='\u6b63\u6587')),
],
options={
'ordering': ['-update_time'],
'verbose_name': '\u5f71\u89c6\u8bc4\u8bba',
'verbose_name_plural': '\u5f71\u89c6\u8bc4\u8bba',
},
),
migrations.AlterModelOptions(
name='article',
options={'ordering': ['-update_time'], 'verbose_name': '\u6280\u672f\u7c7b\u535a\u5ba2', 'verbose_name_plural': '\u6280\u672f\u7c7b\u535a\u5ba2'},
),
migrations.AlterField(
model_name='article',
name='category',
field=models.CharField(choices=[('web', 'Web\u5f00\u53d1'), ('linux', '\u7cfb\u7edf\u8fd0\u7ef4'), ('algorithm', '\u7b97\u6cd5'), ('language', '\u7f16\u7a0b\u8bed\u8a00'), ('others', '\u5176\u4ed6')], default='web', max_length=64, verbose_name='\u7c7b\u522b'),
),
]
| baike21/blog | blogadmin/migrations/0007_auto_20170828_2317.py | Python | gpl-3.0 | 3,940 |
from twisted.trial import unittest
from rtpmidi.engines.midi.recovery_journal_chapters import *
class TestNote(unittest.TestCase):
def setUp(self):
self.note = Note()
def test_note_on(self):
#simple
note_to_test = self.note.note_on(100, 90)
#Testing type
assert(type(note_to_test)==str), self.fail("Wrong type return")
#length test
assert(len(note_to_test)==2), \
self.fail("len of note On is higher than 2 octet")
#with all args
note_to_test = self.note.note_on(100, 90, 0, 1)
#length test
assert(len(note_to_test)==2), \
self.fail("len of note On is higher than 2 octet")
def test_parse_note_on(self):
#Simple
note_to_test = self.note.note_on(100, 90)
res_n = self.note.parse_note_on(note_to_test)
#Testing content
assert(res_n[1] == 100), self.fail("Note number is not respected")
assert(res_n[3] == 90), self.fail("Note velocity is not respected")
#With all args
note_to_test = self.note.note_on(100, 90, 0, 1)
res_n = self.note.parse_note_on(note_to_test)
#Testing content
assert(res_n[0] == 1), self.fail("S mark is not respected")
assert(res_n[1] == 100), self.fail("Note number is not respected")
assert(res_n[2] == 0), self.fail("Y mark not respected")
assert(res_n[3] == 90), self.fail("Note velocity is not respected")
def test_note_off(self):
#list of notes to test (note from the same midi channel)
plist = [[[128, 57, 100],1000], [[144, 4, 0],1000], \
[[144, 110, 0],1000], [[144, 112, 0],1000]]
#setting low and high like in create_chapter_n
high = 113 / 8
low = 4 / 8
#selecting note off like in create_chapter_n
note_off_list = [ plist[i][0][1] for i in range(len(plist))\
if (plist[i][0][0]&240 == 128) or \
(plist[i][0][2] == 0) ]
res = self.note.note_off(note_off_list, low, high)
#type test
assert(type(res)==str), self.fail("Wrong type return")
#checking size
size_wait = high - low + 1
assert(len(res) == size_wait), \
self.fail("Problem of size with note off creation")
def test_parse_note_off(self):
"""Test parse note off"""
#list of notes to test
#plist = [[[128, 120, 100],1000],[[145, 4, 0],1000],\
# [[145, 110, 0],1000], [[145, 112, 0],1000]]
#setting low and high like in create_chapter_n
note_off_test = [12, 57, 112, 114 ]
high = 115 / 8
low = 12 / 8
res = self.note.note_off(note_off_test, low, high)
#testing the result of parsing
res_parsed = self.note.parse_note_off(res, low, high)
#Testing type
assert(type(res_parsed)==list), self.fail("Wrong type returned")
#res_parsed.sort()
#Testing content
note_off_test = [12, 57, 112, 114 ]
for i in range(len(note_off_test)):
assert(res_parsed[i][1]==note_off_test[i]), \
self.fail("Problem getting the good value for note off encoded")
class TestChapterP(unittest.TestCase):
def setUp(self):
self.chapter_p = ChapterP()
#program change with msb and lsb
self.plist = [[[176, 0, 75], 1000], [[176, 32, 110], 1000], \
[[192, 110, 0], 1000]]
#program change without msb and lsb
self.plist_1 = [[[192, 110, 0], 1000]]
def test_update(self):
"""Testing chapter P creation from a list (with MSB and LSB)"""
self.chapter_p.update(self.plist)
chapter = self.chapter_p.content
#Testing len
assert(len(chapter)==3), \
self.fail("Size of chapter p is not 24 bits!!!")
#Testing type
assert(type(chapter)==str), self.fail("Problem of type")
#Testing content
size, chapter_parse, marker_s, marker_x, marker_b \
= self.chapter_p.parse(chapter)
#Testing content
assert(marker_s==1), \
self.fail("Problem getting right value of S")
assert(chapter_parse[0][1]==110), \
self.fail("Problem getting right value of PROGRAM")
assert(marker_b==1), \
self.fail("Problem getting right value of B")
assert(chapter_parse[1][2]==75), \
self.fail("Problem getting right value of MSB")
assert(marker_x==0), \
self.fail("Problem getting right value of X")
assert(chapter_parse[2][2]==110), \
self.fail("Problem getting right value of LSB")
def test_update_1(self):
"""Testing chapter P creation from a list (without MSB and LSB)"""
self.chapter_p.update(self.plist_1)
chapter = self.chapter_p.content
#Testing len
assert(len(chapter)==3), \
self.fail("Size of chapter p is not 24 bits!!!")
#Testing type
assert(type(chapter)==str), self.fail("Problem of type")
#Testing content
size, chapter_parse, marker_s, marker_x, marker_b \
= self.chapter_p.parse(chapter)
#Testing content
assert(marker_s==1), \
self.fail("Problem getting right value of S")
assert(chapter_parse[0][1]==110), \
self.fail("Problem getting right value of PROGRAM")
assert(marker_b==0), \
self.fail("Problem getting right value of B")
assert(marker_x==0), \
self.fail("Problem getting right value of X")
class TestChapterC(unittest.TestCase):
def setUp(self):
self.chapter_c = ChapterC()
self.plist = []
for i in range(127):
self.plist.append([[176, i, 100],6])
def test_header(self):
"""Test header creation ChapterC"""
#Creating header
header = self.chapter_c.header(10, 1)
#Testing type
assert(type(header)==str), self.fail("Wrong type returned")
#Testing length
assert(len(header)==1), self.fail("Wrong header size")
def test_parse_header(self):
"""Test header parsing ChapterC"""
#Creating header
header = self.chapter_c.header(10, 1)
#Parsing header
header_parsed = self.chapter_c.parse_header(header)
#Testing type
assert(type(header_parsed)==tuple), self.fail("Wrong size returned")
#Testing content
assert(header_parsed[0]==1), self.fail("Wrong marker_s value")
assert(header_parsed[1]==10), self.fail("Wrong length value")
def test_create_log_c(self):
"""Test create log C (individual component from ChapterC"""
res = self.chapter_c.create_log_c(0, 110, 1, 90)
assert(type(res)==str), self.fail("Wrong type returned")
assert(len(res)==2), self.fail("Wrong size returned")
def test_parse_log_c(self):
"""Test parsing individual component from chapterC"""
res = self.chapter_c.create_log_c(0, 110, 1, 90)
res_parsed = self.chapter_c.parse_log_c(res)
assert(res_parsed[0]==0), self.fail("Wrong value for marker_s")
assert(res_parsed[1]==110), self.fail("Wrong value for number")
assert(res_parsed[2]==1), self.fail("Wrong value for marker_a")
assert(res_parsed[3]==90), self.fail("Wrong value for value")
def test_update(self):
"""Testing chapter C creation"""
self.chapter_c.update(self.plist)
assert(type(self.chapter_c.content)==str), self.fail("Wrong type returned")
#length calc header == 1 + 2 * length
length_wait = 1 + 2 * len(self.plist)
assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned")
def test_update_1(self):
self.plist.append([[176, 42, 100],6])
self.chapter_c.update(self.plist)
length_wait = 1 + 2 * 127
assert(len(self.chapter_c.content)==length_wait), self.fail("Wrong length returned")
def test_parse(self):
"""Test chapter C parsing"""
self.chapter_c.update(self.plist)
size, parsed_res, marker_s = self.chapter_c.parse(self.chapter_c.content)
assert(len(parsed_res)==len(self.plist)), \
self.fail("Wrong number of command returned")
for i in range(len(self.plist)):
assert(parsed_res[i][0]==self.plist[i][0][0]), \
self.fail("Wrong value returned for cmd")
assert(parsed_res[i][1]==self.plist[i][0][1]), \
self.fail("Wrong value returned for pitch")
assert(parsed_res[i][2]==self.plist[i][0][2]), \
self.fail("Wrong value returned for velocity")
def test_trim(self):
plist = []
plist.append([[176, 42, 100],6])
plist.append([[176, 43, 100],7])
plist.append([[176, 44, 100],8])
self.chapter_c.update(plist)
self.chapter_c.trim(7)
assert(len(self.chapter_c.controllers)==1), self.fail("Problem erasing controllers on trim")
def test_update_highest(self):
plist = []
plist.append([[176, 42, 100],6])
plist.append([[176, 43, 100],7])
plist.append([[176, 44, 100],8])
self.chapter_c.update(plist)
assert(self.chapter_c.highest==8), \
self.fail("Problem with highest on update")
self.chapter_c.trim(7)
assert(self.chapter_c.highest==8), \
self.fail("Problem with highest on trim(1)")
self.chapter_c.trim(8)
assert(self.chapter_c.highest==0), \
self.fail("Problem with highest on trim(2)")
class TestChapterW(unittest.TestCase):
def setUp(self):
self.chapter_w = ChapterW()
self.plist = [[[224, 0, 120], 6], [[224, 1, 110], 6]]
def test_update(self):
"""Test create chapter W"""
self.chapter_w.update(self.plist)
assert(type(self.chapter_w.content)==str), self.fail("Wrong type returned")
assert(len(self.chapter_w.content)==2), \
self.fail("Wrong size for chapter W part in recovery journal")
def test_parse(self):
self.chapter_w.update(self.plist)
size, res_2, mark_s = self.chapter_w.parse(self.chapter_w.content)
assert(mark_s == 1), \
self.fail("Wrong value for S bit in Chapter W")
assert(res_2[0][2]==120), \
self.fail("Wrong value for wheel_1 in Chapter W")
assert(res_2[1][2]==110), \
self.fail("Wrong value for wheel_2 in Chapter W")
def test_trim(self):
self.chapter_w.update(self.plist)
self.chapter_w.trim(6)
for data in self.chapter_w.data_list:
assert(data[0]==0), self.fail("Problem trimming chapter")
assert(self.chapter_w.highest==0), self.fail("Wrong update for highest")
class TestChapterN(unittest.TestCase):
def setUp(self):
self.chapter_n = ChapterN()
self.plist_on = []
self.plist_off = []
#List of notes to test
#Note on
for i in range(127):
self.plist_on.append([[144, i, 100],6])
#Note off
for i in range(127):
self.plist_off.append([[128, i, 100],7])
def test_header(self):
"""Test Create header of chapterN """
#Creating chapter
self.chapter_n.update(self.plist_on)
res = self.chapter_n.header()
#length type test
assert(len(res)==2), self.fail("length of header is not good")
assert(type(res)==str), self.fail("Wrong type return")
def test_parse_header(self):
"""Test parse header of ChapterN"""
#Creating chapter
self.chapter_n.update(self.plist_off)
res = self.chapter_n.header()
#Parsing
res_parsed = self.chapter_n.parse_header(res)
#Testing type
assert(type(res_parsed)==tuple), self.fail("Wrong type return")
#Testing content
assert(res_parsed[1]==0), \
self.fail("Problem getting good value of LEN")
assert(res_parsed[2]==0), \
self.fail("Problem getting good value of LOW")
assert(res_parsed[3]==15), \
self.fail("Problem getting good value of HIGH")
def test_update(self):
"""Update with 127 note_off"""
self.chapter_n.update(self.plist_off)
#Test len content
length_wait = 128 / 8 + 2
assert(len(self.chapter_n.content)==length_wait), \
self.fail("Wrong size for chapter encoded returned")
#Test note_on
assert(len(self.chapter_n.note_on)==0), \
self.fail("Wrong nb of note on recorded")
#Test note_off
assert(len(self.chapter_n.note_off)==127), \
self.fail("Wrong nb of note off recorded")
#Test low
assert(self.chapter_n.low==0), self.fail("Wrong low calculation")
#Test high
assert(self.chapter_n.high==15), self.fail("Wrong high calculation")
#TEst highest
assert(self.chapter_n.highest==7), self.fail("Wrong highest saved")
def test_update_1(self):
"""Update with 127 note_on"""
self.chapter_n.update(self.plist_on)
#Test len content
length_wait = 127 * 2 + 2
assert(len(self.chapter_n.content)==length_wait), \
self.fail("Wrong size for chapter encoded returned")
#Test note_on
assert(len(self.chapter_n.note_on)==127), \
self.fail("Wrong nb of note on recorded")
#Test note_off
assert(len(self.chapter_n.note_off)==0), \
self.fail("Wrong nb of note off recorded")
#Test low
assert(self.chapter_n.low==0), self.fail("Wrong low calculation")
#Test high
assert(self.chapter_n.high==0), self.fail("Wrong high calculation")
#TEst highest
assert(self.chapter_n.highest==6), self.fail("Wrong highest saved")
def test_update_2(self):
"""Update with note_on / off and ..."""
self.plist_on.append([[144, 42, 100],6])
self.chapter_n.update(self.plist_on)
#Test len content
length_wait = 127 * 2 + 2
assert(len(self.chapter_n.content)==length_wait), \
self.fail("Wrong size for chapter encoded returned")
assert(len(self.chapter_n.note_on)==127), \
self.fail("Wrong nb of note on recorded")
self.chapter_n.update(self.plist_off)
#Test len content
length_wait = 128 / 8 + 2
assert(len(self.chapter_n.content)==length_wait), \
self.fail("Wrong size for chapter encoded returned")
#Test note_on
assert(len(self.chapter_n.note_on)==0), \
self.fail("Wrong nb of note on recorded")
#Test note_off
assert(len(self.chapter_n.note_off)==127), \
self.fail("Wrong nb of note off recorded")
def test_parse(self):
""" Test parse chapter N with several notes"""
#creating chapter
self.chapter_n.update(self.plist_off)
size, notes_parsed = self.chapter_n.parse(self.chapter_n.content)
assert(len(notes_parsed)==127), self.fail("Wrong number of notes returned")
assert(size==18), self.fail("Wrong size of encoded chapter")
def test_parse_2(self):
off_mont = [[[128, 62, 100],1000]]
self.chapter_n.update(off_mont)
size, notes_parsed = self.chapter_n.parse(self.chapter_n.content)
def test_trim(self):
self.chapter_n.update(self.plist_off)
self.chapter_n.trim(6)
#Test highest
assert(self.chapter_n.highest==7), \
self.fail("Wrong highest saved")
#Test note_on
assert(len(self.chapter_n.note_on)==0), \
self.fail("Wrong nb of note on recorded")
#Test note_off
assert(len(self.chapter_n.note_off)==127), \
self.fail("Wrong nb of note off recorded")
self.chapter_n.trim(7)
assert(len(self.chapter_n.note_off)==0), \
self.fail("Wrong nb of note off recorded after trim")
def test_update_highest(self):
plist = []
plist.append([[144, 1, 100],6])
plist.append([[144, 1, 100],7])
plist.append([[144, 1, 100],8])
self.chapter_n.update(plist)
assert(self.chapter_n.highest==8), \
self.fail("wrong update of highest on update")
self.chapter_n.trim(7)
assert(self.chapter_n.highest==8), \
self.fail("wrong update of highest on trim")
self.chapter_n.trim(8)
assert(self.chapter_n.highest==0), \
self.fail("wrong update of highest on trim")
class TestChapterT(unittest.TestCase):
def setUp(self):
self.chap_t = ChapterT()
def test_update(self):
"""Test Create Chapter T (After Touch)"""
plist = [[[208, 80, 98], 1000]]
self.chap_t.update(plist)
res = self.chap_t.content
assert(type(res)==str), self.fail("Wrong type returned")
assert(len(res) == 1), self.fail("Wrong size returned")
assert(self.chap_t.highest==1000), self.fail("Problem with highest update")
def test_parse(self):
"""Test parse Chapter T"""
self.chap_t.update( [[[208, 80, 0], 1000]])
res = self.chap_t.content
size, midi_cmd = self.chap_t.parse(res)
pressure = midi_cmd[0][1]
assert(size==1), self.fail("Wrong size returned")
assert(pressure==80), self.fail("Wrong value returned for pressure")
class TestChapterA(unittest.TestCase):
def setUp(self):
self.chap_a = ChapterA()
def test_header(self):
"""Test header for Chapter A"""
res = self.chap_a.header(1, 127)
assert(type(res)==str), self.fail("Wrong type returned")
assert(len(res)==1), self.fail("Wrong size returned")
def test_parse_header(self):
"""Test parse header Chapter A"""
res = self.chap_a.header(1, 127)
marker_s, length = self.chap_a.parse_header(res)
assert(marker_s==1), self.fail("Wrong value returned for marker S")
assert(length==127), self.fail("Wrong value returned for length")
def test_create_log_a(self):
"""Test Create log A"""
res = self.chap_a.create_log_a(1, 127, 1, 127)
assert(type(res)==str), self.fail("Wrong type returned")
assert(len(res)==2), self.fail("Wrong size returned")
def test_parse_log_a(self):
"""Test Parse log A"""
res = self.chap_a.create_log_a(1, 127, 1, 110)
marker_s, notenum, marker_x, pressure = self.chap_a.parse_log_a(res)
assert(marker_s==1), self.fail("Wrong value returned for marker S")
assert(notenum==127), self.fail("Wrong value returned for length")
assert(marker_x==1), self.fail("Wrong value returned for marker S")
assert(pressure==110), self.fail("Wrong value returned for length")
def test_update(self):
"""Test create Chapter A"""
midi_cmd = [[[160, 80, 98], 1000], [[160, 82, 90], 1000]]
self.chap_a.update(midi_cmd)
res = self.chap_a.content
len_expected = 1 + 2 * len(midi_cmd)
assert(type(res)==str), self.fail("Wrong type returned")
assert(len(res)==len_expected), self.fail("Wrong size returned")
def test_update_1(self):
"""Test create Chapter A with a big amount of commands"""
#With 127 notes (max is 127)
midi_cmd = []
for i in range(127):
midi_cmd.append([[160, i, 98], 1])
self.chap_a.update(midi_cmd)
#Test content
res = self.chap_a.content
size, marker_s, midi_cmd_parsed = self.chap_a.parse(res)
size_waited = 1 + 2 *127
assert(size==size_waited), self.fail("Wrong size returned for 127 notes(1) !")
midi_cmd = []
midi_cmd.append([[160, 42, 98], 2])
self.chap_a.update(midi_cmd)
#Test content
res = self.chap_a.content
size, marker_s, midi_cmd_parsed = self.chap_a.parse(res)
assert(size==size_waited), self.fail("Wrong size returned for 127 notes(2) !")
def test_update_2(self):
"""Test create Chapter A with a big amount of commands
        in a single function call"""
#With 127 notes (max is 127)
midi_cmd = []
for i in range(127):
midi_cmd.append([[160, i, 98], 1])
for i in range(127):
midi_cmd.append([[160, i, 98], 1])
self.chap_a.update(midi_cmd)
#Test content
res = self.chap_a.content
size, marker_s, midi_cmd_parsed = self.chap_a.parse(res)
size_waited = 1 + 2 *127
assert(size==size_waited), self.fail("Wrong size returned for 127 notes(1) !")
def test_parse(self):
"""Test parsing chapterA"""
midi_cmd = [[[160, 80, 98], 1000], [[160, 82, 90], 1000]]
self.chap_a.update(midi_cmd)
res = self.chap_a.content
size, marker_s, midi_cmd_parsed = self.chap_a.parse(res)
assert(size==5), self.fail("Wrong value for size returned")
assert(marker_s==1), self.fail("Wrong value for marker_s returned")
        assert(len(midi_cmd_parsed)==len(midi_cmd)), self.fail("Wrong size returned")
for i in range(len(midi_cmd)):
assert(midi_cmd[i][0]==midi_cmd_parsed[i]), \
self.fail("Wrong value returned")
def test_trim(self):
"""Test trim without note remplacement"""
#Adding Packet 1000
midi_cmd = [[[160, 80, 98], 1000], [[160, 82, 90], 1000]]
self.chap_a.update(midi_cmd)
#Adding Packet 1001
midi_cmd = [[[160, 84, 98], 1001], [[160, 86, 90], 1001]]
self.chap_a.update(midi_cmd)
#Adding Packet 1002
midi_cmd = [[[160, 88, 98], 1002], [[160, 90, 90], 1002]]
self.chap_a.update(midi_cmd)
self.chap_a.trim(1001)
res = self.chap_a.parse(self.chap_a.content)
def test_update_highest(self):
#Adding Packet 1000
midi_cmd = [[[160, 80, 98], 1000], [[160, 82, 90], 1000]]
self.chap_a.update(midi_cmd)
self.chap_a.update_highest()
assert(self.chap_a.highest==1000), \
self.fail("Update problem for highest after an update")
#Adding Packet 1001
midi_cmd = [[[160, 84, 98], 1001], [[160, 86, 90], 1001]]
self.chap_a.update(midi_cmd)
self.chap_a.update_highest()
assert(self.chap_a.highest==1001), \
self.fail("Update problem for highest after an update")
self.chap_a.trim(1001)
assert(self.chap_a.highest==0), \
self.fail("Update problem for highest after an trim")
| avsaj/rtpmidi | rtpmidi/test/test_recovery_journal_chapters.py | Python | gpl-3.0 | 22,784 |
#!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2014-2017 DESY, Jan Kotanski <[email protected]>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# user data tab
""" user data tab """
try:
from taurus.external.qt import Qt
except Exception:
from taurus.qt import Qt
from .EdListDlg import EdListWg
import logging
#: (:obj:`logging.Logger`) logger object
logger = logging.getLogger(__name__)
class Data(Qt.QObject):
""" User data tab widget
"""
#: (:class:`taurus.qt.Qt.pyqtSignal`) dirty signal
dirty = Qt.pyqtSignal()
def __init__(self, ui, state=None, simpleMode=False):
""" constructor
:param ui: ui instance
:type ui: :class:`taurus.qt.qtgui.util.ui.__UI`
:param state: server state
:type state: :class:`nxsselector.ServerState.ServerState`
:param simpleMode: if simple display mode
:type simpleMode: :obj:`bool`
"""
Qt.QObject.__init__(self)
#: (:class:`taurus.qt.qtgui.util.ui.__UI`) ui instance
self.ui = ui
#: (:class:`nxsselector.ServerState.ServerState`) server state
self.state = state
#: (:class:`taurus.qt.Qt.QLayout`)
self.glayout = None
#: (:obj:`bool`) if simple view mode
self.__simpleMode = simpleMode
#: (:class:`nxsselector.EdListWg.EdListWg`) table editing widget
self.form = EdListWg(self.ui.data)
def createGUI(self):
""" creates widget GUI
"""
self.ui.data.hide()
if self.glayout:
child = self.glayout.takeAt(0)
while child:
self.glayout.removeItem(child)
if isinstance(child, Qt.QWidgetItem):
self.glayout.removeWidget(child.widget())
child = self.glayout.takeAt(0)
self.form.dirty.disconnect(self.__setDirty)
else:
self.glayout = Qt.QHBoxLayout(self.ui.data)
if self.form:
self.form.setParent(None)
if self.__simpleMode:
self.form.disable = self.state.admindata
self.form.record = self.state.datarecord
names = self.state.clientRecords()
logger.debug("NAMES: %s " % names)
self.form.available_names = names
self.form.createGUI()
self.glayout.addWidget(self.form)
self.ui.data.update()
if self.ui.tabWidget.currentWidget() == self.ui.data:
self.ui.data.show()
self.form.dirty.connect(self.__setDirty)
def reset(self):
""" recreates widget GUI
"""
self.createGUI()
@Qt.pyqtSlot()
def __setDirty(self):
""" emits the `dirty` signal
"""
self.dirty.emit()
| nexdatas/selector | nxsselector/Data.py | Python | gpl-3.0 | 3,391 |
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import sys
import os
import urllib
import logging
import re
import time
import errno
import uuid
import datetime
from bs4 import BeautifulSoup
import geoserver
import httplib2
from urlparse import urlparse
from urlparse import urlsplit
from threading import local
from collections import namedtuple
from itertools import cycle, izip
from lxml import etree
import xml.etree.ElementTree as ET
from decimal import Decimal
from owslib.wcs import WebCoverageService
from owslib.util import http_post
from django.core.exceptions import ImproperlyConfigured
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import pre_delete
from django.template.loader import render_to_string
from django.conf import settings
from django.utils.translation import ugettext as _
from dialogos.models import Comment
from agon_ratings.models import OverallRating
from gsimporter import Client
from owslib.wms import WebMapService
from geoserver.store import CoverageStore, DataStore, datastore_from_index,\
coveragestore_from_index, wmsstore_from_index
from geoserver.workspace import Workspace
from geoserver.catalog import Catalog
from geoserver.catalog import FailedRequestError, UploadError
from geoserver.catalog import ConflictingDataError
from geoserver.resource import FeatureType, Coverage
from geoserver.support import DimensionInfo
from geonode import GeoNodeException
from geonode.layers.utils import layer_type, get_files
from geonode.layers.models import Layer, Attribute, Style
from geonode.layers.enumerations import LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES
logger = logging.getLogger(__name__)
if not hasattr(settings, 'OGC_SERVER'):
msg = (
'Please configure OGC_SERVER when enabling geonode.geoserver.'
' More info can be found at '
'http://docs.geonode.org/en/master/reference/developers/settings.html#ogc-server')
raise ImproperlyConfigured(msg)
def check_geoserver_is_up():
"""Verifies all geoserver is running,
this is needed to be able to upload.
"""
url = "%sweb/" % ogc_server_settings.LOCATION
resp, content = http_client.request(url, "GET")
msg = ('Cannot connect to the GeoServer at %s\nPlease make sure you '
'have started it.' % ogc_server_settings.LOCATION)
assert resp['status'] == '200', msg
def _add_sld_boilerplate(symbolizer):
"""
Wrap an XML snippet representing a single symbolizer in the appropriate
elements to make it a valid SLD which applies that symbolizer to all features,
including format strings to allow interpolating a "name" variable in.
"""
return """
<StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<NamedLayer>
<Name>%(name)s</Name>
<UserStyle>
<Name>%(name)s</Name>
<Title>%(name)s</Title>
<FeatureTypeStyle>
<Rule>
""" + symbolizer + """
</Rule>
</FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
_raster_template = """
<RasterSymbolizer>
<Opacity>1.0</Opacity>
</RasterSymbolizer>
"""
_polygon_template = """
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(bg)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
<CssParameter name="stroke-width">0.7</CssParameter>
</Stroke>
</PolygonSymbolizer>
"""
_line_template = """
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(bg)s</CssParameter>
<CssParameter name="stroke-width">3</CssParameter>
</Stroke>
</LineSymbolizer>
</Rule>
</FeatureTypeStyle>
<FeatureTypeStyle>
<Rule>
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
</Stroke>
</LineSymbolizer>
"""
_point_template = """
<PointSymbolizer>
<Graphic>
<Mark>
<WellKnownName>%(mark)s</WellKnownName>
<Fill>
<CssParameter name="fill">%(bg)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(fg)s</CssParameter>
</Stroke>
</Mark>
<Size>10</Size>
</Graphic>
</PointSymbolizer>
"""
_style_templates = dict(
raster=_add_sld_boilerplate(_raster_template),
polygon=_add_sld_boilerplate(_polygon_template),
line=_add_sld_boilerplate(_line_template),
point=_add_sld_boilerplate(_point_template)
)
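# For illustration: interpolating one of the templates above into a complete
# SLD document (the layer name and colors are made-up values):
#   sld = _style_templates["point"] % dict(
#       name="roads", fg="#880000", bg="#ffbbbb", mark="circle")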
def _style_name(resource):
return _punc.sub("_", resource.store.workspace.name + ":" + resource.name)
def get_sld_for(layer):
# FIXME: GeoServer sometimes fails to associate a style with the data, so
# for now we default to using a point style.(it works for lines and
# polygons, hope this doesn't happen for rasters though)
name = layer.default_style.name if layer.default_style is not None else "point"
# FIXME: When gsconfig.py exposes the default geometry type for vector
# layers we should use that rather than guessing based on the auto-detected
# style.
if name in _style_templates:
fg, bg, mark = _style_contexts.next()
return _style_templates[name] % dict(
name=layer.name,
fg=fg,
bg=bg,
mark=mark)
else:
return None
def fixup_style(cat, resource, style):
logger.debug("Creating styles for layers associated with [%s]", resource)
layers = cat.get_layers(resource=resource)
logger.info("Found %d layers associated with [%s]", len(layers), resource)
for lyr in layers:
if lyr.default_style.name in _style_templates:
logger.info("%s uses a default style, generating a new one", lyr)
name = _style_name(resource)
if style is None:
sld = get_sld_for(lyr)
else:
sld = style.read()
logger.info("Creating style [%s]", name)
style = cat.create_style(name, sld)
lyr.default_style = cat.get_style(name)
logger.info("Saving changes to %s", lyr)
cat.save(lyr)
logger.info("Successfully updated %s", lyr)
def cascading_delete(cat, layer_name):
resource = None
try:
if layer_name.find(':') != -1:
workspace, name = layer_name.split(':')
ws = cat.get_workspace(workspace)
try:
store = get_store(cat, name, workspace=ws)
except FailedRequestError:
if ogc_server_settings.DATASTORE:
try:
store = get_store(cat, ogc_server_settings.DATASTORE, workspace=ws)
except FailedRequestError:
logger.debug(
'the store was not found in geoserver')
return
else:
logger.debug(
'the store was not found in geoserver')
return
if ws is None:
logger.debug(
'cascading delete was called on a layer where the workspace was not found')
return
resource = cat.get_resource(name, store=store, workspace=workspace)
else:
resource = cat.get_resource(layer_name)
except EnvironmentError as e:
if e.errno == errno.ECONNREFUSED:
msg = ('Could not connect to geoserver at "%s"'
'to save information for layer "%s"' % (
ogc_server_settings.LOCATION, layer_name)
)
logger.warn(msg, e)
return None
else:
raise e
if resource is None:
# If there is no associated resource,
# this method can not delete anything.
# Let's return and make a note in the log.
logger.debug(
'cascading_delete was called with a non existent resource')
return
resource_name = resource.name
lyr = cat.get_layer(resource_name)
if(lyr is not None): # Already deleted
store = resource.store
styles = lyr.styles + [lyr.default_style]
cat.delete(lyr)
for s in styles:
if s is not None and s.name not in _default_style_names:
try:
cat.delete(s, purge='true')
except FailedRequestError as e:
# Trying to delete a shared style will fail
# We'll catch the exception and log it.
logger.debug(e)
# Due to a possible bug of geoserver, we need this trick for now
# TODO: inspect the issue reported by this hack. Should be solved
# with GS 2.7+
try:
cat.delete(resource, recurse=True) # This may fail
except:
cat.reload() # this preservers the integrity of geoserver
if store.resource_type == 'dataStore' and 'dbtype' in store.connection_parameters and \
store.connection_parameters['dbtype'] == 'postgis':
delete_from_postgis(resource_name)
elif store.type and store.type.lower() == 'geogig':
# Prevent the entire store from being removed when the store is a
# GeoGig repository.
return
else:
if store.resource_type == 'coverageStore':
try:
logger.info(" - Going to purge the " + store.resource_type + " : " + store.href)
cat.reset() # this resets the coverage readers and unlocks the files
cat.delete(store, purge='all', recurse=True)
cat.reload() # this preservers the integrity of geoserver
except FailedRequestError as e:
# Trying to recursively purge a store may fail
# We'll catch the exception and log it.
logger.debug(e)
else:
try:
if not store.get_resources():
cat.delete(store, recurse=True)
except FailedRequestError as e:
# Catch the exception and log it.
logger.debug(e)
def delete_from_postgis(resource_name):
"""
Delete a table from PostGIS (because Geoserver won't do it yet);
to be used after deleting a layer from the system.
"""
import psycopg2
db = ogc_server_settings.datastore_db
conn = psycopg2.connect(
"dbname='" +
db['NAME'] +
"' user='" +
db['USER'] +
"' password='" +
db['PASSWORD'] +
"' port=" +
db['PORT'] +
" host='" +
db['HOST'] +
"'")
try:
cur = conn.cursor()
cur.execute("SELECT DropGeometryTable ('%s')" % resource_name)
conn.commit()
except Exception as e:
logger.error(
"Error deleting PostGIS table %s:%s",
resource_name,
str(e))
finally:
conn.close()
def gs_slurp(
ignore_errors=True,
verbosity=1,
console=None,
owner=None,
workspace=None,
store=None,
filter=None,
skip_unadvertised=False,
skip_geonode_registered=False,
remove_deleted=False):
"""Configure the layers available in GeoServer in GeoNode.
    It returns a dictionary with overall statistics and, per layer, the name,
    the status of the operation, and the error plus traceback if it failed.
"""
if console is None:
console = open(os.devnull, 'w')
if verbosity > 1:
print >> console, "Inspecting the available layers in GeoServer ..."
cat = Catalog(ogc_server_settings.internal_rest, _user, _password)
if workspace is not None:
workspace = cat.get_workspace(workspace)
if workspace is None:
resources = []
else:
# obtain the store from within the workspace. if it exists, obtain resources
# directly from store, otherwise return an empty list:
if store is not None:
store = get_store(cat, store, workspace=workspace)
if store is None:
resources = []
else:
resources = cat.get_resources(store=store)
else:
resources = cat.get_resources(workspace=workspace)
elif store is not None:
store = get_store(cat, store)
resources = cat.get_resources(store=store)
else:
resources = cat.get_resources()
if remove_deleted:
resources_for_delete_compare = resources[:]
workspace_for_delete_compare = workspace
# filter out layers for delete comparison with GeoNode layers by following criteria:
# enabled = true, if --skip-unadvertised: advertised = true, but
# disregard the filter parameter in the case of deleting layers
resources_for_delete_compare = [
k for k in resources_for_delete_compare if k.enabled in ["true", True]]
if skip_unadvertised:
resources_for_delete_compare = [
k for k in resources_for_delete_compare if k.advertised in ["true", True]]
if filter:
resources = [k for k in resources if filter in k.name]
# filter out layers depending on enabled, advertised status:
resources = [k for k in resources if k.enabled in ["true", True]]
if skip_unadvertised:
resources = [k for k in resources if k.advertised in ["true", True]]
# filter out layers already registered in geonode
layer_names = Layer.objects.all().values_list('typename', flat=True)
if skip_geonode_registered:
resources = [k for k in resources
if not '%s:%s' % (k.workspace.name, k.name) in layer_names]
# TODO: Should we do something with these?
# i.e. look for matching layers in GeoNode and also disable?
# disabled_resources = [k for k in resources if k.enabled == "false"]
number = len(resources)
if verbosity > 1:
msg = "Found %d layers, starting processing" % number
print >> console, msg
output = {
'stats': {
'failed': 0,
'updated': 0,
'created': 0,
'deleted': 0,
},
'layers': [],
'deleted_layers': []
}
start = datetime.datetime.now()
for i, resource in enumerate(resources):
name = resource.name
the_store = resource.store
workspace = the_store.workspace
try:
layer, created = Layer.objects.get_or_create(name=name, defaults={
"workspace": workspace.name,
"store": the_store.name,
"storeType": the_store.resource_type,
"typename": "%s:%s" % (workspace.name.encode('utf-8'), resource.name.encode('utf-8')),
"title": resource.title or 'No title provided',
"abstract": resource.abstract or 'No abstract provided',
"owner": owner,
"uuid": str(uuid.uuid4()),
"bbox_x0": Decimal(resource.latlon_bbox[0]),
"bbox_x1": Decimal(resource.latlon_bbox[1]),
"bbox_y0": Decimal(resource.latlon_bbox[2]),
"bbox_y1": Decimal(resource.latlon_bbox[3])
})
# recalculate the layer statistics
set_attributes(layer, overwrite=True)
# Fix metadata links if the ip has changed
if layer.link_set.metadata().count() > 0:
if not created and settings.SITEURL not in layer.link_set.metadata()[0].url:
layer.link_set.metadata().delete()
layer.save()
metadata_links = []
for link in layer.link_set.metadata():
metadata_links.append((link.mime, link.name, link.url))
resource.metadata_links = metadata_links
cat.save(resource)
except Exception as e:
if ignore_errors:
status = 'failed'
exception_type, error, traceback = sys.exc_info()
else:
if verbosity > 0:
msg = "Stopping process because --ignore-errors was not set and an error was found."
print >> sys.stderr, msg
raise Exception(
'Failed to process %s' %
resource.name.encode('utf-8'), e), None, sys.exc_info()[2]
else:
if created:
layer.set_default_permissions()
status = 'created'
output['stats']['created'] += 1
else:
status = 'updated'
output['stats']['updated'] += 1
msg = "[%s] Layer %s (%d/%d)" % (status, name, i + 1, number)
info = {'name': name, 'status': status}
if status == 'failed':
output['stats']['failed'] += 1
info['traceback'] = traceback
info['exception_type'] = exception_type
info['error'] = error
output['layers'].append(info)
if verbosity > 0:
print >> console, msg
if remove_deleted:
q = Layer.objects.filter()
if workspace_for_delete_compare is not None:
if isinstance(workspace_for_delete_compare, Workspace):
q = q.filter(
workspace__exact=workspace_for_delete_compare.name)
else:
q = q.filter(workspace__exact=workspace_for_delete_compare)
if store is not None:
if isinstance(
store,
CoverageStore) or isinstance(
store,
DataStore):
q = q.filter(store__exact=store.name)
else:
q = q.filter(store__exact=store)
logger.debug("Executing 'remove_deleted' logic")
logger.debug("GeoNode Layers Found:")
# compare the list of GeoNode layers obtained via query/filter with valid resources found in GeoServer
# filtered per options passed to updatelayers: --workspace, --store, --skip-unadvertised
# add any layers not found in GeoServer to deleted_layers (must match
# workspace and store as well):
deleted_layers = []
for layer in q:
logger.debug(
"GeoNode Layer info: name: %s, workspace: %s, store: %s",
layer.name,
layer.workspace,
layer.store)
layer_found_in_geoserver = False
for resource in resources_for_delete_compare:
# if layer.name matches a GeoServer resource, check also that
# workspace and store match, mark valid:
if layer.name == resource.name:
if layer.workspace == resource.workspace.name and layer.store == resource.store.name:
logger.debug(
"Matches GeoServer layer: name: %s, workspace: %s, store: %s",
resource.name,
resource.workspace.name,
resource.store.name)
layer_found_in_geoserver = True
if not layer_found_in_geoserver:
logger.debug(
"----- Layer %s not matched, marked for deletion ---------------",
layer.name)
deleted_layers.append(layer)
number_deleted = len(deleted_layers)
if verbosity > 1:
msg = "\nFound %d layers to delete, starting processing" % number_deleted if number_deleted > 0 else \
"\nFound %d layers to delete" % number_deleted
print >> console, msg
for i, layer in enumerate(deleted_layers):
logger.debug(
"GeoNode Layer to delete: name: %s, workspace: %s, store: %s",
layer.name,
layer.workspace,
layer.store)
try:
# delete ratings, comments, and taggit tags:
ct = ContentType.objects.get_for_model(layer)
OverallRating.objects.filter(
content_type=ct,
object_id=layer.id).delete()
Comment.objects.filter(
content_type=ct,
object_id=layer.id).delete()
layer.keywords.clear()
layer.delete()
output['stats']['deleted'] += 1
status = "delete_succeeded"
except Exception as e:
status = "delete_failed"
finally:
from .signals import geoserver_pre_delete
pre_delete.connect(geoserver_pre_delete, sender=Layer)
msg = "[%s] Layer %s (%d/%d)" % (status,
layer.name,
i + 1,
number_deleted)
info = {'name': layer.name, 'status': status}
if status == "delete_failed":
exception_type, error, traceback = sys.exc_info()
info['traceback'] = traceback
info['exception_type'] = exception_type
info['error'] = error
output['deleted_layers'].append(info)
if verbosity > 0:
print >> console, msg
finish = datetime.datetime.now()
td = finish - start
output['stats']['duration_sec'] = td.microseconds / \
1000000 + td.seconds + td.days * 24 * 3600
return output
def get_stores(store_type=None):
cat = Catalog(ogc_server_settings.internal_rest, _user, _password)
stores = cat.get_stores()
store_list = []
for store in stores:
store.fetch()
stype = store.dom.find('type').text.lower()
if store_type and store_type.lower() == stype:
store_list.append({'name': store.name, 'type': stype})
elif store_type is None:
store_list.append({'name': store.name, 'type': stype})
return store_list
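# Illustrative call (store name is made up, not from this module):
#   get_stores('dataStore') -> [{'name': 'geonode_imports', 'type': 'datastore'}]
# Both sides of the comparison are lower-cased, so the store_type argument is
# effectively case-insensitive.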
def set_attributes(layer, overwrite=False):
"""
Retrieve layer attribute names & types from Geoserver,
then store in GeoNode database using Attribute model
"""
attribute_map = []
server_url = ogc_server_settings.LOCATION if layer.storeType != "remoteStore" else layer.service.base_url
if layer.storeType == "remoteStore" and layer.service.ptype == "gxp_arcrestsource":
dft_url = server_url + ("%s?f=json" % layer.typename)
try:
# The code below will fail if http_client cannot be imported
body = json.loads(http_client.request(dft_url)[1])
attribute_map = [[n["name"], _esri_types[n["type"]]]
for n in body["fields"] if n.get("name") and n.get("type")]
except Exception:
attribute_map = []
elif layer.storeType in ["dataStore", "remoteStore", "wmsStore"]:
        dft_url = re.sub(r"/wms/?$",
"/",
server_url) + "wfs?" + urllib.urlencode({"service": "wfs",
"version": "1.0.0",
"request": "DescribeFeatureType",
"typename": layer.typename.encode('utf-8'),
})
try:
# The code below will fail if http_client cannot be imported or
# WFS not supported
body = http_client.request(dft_url)[1]
doc = etree.fromstring(body)
path = ".//{xsd}extension/{xsd}sequence/{xsd}element".format(
xsd="{http://www.w3.org/2001/XMLSchema}")
attribute_map = [[n.attrib["name"], n.attrib["type"]] for n in doc.findall(
path) if n.attrib.get("name") and n.attrib.get("type")]
except Exception:
attribute_map = []
# Try WMS instead
dft_url = server_url + "?" + urllib.urlencode({
"service": "wms",
"version": "1.0.0",
"request": "GetFeatureInfo",
"bbox": ','.join([str(x) for x in layer.bbox]),
"LAYERS": layer.typename.encode('utf-8'),
"QUERY_LAYERS": layer.typename.encode('utf-8'),
"feature_count": 1,
"width": 1,
"height": 1,
"srs": "EPSG:4326",
"info_format": "text/html",
"x": 1,
"y": 1
})
try:
body = http_client.request(dft_url)[1]
soup = BeautifulSoup(body)
for field in soup.findAll('th'):
                if field.string is None:
field_name = field.contents[0].string
else:
field_name = field.string
attribute_map.append([field_name, "xsd:string"])
except Exception:
attribute_map = []
elif layer.storeType in ["coverageStore"]:
dc_url = server_url + "wcs?" + urllib.urlencode({
"service": "wcs",
"version": "1.1.0",
"request": "DescribeCoverage",
"identifiers": layer.typename.encode('utf-8')
})
try:
response, body = http_client.request(dc_url)
doc = etree.fromstring(body)
path = ".//{wcs}Axis/{wcs}AvailableKeys/{wcs}Key".format(
wcs="{http://www.opengis.net/wcs/1.1.1}")
attribute_map = [[n.text, "raster"] for n in doc.findall(path)]
except Exception:
attribute_map = []
# we need 3 more items for description, attribute_label and display_order
attribute_map_dict = {
'field': 0,
'ftype': 1,
'description': 2,
'label': 3,
'display_order': 4,
}
for attribute in attribute_map:
attribute.extend((None, None, 0))
attributes = layer.attribute_set.all()
# Delete existing attributes if they no longer exist in an updated layer
for la in attributes:
lafound = False
for attribute in attribute_map:
field, ftype, description, label, display_order = attribute
if field == la.attribute:
lafound = True
# store description and attribute_label in attribute_map
attribute[attribute_map_dict['description']] = la.description
attribute[attribute_map_dict['label']] = la.attribute_label
attribute[attribute_map_dict['display_order']] = la.display_order
if overwrite or not lafound:
logger.debug(
"Going to delete [%s] for [%s]",
la.attribute,
layer.name.encode('utf-8'))
la.delete()
# Add new layer attributes if they don't already exist
if attribute_map is not None:
        position = len(Attribute.objects.filter(layer=layer)) + 1
for attribute in attribute_map:
field, ftype, description, label, display_order = attribute
if field is not None:
la, created = Attribute.objects.get_or_create(
layer=layer, attribute=field, attribute_type=ftype,
description=description, attribute_label=label,
display_order=display_order)
if created:
if is_layer_attribute_aggregable(
layer.storeType,
field,
ftype):
logger.debug("Generating layer attribute statistics")
result = get_attribute_statistics(layer.name, field)
if result is not None:
la.count = result['Count']
la.min = result['Min']
la.max = result['Max']
la.average = result['Average']
la.median = result['Median']
la.stddev = result['StandardDeviation']
la.sum = result['Sum']
la.unique_values = result['unique_values']
la.last_stats_updated = datetime.datetime.now()
la.visible = ftype.find("gml:") != 0
                    la.display_order = position
la.save()
                    position += 1
logger.debug(
"Created [%s] attribute for [%s]",
field,
layer.name.encode('utf-8'))
else:
logger.debug("No attributes found")
def set_styles(layer, gs_catalog):
style_set = []
gs_layer = gs_catalog.get_layer(layer.name)
default_style = gs_layer.default_style
layer.default_style = save_style(default_style)
# FIXME: This should remove styles that are no longer valid
style_set.append(layer.default_style)
alt_styles = gs_layer.styles
for alt_style in alt_styles:
style_set.append(save_style(alt_style))
layer.styles = style_set
return layer
def save_style(gs_style):
style, created = Style.objects.get_or_create(name=gs_style.name)
style.sld_title = gs_style.sld_title
style.sld_body = gs_style.sld_body
style.sld_url = gs_style.body_href
style.save()
return style
def is_layer_attribute_aggregable(store_type, field_name, field_type):
"""
Decipher whether layer attribute is suitable for statistical derivation
"""
# must be vector layer
if store_type != 'dataStore':
return False
# must be a numeric data type
if field_type not in LAYER_ATTRIBUTE_NUMERIC_DATA_TYPES:
return False
# must not be an identifier type field
if field_name.lower() in ['id', 'identifier']:
return False
return True
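# Illustrative outcomes (field names are hypothetical): a 'dataStore' layer
# with a numeric, non-identifier field such as 'population' passes all three
# checks, while any 'coverageStore' layer or a field named 'id' is rejected.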
def get_attribute_statistics(layer_name, field):
"""
Generate statistics (range, mean, median, standard deviation, unique values)
for layer attribute
"""
logger.debug('Deriving aggregate statistics for attribute %s', field)
if not ogc_server_settings.WPS_ENABLED:
return None
try:
return wps_execute_layer_attribute_statistics(layer_name, field)
except Exception:
logger.exception('Error generating layer aggregate statistics')
def get_wcs_record(instance, retry=True):
wcs = WebCoverageService(ogc_server_settings.LOCATION + 'wcs', '1.0.0')
key = instance.workspace + ':' + instance.name
logger.debug(wcs.contents)
if key in wcs.contents:
return wcs.contents[key]
else:
msg = ("Layer '%s' was not found in WCS service at %s." %
(key, ogc_server_settings.public_url)
)
if retry:
logger.debug(
msg +
' Waiting a couple of seconds before trying again.')
time.sleep(2)
return get_wcs_record(instance, retry=False)
else:
raise GeoNodeException(msg)
def get_coverage_grid_extent(instance):
"""
Returns a list of integers with the size of the coverage
extent in pixels
"""
instance_wcs = get_wcs_record(instance)
grid = instance_wcs.grid
return [(int(h) - int(l) + 1) for
h, l in zip(grid.highlimits, grid.lowlimits)]
GEOSERVER_LAYER_TYPES = {
'vector': FeatureType.resource_type,
'raster': Coverage.resource_type,
}
def geoserver_layer_type(filename):
the_type = layer_type(filename)
return GEOSERVER_LAYER_TYPES[the_type]
def cleanup(name, uuid):
"""Deletes GeoServer and Catalogue records for a given name.
Useful to clean the mess when something goes terribly wrong.
It also verifies if the Django record existed, in which case
it performs no action.
"""
try:
Layer.objects.get(name=name)
    except Layer.DoesNotExist:
pass
else:
msg = ('Not doing any cleanup because the layer %s exists in the '
'Django db.' % name)
raise GeoNodeException(msg)
cat = gs_catalog
gs_store = None
gs_layer = None
gs_resource = None
# FIXME: Could this lead to someone deleting for example a postgis db
# with the same name of the uploaded file?.
try:
gs_store = cat.get_store(name)
if gs_store is not None:
gs_layer = cat.get_layer(name)
if gs_layer is not None:
gs_resource = gs_layer.resource
else:
gs_layer = None
gs_resource = None
except FailedRequestError as e:
msg = ('Couldn\'t connect to GeoServer while cleaning up layer '
               '[%s] !!' % str(e))
logger.warning(msg)
if gs_layer is not None:
try:
cat.delete(gs_layer)
        except Exception:
logger.warning("Couldn't delete GeoServer layer during cleanup()")
if gs_resource is not None:
try:
cat.delete(gs_resource)
        except Exception:
msg = 'Couldn\'t delete GeoServer resource during cleanup()'
logger.warning(msg)
if gs_store is not None:
try:
cat.delete(gs_store)
        except Exception:
logger.warning("Couldn't delete GeoServer store during cleanup()")
logger.warning('Deleting dangling Catalogue record for [%s] '
'(no Django record to match)', name)
if 'geonode.catalogue' in settings.INSTALLED_APPS:
from geonode.catalogue import get_catalogue
catalogue = get_catalogue()
catalogue.remove_record(uuid)
logger.warning('Finished cleanup after failed Catalogue/Django '
'import for layer: %s', name)
def _create_featurestore(name, data, overwrite=False, charset="UTF-8", workspace=None):
cat = gs_catalog
cat.create_featurestore(name, data, overwrite=overwrite, charset=charset)
store = get_store(cat, name, workspace=workspace)
return store, cat.get_resource(name, store=store, workspace=workspace)
def _create_coveragestore(name, data, overwrite=False, charset="UTF-8", workspace=None):
cat = gs_catalog
cat.create_coveragestore(name, data, overwrite=overwrite)
store = get_store(cat, name, workspace=workspace)
return store, cat.get_resource(name, store=store, workspace=workspace)
def _create_db_featurestore(name, data, overwrite=False, charset="UTF-8", workspace=None):
"""Create a database store then use it to import a shapefile.
If the import into the database fails then delete the store
(and delete the PostGIS table for it).
"""
cat = gs_catalog
dsname = ogc_server_settings.DATASTORE
try:
ds = get_store(cat, dsname, workspace=workspace)
except FailedRequestError:
ds = cat.create_datastore(dsname, workspace=workspace)
db = ogc_server_settings.datastore_db
db_engine = 'postgis' if \
'postgis' in db['ENGINE'] else db['ENGINE']
ds.connection_parameters.update(
{'validate connections': 'true',
'max connections': '10',
'min connections': '1',
'fetch size': '1000',
'host': db['HOST'],
'port': db['PORT'],
'database': db['NAME'],
'user': db['USER'],
'passwd': db['PASSWORD'],
'dbtype': db_engine}
)
cat.save(ds)
ds = get_store(cat, dsname, workspace=workspace)
try:
cat.add_data_to_store(ds, name, data,
overwrite=overwrite,
charset=charset)
return ds, cat.get_resource(name, store=ds, workspace=workspace)
except Exception:
msg = _("An exception occurred loading data to PostGIS")
msg += "- %s" % (sys.exc_info()[1])
try:
delete_from_postgis(name)
except Exception:
msg += _(" Additionally an error occured during database cleanup")
msg += "- %s" % (sys.exc_info()[1])
raise GeoNodeException(msg)
def get_store(cat, name, workspace=None):
# Make sure workspace is a workspace object and not a string.
# If the workspace does not exist, continue as if no workspace had been defined.
if isinstance(workspace, basestring):
workspace = cat.get_workspace(workspace)
if workspace is None:
workspace = cat.get_default_workspace()
try:
store = cat.get_xml('%s/%s.xml' % (workspace.datastore_url[:-4], name))
except FailedRequestError:
try:
store = cat.get_xml('%s/%s.xml' % (workspace.coveragestore_url[:-4], name))
except FailedRequestError:
try:
store = cat.get_xml('%s/%s.xml' % (workspace.wmsstore_url[:-4], name))
except FailedRequestError:
raise FailedRequestError("No store found named: " + name)
if store.tag == 'dataStore':
store = datastore_from_index(cat, workspace, store)
elif store.tag == 'coverageStore':
store = coveragestore_from_index(cat, workspace, store)
elif store.tag == 'wmsStore':
store = wmsstore_from_index(cat, workspace, store)
return store
def geoserver_upload(
layer,
base_file,
user,
name,
overwrite=True,
title=None,
abstract=None,
permissions=None,
keywords=(),
charset='UTF-8'):
# Step 2. Check that it is uploading to the same resource type as
# the existing resource
logger.info('>>> Step 2. Make sure we are not trying to overwrite a '
'existing resource named [%s] with the wrong type', name)
the_layer_type = geoserver_layer_type(base_file)
# Get a short handle to the gsconfig geoserver catalog
cat = gs_catalog
# Fix bug on layer replace #2642
# https://github.com/GeoNode/geonode/issues/2462
cat.reload()
workspace = cat.get_default_workspace()
# Check if the store exists in geoserver
try:
store = get_store(cat, name, workspace=workspace)
except geoserver.catalog.FailedRequestError as e:
# There is no store, ergo the road is clear
pass
else:
# If we get a store, we do the following:
resources = store.get_resources()
# If the store is empty, we just delete it.
if len(resources) == 0:
cat.delete(store)
else:
# If our resource is already configured in the store it needs
# to have the right resource type
for resource in resources:
if resource.name == name:
msg = 'Name already in use and overwrite is False'
assert overwrite, msg
existing_type = resource.resource_type
if existing_type != the_layer_type:
msg = ('Type of uploaded file %s (%s) '
'does not match type of existing '
'resource type '
'%s' % (name, the_layer_type, existing_type))
logger.info(msg)
raise GeoNodeException(msg)
# Step 3. Identify whether it is vector or raster and which extra files
# are needed.
logger.info('>>> Step 3. Identifying if [%s] is vector or raster and '
'gathering extra files', name)
if the_layer_type == FeatureType.resource_type:
logger.debug('Uploading vector layer: [%s]', base_file)
if ogc_server_settings.DATASTORE:
create_store_and_resource = _create_db_featurestore
else:
create_store_and_resource = _create_featurestore
elif the_layer_type == Coverage.resource_type:
logger.debug("Uploading raster layer: [%s]", base_file)
create_store_and_resource = _create_coveragestore
else:
msg = ('The layer type for name %s is %s. It should be '
'%s or %s,' % (name,
the_layer_type,
FeatureType.resource_type,
Coverage.resource_type))
logger.warn(msg)
raise GeoNodeException(msg)
# Step 4. Create the store in GeoServer
logger.info('>>> Step 4. Starting upload of [%s] to GeoServer...', name)
# Get the helper files if they exist
files = get_files(base_file)
data = files
if 'shp' not in files:
data = base_file
try:
store, gs_resource = create_store_and_resource(name,
data,
charset=charset,
overwrite=overwrite,
workspace=workspace)
except UploadError as e:
msg = ('Could not save the layer %s, there was an upload '
'error: %s' % (name, str(e)))
logger.warn(msg)
e.args = (msg,)
raise
except ConflictingDataError as e:
# A datastore of this name already exists
msg = ('GeoServer reported a conflict creating a store with name %s: '
'"%s". This should never happen because a brand new name '
'should have been generated. But since it happened, '
'try renaming the file or deleting the store in '
'GeoServer.' % (name, str(e)))
logger.warn(msg)
e.args = (msg,)
raise
else:
logger.debug('Finished upload of [%s] to GeoServer without '
'errors.', name)
# Step 5. Create the resource in GeoServer
logger.info('>>> Step 5. Generating the metadata for [%s] after '
'successful import to GeoSever', name)
# Verify the resource was created
if gs_resource is not None:
assert gs_resource.name == name
else:
msg = ('GeoNode encountered problems when creating layer %s.'
'It cannot find the Layer that matches this Workspace.'
'try renaming your files.' % name)
logger.warn(msg)
raise GeoNodeException(msg)
# Step 6. Make sure our data always has a valid projection
# FIXME: Put this in gsconfig.py
logger.info('>>> Step 6. Making sure [%s] has a valid projection' % name)
if gs_resource.latlon_bbox is None:
box = gs_resource.native_bbox[:4]
minx, maxx, miny, maxy = [float(a) for a in box]
if -180 <= minx <= 180 and -180 <= maxx <= 180 and \
-90 <= miny <= 90 and -90 <= maxy <= 90:
logger.info('GeoServer failed to detect the projection for layer '
'[%s]. Guessing EPSG:4326', name)
# If GeoServer couldn't figure out the projection, we just
# assume it's lat/lon to avoid a bad GeoServer configuration
gs_resource.latlon_bbox = gs_resource.native_bbox
gs_resource.projection = "EPSG:4326"
cat.save(gs_resource)
else:
msg = ('GeoServer failed to detect the projection for layer '
'[%s]. It doesn\'t look like EPSG:4326, so backing out '
'the layer.')
logger.info(msg, name)
cascading_delete(cat, name)
raise GeoNodeException(msg % name)
# Step 7. Create the style and assign it to the created resource
# FIXME: Put this in gsconfig.py
logger.info('>>> Step 7. Creating style for [%s]' % name)
publishing = cat.get_layer(name)
if 'sld' in files:
f = open(files['sld'], 'r')
sld = f.read()
f.close()
else:
sld = get_sld_for(publishing)
if sld is not None:
try:
cat.create_style(name, sld)
except geoserver.catalog.ConflictingDataError as e:
msg = ('There was already a style named %s in GeoServer, '
'cannot overwrite: "%s"' % (name, str(e)))
logger.warn(msg)
e.args = (msg,)
# FIXME: Should we use the fully qualified typename?
publishing.default_style = cat.get_style(name)
cat.save(publishing)
# Step 10. Create the Django record for the layer
logger.info('>>> Step 10. Creating Django record for [%s]', name)
# FIXME: Do this inside the layer object
typename = workspace.name + ':' + gs_resource.name
layer_uuid = str(uuid.uuid1())
defaults = dict(store=gs_resource.store.name,
storeType=gs_resource.store.resource_type,
typename=typename,
title=title or gs_resource.title,
uuid=layer_uuid,
abstract=abstract or gs_resource.abstract or '',
owner=user)
return name, workspace.name, defaults, gs_resource
class ServerDoesNotExist(Exception):
pass
class OGC_Server(object):
"""
OGC Server object.
"""
def __init__(self, ogc_server, alias):
self.alias = alias
self.server = ogc_server
def __getattr__(self, item):
return self.server.get(item)
@property
def credentials(self):
"""
Returns a tuple of the server's credentials.
"""
creds = namedtuple('OGC_SERVER_CREDENTIALS', ['username', 'password'])
return creds(username=self.USER, password=self.PASSWORD)
@property
def datastore_db(self):
"""
Returns the server's datastore dict or None.
"""
if self.DATASTORE and settings.DATABASES.get(self.DATASTORE, None):
return settings.DATABASES.get(self.DATASTORE, dict())
else:
return dict()
@property
def ows(self):
"""
The Open Web Service url for the server.
"""
location = self.PUBLIC_LOCATION if self.PUBLIC_LOCATION else self.LOCATION
return self.OWS_LOCATION if self.OWS_LOCATION else location + 'ows'
@property
def rest(self):
"""
The REST endpoint for the server.
"""
return self.LOCATION + \
'rest' if not self.REST_LOCATION else self.REST_LOCATION
@property
def public_url(self):
"""
The global public endpoint for the server.
"""
return self.LOCATION if not self.PUBLIC_LOCATION else self.PUBLIC_LOCATION
@property
def internal_ows(self):
"""
The Open Web Service url for the server used by GeoNode internally.
"""
location = self.LOCATION
return location + 'ows'
@property
def internal_rest(self):
"""
The internal REST endpoint for the server.
"""
return self.LOCATION + 'rest'
@property
def hostname(self):
return urlsplit(self.LOCATION).hostname
@property
def netloc(self):
return urlsplit(self.LOCATION).netloc
def __str__(self):
return self.alias
class OGC_Servers_Handler(object):
"""
OGC Server Settings Convenience dict.
"""
def __init__(self, ogc_server_dict):
self.servers = ogc_server_dict
# FIXME(Ariel): Are there better ways to do this without involving
# local?
self._servers = local()
def ensure_valid_configuration(self, alias):
"""
Ensures the settings are valid.
"""
try:
server = self.servers[alias]
except KeyError:
raise ServerDoesNotExist("The server %s doesn't exist" % alias)
datastore = server.get('DATASTORE')
uploader_backend = getattr(
settings,
'UPLOADER',
dict()).get(
'BACKEND',
'geonode.rest')
if uploader_backend == 'geonode.importer' and datastore and not settings.DATABASES.get(
datastore):
raise ImproperlyConfigured(
'The OGC_SERVER setting specifies a datastore '
'but no connection parameters are present.')
if uploader_backend == 'geonode.importer' and not datastore:
raise ImproperlyConfigured(
'The UPLOADER BACKEND is set to geonode.importer but no DATASTORE is specified.')
if 'PRINTNG_ENABLED' in server:
raise ImproperlyConfigured("The PRINTNG_ENABLED setting has been removed, use 'PRINT_NG_ENABLED' instead.")
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection where no settings is provided.
"""
try:
server = self.servers[alias]
except KeyError:
raise ServerDoesNotExist("The server %s doesn't exist" % alias)
server.setdefault('BACKEND', 'geonode.geoserver')
server.setdefault('LOCATION', 'http://localhost:8080/geoserver/')
server.setdefault('USER', 'admin')
server.setdefault('PASSWORD', 'geoserver')
server.setdefault('DATASTORE', str())
server.setdefault('GEOGIG_DATASTORE_DIR', str())
for option in ['MAPFISH_PRINT_ENABLED', 'PRINT_NG_ENABLED', 'GEONODE_SECURITY_ENABLED',
'BACKEND_WRITE_ENABLED']:
server.setdefault(option, True)
for option in ['GEOGIG_ENABLED', 'WMST_ENABLED', 'WPS_ENABLED']:
server.setdefault(option, False)
def __getitem__(self, alias):
if hasattr(self._servers, alias):
return getattr(self._servers, alias)
self.ensure_defaults(alias)
self.ensure_valid_configuration(alias)
server = self.servers[alias]
server = OGC_Server(alias=alias, ogc_server=server)
setattr(self._servers, alias, server)
return server
def __setitem__(self, key, value):
setattr(self._servers, key, value)
def __iter__(self):
return iter(self.servers)
def all(self):
return [self[alias] for alias in self]
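# Typical lookup, mirroring the module-level wiring further below:
#   ogc_server_settings = OGC_Servers_Handler(settings.OGC_SERVER)['default']
# __getitem__ applies defaults, validates the configuration and caches the
# resulting OGC_Server per thread on the local() instance.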
def get_wms():
wms_url = ogc_server_settings.internal_ows + \
"?service=WMS&request=GetCapabilities&version=1.1.0"
netloc = urlparse(wms_url).netloc
http = httplib2.Http()
http.add_credentials(_user, _password)
http.authorizations.append(
httplib2.BasicAuthentication(
(_user, _password),
netloc,
wms_url,
{},
None,
None,
http
)
)
body = http.request(wms_url)[1]
_wms = WebMapService(wms_url, xml=body)
return _wms
def wps_execute_layer_attribute_statistics(layer_name, field):
"""Derive aggregate statistics from WPS endpoint"""
# generate statistics using WPS
url = '%s/ows' % (ogc_server_settings.LOCATION)
# TODO: use owslib.wps.WebProcessingService for WPS interaction
# this requires GeoServer's WPS gs:Aggregate function to
# return a proper wps:ExecuteResponse
request = render_to_string('layers/wps_execute_gs_aggregate.xml', {
'layer_name': 'geonode:%s' % layer_name,
'field': field
})
response = http_post(
url,
request,
timeout=ogc_server_settings.TIMEOUT,
username=ogc_server_settings.credentials.username,
password=ogc_server_settings.credentials.password)
exml = etree.fromstring(response)
result = {}
for f in ['Min', 'Max', 'Average', 'Median', 'StandardDeviation', 'Sum']:
fr = exml.find(f)
if fr is not None:
result[f] = fr.text
else:
result[f] = 'NA'
count = exml.find('Count')
if count is not None:
result['Count'] = int(count.text)
else:
result['Count'] = 0
result['unique_values'] = 'NA'
return result
# TODO: find way of figuring out threshold better
    # Looks incomplete: what is the purpose of the next lines?
# if result['Count'] < 10000:
# request = render_to_string('layers/wps_execute_gs_unique.xml', {
# 'layer_name': 'geonode:%s' % layer_name,
# 'field': field
# })
# response = http_post(
# url,
# request,
# timeout=ogc_server_settings.TIMEOUT,
# username=ogc_server_settings.credentials.username,
# password=ogc_server_settings.credentials.password)
# exml = etree.fromstring(response)
def style_update(request, url):
"""
Sync style stuff from GS to GN.
Ideally we should call this from a view straight from GXP, and we should use
gsConfig, that at this time does not support styles updates. Before gsConfig
is updated, for now we need to parse xml.
In case of a DELETE, we need to query request.path to get the style name,
and then remove it.
In case of a POST or PUT, we need to parse the xml from
request.body, which is in this format:
"""
if request.method in ('POST', 'PUT'): # we need to parse xml
# Need to remove NSx from IE11
if "HTTP_USER_AGENT" in request.META:
if ('Trident/7.0' in request.META['HTTP_USER_AGENT'] and
'rv:11.0' in request.META['HTTP_USER_AGENT']):
txml = re.sub(r'xmlns:NS[0-9]=""', '', request.body)
txml = re.sub(r'NS[0-9]:', '', txml)
request._body = txml
tree = ET.ElementTree(ET.fromstring(request.body))
elm_namedlayer_name = tree.findall(
'.//{http://www.opengis.net/sld}Name')[0]
elm_user_style_name = tree.findall(
'.//{http://www.opengis.net/sld}Name')[1]
elm_user_style_title = tree.find(
'.//{http://www.opengis.net/sld}Title')
        if elm_user_style_title is None:  # find() returns None when Title is absent
elm_user_style_title = elm_user_style_name
layer_name = elm_namedlayer_name.text
style_name = elm_user_style_name.text
sld_body = '<?xml version="1.0" encoding="UTF-8"?>%s' % request.body
# add style in GN and associate it to layer
if request.method == 'POST':
style = Style(name=style_name, sld_body=sld_body, sld_url=url)
style.save()
layer = Layer.objects.all().filter(typename=layer_name)[0]
style.layer_styles.add(layer)
style.save()
if request.method == 'PUT': # update style in GN
style = Style.objects.all().filter(name=style_name)[0]
style.sld_body = sld_body
style.sld_url = url
if len(elm_user_style_title.text) > 0:
style.sld_title = elm_user_style_title.text
style.save()
for layer in style.layer_styles.all():
layer.save()
if request.method == 'DELETE': # delete style from GN
style_name = os.path.basename(request.path)
style = Style.objects.all().filter(name=style_name)[0]
style.delete()
def set_time_info(layer, attribute, end_attribute, presentation,
precision_value, precision_step, enabled=True):
'''Configure the time dimension for a layer.
:param layer: the layer to configure
:param attribute: the attribute used to represent the instant or period
start
:param end_attribute: the optional attribute used to represent the end
period
:param presentation: either 'LIST', 'DISCRETE_INTERVAL', or
'CONTINUOUS_INTERVAL'
:param precision_value: number representing number of steps
:param precision_step: one of 'seconds', 'minutes', 'hours', 'days',
'months', 'years'
:param enabled: defaults to True
'''
layer = gs_catalog.get_layer(layer.name)
if layer is None:
raise ValueError('no such layer: %s' % layer.name)
resource = layer.resource
resolution = None
if precision_value and precision_step:
resolution = '%s %s' % (precision_value, precision_step)
info = DimensionInfo("time", enabled, presentation, resolution, "ISO8601",
None, attribute=attribute, end_attribute=end_attribute)
metadata = dict(resource.metadata or {})
metadata['time'] = info
resource.metadata = metadata
gs_catalog.save(resource)
def get_time_info(layer):
'''Get the configured time dimension metadata for the layer as a dict.
The keys of the dict will be those of the parameters of `set_time_info`.
:returns: dict of values or None if not configured
'''
layer = gs_catalog.get_layer(layer.name)
if layer is None:
raise ValueError('no such layer: %s' % layer.name)
resource = layer.resource
info = resource.metadata.get('time', None) if resource.metadata else None
vals = None
if info:
value = step = None
resolution = info.resolution_str()
if resolution:
value, step = resolution.split()
vals = dict(
enabled=info.enabled,
attribute=info.attribute,
end_attribute=info.end_attribute,
presentation=info.presentation,
precision_value=value,
precision_step=step,
)
return vals
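# Minimal usage sketch for the two helpers above (the attribute name is
# hypothetical; the other values are ones the docstrings allow):
#   set_time_info(layer, attribute='start_date', end_attribute=None,
#                 presentation='LIST', precision_value=None,
#                 precision_step=None)
#   get_time_info(layer)  # -> dict with the same keys, or None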
ogc_server_settings = OGC_Servers_Handler(settings.OGC_SERVER)['default']
_wms = None
_csw = None
_user, _password = ogc_server_settings.credentials
http_client = httplib2.Http()
http_client.add_credentials(_user, _password)
_netloc = urlparse(ogc_server_settings.LOCATION).netloc
http_client.authorizations.append(
httplib2.BasicAuthentication(
(_user, _password),
_netloc,
ogc_server_settings.LOCATION,
{},
None,
None,
http_client
)
)
url = ogc_server_settings.rest
gs_catalog = Catalog(url, _user, _password)
gs_uploader = Client(url, _user, _password)
_punc = re.compile(r"[\.:]") # regex for punctuation that confuses restconfig
_foregrounds = [
"#ffbbbb",
"#bbffbb",
"#bbbbff",
"#ffffbb",
"#bbffff",
"#ffbbff"]
_backgrounds = [
"#880000",
"#008800",
"#000088",
"#888800",
"#008888",
"#880088"]
_marks = ["square", "circle", "cross", "x", "triangle"]
_style_contexts = izip(cycle(_foregrounds), cycle(_backgrounds), cycle(_marks))
_default_style_names = ["point", "line", "polygon", "raster"]
_esri_types = {
"esriFieldTypeDouble": "xsd:double",
"esriFieldTypeString": "xsd:string",
"esriFieldTypeSmallInteger": "xsd:int",
"esriFieldTypeInteger": "xsd:int",
"esriFieldTypeDate": "xsd:dateTime",
"esriFieldTypeOID": "xsd:long",
"esriFieldTypeGeometry": "xsd:geometry",
"esriFieldTypeBlob": "xsd:base64Binary",
"esriFieldTypeRaster": "raster",
"esriFieldTypeGUID": "xsd:string",
"esriFieldTypeGlobalID": "xsd:string",
"esriFieldTypeXML": "xsd:anyType"}
def _render_thumbnail(req_body):
spec = _fixup_ows_url(req_body)
url = "%srest/printng/render.png" % ogc_server_settings.LOCATION
hostname = urlparse(settings.SITEURL).hostname
params = dict(width=240, height=180, auth="%s,%s,%s" % (hostname, _user, _password))
url = url + "?" + urllib.urlencode(params)
# @todo annoying but not critical
    # openlayers controls posted back contain a bad character. this seems
    # to come from a &minus; entity in the html, which gets converted
    # to a unicode en-dash but is not decoded properly during transmission.
# 'ignore' the error for now as controls are not being rendered...
data = spec
if type(data) == unicode:
# make sure any stored bad values are wiped out
# don't use keyword for errors - 2.6 compat
# though unicode accepts them (as seen below)
data = data.encode('ASCII', 'ignore')
data = unicode(data, errors='ignore').encode('UTF-8')
try:
resp, content = http_client.request(url, "POST", data, {
'Content-type': 'text/html'
})
except Exception:
logging.warning('Error generating thumbnail')
return
return content
def _fixup_ows_url(thumb_spec):
# @HACK - for whatever reason, a map's maplayers ows_url contains only /geoserver/wms
# so rendering of thumbnails fails - replace those uri's with full geoserver URL
import re
gspath = '"' + ogc_server_settings.public_url # this should be in img src attributes
repl = '"' + ogc_server_settings.LOCATION
return re.sub(gspath, repl, thumb_spec)
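# Sketch of the substitution above (urls are made up): a thumbnail spec
# containing '"http://example.org/geoserver/wms' is rewritten to
# '"http://localhost:8080/geoserver/wms' so printng can reach GeoServer
# directly.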
| PhilLidar-DAD/geonode | geonode/geoserver/helpers.py | Python | gpl-3.0 | 61,811 |
#!/usr/bin/python
# -*- coding: utf-8 -*- #
# This would be a lot easier with a shellscript.
# Will probably just do this in bash, once I make this work.
# In this case we're doing it for educational purposes.
# Expect future revisions to be faster and more efficient.
# Started with working code, broke it to fit my needs.
# Makes me very sad, and way more work to figure out than ls *.mp3
# but at least it's not full on regex? :p
# by steakwipe with way too much help from beatsteak
#importing old soup for good luck
import os
#yt variable needed later
yt = 'https://youtu.be/'
mp3list = '/home/steakwipe/git/ytdl-namer' #i'd like this to be a runtime option later
# let's look around here for some mp3s
def mp3gen():
for root, dirs, files in os.walk('.'):
for filename in files:
if os.path.splitext(filename)[1] == ".mp3":
yield os.path.join(root, filename)
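# e.g. walking '.' could yield './rips/Artist - Title-NVEzFqKGrXY.mp3'
# (made-up path, just to show the shape of what the loop below chews on)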
# next we process all the .mp3 files in the dir to isolate the part
# of the filename that is the YT id. splitext, grab the first piece,
# then keep the last 11 characters. Should result in NVEzFqKGrXY
# or something. this'll need to chew thru hundreds of mp3s at a time,
# later pushing the output youtube urls back in as id3 tags.
for mp3file in mp3gen():
    # 'Artist - Title-NVEzFqKGrXY.mp3' -> 'Artist - Title-NVEzFqKGrXY'
    name = os.path.splitext(os.path.basename(mp3file))[0]
    # youtube ids are always 11 characters, so slice them off the end
    video_id = name[-11:]
    print(yt + video_id)
| steakwipe/ytdl-namer | mp3gen.py | Python | gpl-3.0 | 1,558 |
import gzip
import os
import pickle
import matplotlib
# select the backend before importing pyplot so the call reliably takes effect
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
def show_attention():
# Load attentions
    print('Loading attentions from pickle file')
with gzip.open(
os.path.join('training_results', 'torch_train', 'attentions.pkl.gz'),
'r') as att_file:
attentions = pickle.load(att_file)
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(np.mean(np.array(attentions),axis=(0,1)), cmap='bone')
fig.colorbar(cax)
# # Set up axes
# ax.set_xticklabels([''] + input_sentence.split(' ') +
# ['<EOS>'], rotation=90)
# ax.set_yticklabels([''] + output_words)
#
# # Show label at every tick
# ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
# ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
show_attention()
| albertaparicio/tfg-voice-conversion | attention_graphs.py | Python | gpl-3.0 | 905 |
# -*- coding: utf-8 -*-
import datetime
from kivy.app import App
from kivy.uix.widget import Widget
import random
from kivy.clock import Clock
from kivy.properties import StringProperty, NumericProperty
from webScrape import webScraper
class MirrorWindow(Widget):
dayPrint = ['Sön', 'Mån', 'Tis', 'Ons', 'Tors', 'Fre', 'Lör']
secondsAnim = NumericProperty(0)
minute = NumericProperty(0)
time = StringProperty('')
day = StringProperty('')
date = StringProperty('')
weather1 = StringProperty('')
weather2 = StringProperty('')
weather3 = StringProperty('')
seconds = StringProperty('')
def update(self, dt):
self.time = datetime.datetime.today().strftime("%H:%M")
self.day = self.dayPrint[int(datetime.date.today().strftime('%w'))]
self.date = datetime.date.today().strftime('%y%m%d')
#self.seconds = str (( int (datetime.datetime.today().strftime('%f')) / 1000 ) )
#self.seconds = ( int (datetime.datetime.today().strftime('%f')) / 1000 )
self.seconds = str(datetime.datetime.today().strftime('%S'))
# self.weather1 = (' ').join(webScraper().weather()[0][:3])
# self.weather2 = (' ').join(webScraper().weather()[1][:3])
# self.weather3 = (' ').join(webScraper().weather()[2][:3])
#60 000 000
if self.secondsAnim < 360:
self.secondsAnim = self.secondsAnim + 6
else:
self.secondsAnim = 0
#self.minute = int (datetime.datetime.today().strftime('%S') )
if self.minute < 360:
self.minute = self.minute + 0.1
else:
self.minute = 0.1
class MirrorApp(App):
def build(self):
mirrorWindow = MirrorWindow()
Clock.schedule_interval(mirrorWindow.update, 0.01)
return mirrorWindow
if __name__ == '__main__':
MirrorApp().run()
| jwesstrom/cleverMirror | main.py | Python | gpl-3.0 | 1,864 |
from unittest import TestCase
from gnomon import MagneticField
class MockG4ThreeVector():
x = 0
y = 0
z = 0
class TestWandsToroidField(TestCase):
def setUp(self):
self.field_minus = MagneticField.WandsToroidField('-')
self.field_plus = MagneticField.WandsToroidField('+')
self.fields = [self.field_minus, self.field_plus]
def test_PhenomModel(self):
for field in self.fields:
with self.assertRaises(ValueError):
field.PhenomModel(0)
with self.assertRaises(ValueError):
field.PhenomModel(-1)
field.PhenomModel(1)
def test_GetFieldValue(self):
for field in self.fields:
pos = MockG4ThreeVector()
vector = field.GetFieldValue(pos, 0)
self.assertEqual(vector.x, 0)
self.assertEqual(vector.y, 0)
self.assertEqual(vector.z, 0)
pos.x = 1
vector = field.GetFieldValue(pos, 0)
pos = MockG4ThreeVector()
pos.x = 1
pos.y = 2
pos.z = 3
vector_plus = self.field_plus.GetFieldValue(pos, 0)
vector_minus = self.field_minus.GetFieldValue(pos, 0)
self.assertAlmostEqual(vector_plus.x, -1 * vector_minus.x)
self.assertAlmostEqual(vector_plus.y, -1 * vector_minus.y)
self.assertAlmostEqual(vector_plus.z, -1 * vector_minus.z)
| nuSTORM/gnomon | tests/test_MagneticField.py | Python | gpl-3.0 | 1,406 |
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import errno
import logging
import os
import re
from django.conf import settings
from pootle.core.log import STORE_RESURRECTED, store_log
from pootle.core.utils.timezone import datetime_min
from pootle_app.models.directory import Directory
from pootle_language.models import Language
from pootle_store.models import Store
from pootle_store.util import absolute_real_path, relative_real_path
#: Case insensitive match for language codes
LANGCODE_RE = re.compile('^[a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?$',
re.IGNORECASE)
#: Case insensitive match for language codes as postfix
LANGCODE_POSTFIX_RE = re.compile(
'^.*?[-_.]([a-z]{2,3}([_-][a-z]{2,3})?(@[a-z0-9]+)?)$', re.IGNORECASE)
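# Illustrative matches (names are made up): 'pt', 'pt_BR' and 'sr@latin' all
# satisfy LANGCODE_RE, while LANGCODE_POSTFIX_RE pulls 'pt_BR' out of a stem
# like 'project-pt_BR'.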
def direct_language_match_filename(language_code, path_name):
name, ext = os.path.splitext(os.path.basename(path_name))
if name == language_code or name.lower() == language_code.lower():
return True
# Check file doesn't match another language.
if Language.objects.filter(code__iexact=name).count():
return False
detect = LANGCODE_POSTFIX_RE.split(name)
return (len(detect) > 1 and
(detect[1] == language_code or
detect[1].lower() == language_code.lower()))
def match_template_filename(project, filename):
"""Test if :param:`filename` might point at a template file for a given
:param:`project`.
"""
name, ext = os.path.splitext(os.path.basename(filename))
# FIXME: is the test for matching extension redundant?
if ext == os.path.extsep + project.get_template_filetype():
if ext != os.path.extsep + project.localfiletype:
# Template extension is distinct, surely file is a template.
return True
elif not find_lang_postfix(filename):
# File name can't possibly match any language, assume it is a
# template.
return True
return False
def get_matching_language_dirs(project_dir, language):
return [lang_dir for lang_dir in os.listdir(project_dir)
if language.code == lang_dir]
def get_non_existant_language_dir(project_dir, language, file_style,
make_dirs):
if file_style == "gnu":
return project_dir
elif make_dirs:
language_dir = os.path.join(project_dir, language.code)
os.mkdir(language_dir)
return language_dir
else:
raise IndexError("Directory not found for language %s, project %s" %
(language.code, project_dir))
def get_or_make_language_dir(project_dir, language, file_style, make_dirs):
matching_language_dirs = get_matching_language_dirs(project_dir, language)
if len(matching_language_dirs) == 0:
# If no matching directories can be found, check if it is a GNU-style
# project.
return get_non_existant_language_dir(project_dir, language, file_style,
make_dirs)
else:
return os.path.join(project_dir, matching_language_dirs[0])
def get_language_dir(project_dir, language, file_style, make_dirs):
language_dir = os.path.join(project_dir, language.code)
if not os.path.exists(language_dir):
return get_or_make_language_dir(project_dir, language, file_style,
make_dirs)
else:
return language_dir
def get_translation_project_dir(language, project_dir, file_style,
make_dirs=False):
"""Returns the base directory containing translations files for the
project.
:param make_dirs: if ``True``, project and language directories will be
created as necessary.
"""
if file_style == 'gnu':
return project_dir
else:
return get_language_dir(project_dir, language, file_style, make_dirs)
def is_hidden_file(path):
return path[0] == '.'
def split_files_and_dirs(ignored_files, ext, real_dir, file_filter):
files = []
dirs = []
for child_path in [child_path for child_path in os.listdir(real_dir)
if child_path not in ignored_files and
not is_hidden_file(child_path)]:
full_child_path = os.path.join(real_dir, child_path)
if (os.path.isfile(full_child_path) and
full_child_path.endswith(ext) and file_filter(full_child_path)):
files.append(child_path)
elif os.path.isdir(full_child_path):
dirs.append(child_path)
return files, dirs
def add_items(fs_items_set, db_items, create_or_resurrect_db_item, parent):
"""Add/make obsolete the database items to correspond to the filesystem.
:param fs_items_set: items (dirs, files) currently in the filesystem
:param db_items: dict (name, item) of items (dirs, stores) currently in the
database
    :param create_or_resurrect_db_item: callable that will create a new db item
        or resurrect an obsolete db item with a given name and parent.
    :param parent: parent db directory for the items
:return: list of all items, list of newly added items
:rtype: tuple
"""
items = []
new_items = []
db_items_set = set(db_items)
items_to_delete = db_items_set - fs_items_set
items_to_create = fs_items_set - db_items_set
for name in items_to_delete:
db_items[name].makeobsolete()
if len(items_to_delete) > 0:
parent.update_all_cache()
for vfolder_treeitem in parent.vfolder_treeitems:
vfolder_treeitem.update_all_cache()
for name in db_items_set - items_to_delete:
items.append(db_items[name])
for name in items_to_create:
item = create_or_resurrect_db_item(name)
items.append(item)
new_items.append(item)
try:
item.save()
except Exception:
logging.exception('Error while adding %s', item)
return items, new_items
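# Sketch of the set arithmetic above (names are made up): with
# fs_items_set = {'a.po', 'b.po'} and db_items keyed by {'b.po', 'c.po'},
# 'c.po' is made obsolete, 'a.po' is created or resurrected, and 'b.po' is
# carried over untouched.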
def create_or_resurrect_store(file, parent, name, translation_project):
"""Create or resurrect a store db item with given name and parent."""
try:
store = Store.objects.get(parent=parent, name=name)
store.obsolete = False
store.file_mtime = datetime_min
if store.last_sync_revision is None:
store.last_sync_revision = store.get_max_unit_revision()
store_log(user='system', action=STORE_RESURRECTED,
path=store.pootle_path, store=store.id)
except Store.DoesNotExist:
store = Store(file=file, parent=parent,
name=name, translation_project=translation_project)
store.mark_all_dirty()
return store
def create_or_resurrect_dir(name, parent):
"""Create or resurrect a directory db item with given name and parent."""
try:
dir = Directory.objects.get(parent=parent, name=name)
dir.obsolete = False
except Directory.DoesNotExist:
dir = Directory(name=name, parent=parent)
dir.mark_all_dirty()
return dir
# TODO: rename function or even rewrite it
def add_files(translation_project, ignored_files, ext, relative_dir, db_dir,
file_filter=lambda _x: True):
podir_path = to_podir_path(relative_dir)
files, dirs = split_files_and_dirs(ignored_files, ext, podir_path,
file_filter)
file_set = set(files)
dir_set = set(dirs)
existing_stores = dict((store.name, store) for store in
db_dir.child_stores.live().exclude(file='')
.iterator())
existing_dirs = dict((dir.name, dir) for dir in
db_dir.child_dirs.live().iterator())
files, new_files = add_items(
file_set,
existing_stores,
lambda name: create_or_resurrect_store(
file=os.path.join(relative_dir, name),
parent=db_dir,
name=name,
translation_project=translation_project,
),
db_dir,
)
db_subdirs, new_db_subdirs = add_items(
dir_set,
existing_dirs,
lambda name: create_or_resurrect_dir(name=name, parent=db_dir),
db_dir,
)
is_empty = len(files) == 0
for db_subdir in db_subdirs:
fs_subdir = os.path.join(relative_dir, db_subdir.name)
_files, _new_files, _is_empty = \
add_files(translation_project, ignored_files, ext, fs_subdir,
db_subdir, file_filter)
files += _files
new_files += _new_files
is_empty &= _is_empty
if is_empty:
db_dir.makeobsolete()
return files, new_files, is_empty
def to_podir_path(path):
path = relative_real_path(path)
return os.path.join(settings.POOTLE_TRANSLATION_DIRECTORY, path)
def find_lang_postfix(filename):
"""Finds the language code at end of a filename."""
name = os.path.splitext(os.path.basename(filename))[0]
if LANGCODE_RE.match(name):
return name
match = LANGCODE_POSTFIX_RE.match(name)
if match:
return match.groups()[0]
for code in Language.objects.values_list('code', flat=True):
        if (name.endswith('-'+code) or name.endswith('_'+code) or
                name.endswith('.'+code) or
                name.lower().endswith('-'+code.lower()) or
                name.lower().endswith('_'+code.lower()) or
                name.lower().endswith('.'+code.lower())):
return code
def translation_project_dir_exists(language, project):
"""Tests if there are translation files corresponding to the given
:param:`language` and :param:`project`.
"""
if project.get_treestyle() == "gnu":
# GNU style projects are tricky
if language.code == 'templates':
# Language is template look for template files
for dirpath, dirnames, filenames in os.walk(
project.get_real_path()):
for filename in filenames:
if (project.file_belongs_to_project(filename,
match_templates=True)
and match_template_filename(project, filename)):
return True
else:
# find files with the language name in the project dir
for dirpath, dirnames, filenames in os.walk(
project.get_real_path()):
for filename in filenames:
# FIXME: don't reuse already used file
if (project.file_belongs_to_project(filename,
match_templates=False)
and direct_language_match_filename(language.code,
filename)):
return True
else:
# find directory with the language name in the project dir
try:
dirpath, dirnames, filename = os.walk(
project.get_real_path()).next()
if language.code in dirnames:
return True
except StopIteration:
pass
return False
def init_store_from_template(translation_project, template_store):
"""Initialize a new file for `translation_project` using `template_store`.
"""
if translation_project.file_style == 'gnu':
target_pootle_path, target_path = get_translated_name_gnu(
translation_project, template_store)
else:
target_pootle_path, target_path = get_translated_name(
translation_project, template_store)
# Create the missing directories for the new TP.
target_dir = os.path.dirname(target_path)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
output_file = template_store.file.store
output_file.settargetlanguage(translation_project.language.code)
output_file.savefile(target_path)
def get_translated_name_gnu(translation_project, store):
"""Given a template :param:`store` and a :param:`translation_project` return
target filename.
"""
pootle_path_parts = store.pootle_path.split('/')
pootle_path_parts[1] = translation_project.language.code
pootle_path = '/'.join(pootle_path_parts[:-1])
if not pootle_path.endswith('/'):
pootle_path = pootle_path + '/'
suffix = "%s%s%s" % (translation_project.language.code, os.extsep,
translation_project.project.localfiletype)
# try loading file first
try:
target_store = translation_project.stores.live().get(
parent__pootle_path=pootle_path,
name__iexact=suffix,
)
return (target_store.pootle_path,
target_store.file and target_store.file.path)
except Store.DoesNotExist:
target_store = None
# is this GNU-style with prefix?
use_prefix = (store.parent.child_stores.live().exclude(file="").count() > 1
or translation_project.stores.live().exclude(
name__iexact=suffix, file='').count())
if not use_prefix:
# let's make sure
for tp in translation_project.project.translationproject_set.exclude(
language__code='templates').iterator():
temp_suffix = \
"%s%s%s" % (tp.language.code, os.extsep,
translation_project.project.localfiletype)
if tp.stores.live().exclude(
name__iexact=temp_suffix).exclude(file="").count():
use_prefix = True
break
if use_prefix:
if store.translation_project.language.code == 'templates':
tprefix = os.path.splitext(store.name)[0]
# FIXME: we should detect separator
prefix = tprefix + '-'
else:
prefix = os.path.splitext(store.name)[0][:-len(
store.translation_project.language.code)]
tprefix = prefix[:-1]
try:
target_store = translation_project.stores.live().filter(
parent__pootle_path=pootle_path,
name__in=[
tprefix + '-' + suffix,
tprefix + '_' + suffix,
tprefix + '.' + suffix,
tprefix + '-' + suffix.lower(),
tprefix + '_' + suffix.lower(),
tprefix + '.' + suffix.lower(),
],
)[0]
return (target_store.pootle_path,
target_store.file and target_store.file.path)
except (Store.DoesNotExist, IndexError):
pass
else:
prefix = ""
if store.file:
path_parts = store.file.path.split(os.sep)
name = prefix + suffix
path_parts[-1] = name
pootle_path_parts[-1] = name
else:
path_parts = store.parent.get_real_path().split(os.sep)
path_parts.append(store.name)
return '/'.join(pootle_path_parts), os.sep.join(path_parts)
def get_translated_name(translation_project, store):
name, ext = os.path.splitext(store.name)
if store.file:
path_parts = store.file.name.split(os.sep)
else:
path_parts = store.parent.get_real_path().split(os.sep)
path_parts.append(store.name)
pootle_path_parts = store.pootle_path.split('/')
# Replace language code
path_parts[1] = translation_project.language.code
pootle_path_parts[1] = translation_project.language.code
# Replace extension
path_parts[-1] = "%s.%s" % (name,
translation_project.project.localfiletype)
pootle_path_parts[-1] = \
"%s.%s" % (name, translation_project.project.localfiletype)
return ('/'.join(pootle_path_parts),
absolute_real_path(os.sep.join(path_parts)))
def does_not_exist(path):
if os.path.exists(path):
return False
try:
os.stat(path)
# what the hell?
except OSError as e:
if e.errno == errno.ENOENT:
# explicit no such file or directory
return True
| pavels/pootle | pootle/apps/pootle_app/project_tree.py | Python | gpl-3.0 | 16,304 |
#
# BitBox02 Electrum plugin code.
#
import hid
from typing import TYPE_CHECKING, Dict, Tuple, Optional, List, Any, Callable
from electrum_grs import bip32, constants
from electrum_grs.i18n import _
from electrum_grs.keystore import Hardware_KeyStore
from electrum_grs.transaction import PartialTransaction
from electrum_grs.wallet import Standard_Wallet, Multisig_Wallet, Deterministic_Wallet
from electrum_grs.util import bh2u, UserFacingException
from electrum_grs.base_wizard import ScriptTypeNotSupported, BaseWizard
from electrum_grs.logging import get_logger
from electrum_grs.plugin import Device, DeviceInfo, runs_in_hwd_thread
from electrum_grs.simple_config import SimpleConfig
from electrum_grs.json_db import StoredDict
from electrum_grs.storage import get_derivation_used_for_hw_device_encryption
from electrum_grs.bitcoin import OnchainOutputType
import electrum_grs.bitcoin as bitcoin
import electrum_grs.ecc as ecc
from ..hw_wallet import HW_PluginBase, HardwareClientBase
_logger = get_logger(__name__)
try:
from bitbox02 import bitbox02
from bitbox02 import util
from bitbox02.communication import (
devices,
HARDENED,
u2fhid,
bitbox_api_protocol,
FirmwareVersionOutdatedException,
)
requirements_ok = True
except ImportError as e:
if not (isinstance(e, ModuleNotFoundError) and e.name == 'bitbox02'):
_logger.exception('error importing bitbox02 plugin deps')
requirements_ok = False
class BitBox02Client(HardwareClientBase):
# handler is a BitBox02_Handler, importing it would lead to a circular dependency
def __init__(self, handler: Any, device: Device, config: SimpleConfig, *, plugin: HW_PluginBase):
HardwareClientBase.__init__(self, plugin=plugin)
self.bitbox02_device = None # type: Optional[bitbox02.BitBox02]
self.handler = handler
self.device_descriptor = device
self.config = config
self.bitbox_hid_info = None
if self.config.get("bitbox02") is None:
bitbox02_config: dict = {
"remote_static_noise_keys": [],
"noise_privkey": None,
}
self.config.set_key("bitbox02", bitbox02_config)
bitboxes = devices.get_any_bitbox02s()
for bitbox in bitboxes:
if (
bitbox["path"] == self.device_descriptor.path
and bitbox["interface_number"]
== self.device_descriptor.interface_number
):
self.bitbox_hid_info = bitbox
if self.bitbox_hid_info is None:
raise Exception("No BitBox02 detected")
def is_initialized(self) -> bool:
return True
@runs_in_hwd_thread
def close(self):
try:
self.bitbox02_device.close()
        except Exception:
pass
def has_usable_connection_with_device(self) -> bool:
if self.bitbox_hid_info is None:
return False
return True
@runs_in_hwd_thread
def get_soft_device_id(self) -> Optional[str]:
if self.handler is None:
# Can't do the pairing without the handler. This happens at wallet creation time, when
# listing the devices.
return None
if self.bitbox02_device is None:
self.pairing_dialog()
return self.bitbox02_device.root_fingerprint().hex()
@runs_in_hwd_thread
def pairing_dialog(self):
def pairing_step(code: str, device_response: Callable[[], bool]) -> bool:
msg = "Please compare and confirm the pairing code on your BitBox02:\n" + code
self.handler.show_message(msg)
try:
res = device_response()
except:
# Close the hid device on exception
hid_device.close()
raise
finally:
self.handler.finished()
return res
def exists_remote_static_pubkey(pubkey: bytes) -> bool:
bitbox02_config = self.config.get("bitbox02")
noise_keys = bitbox02_config.get("remote_static_noise_keys")
if noise_keys is not None:
if pubkey.hex() in [noise_key for noise_key in noise_keys]:
return True
return False
def set_remote_static_pubkey(pubkey: bytes) -> None:
if not exists_remote_static_pubkey(pubkey):
bitbox02_config = self.config.get("bitbox02")
if bitbox02_config.get("remote_static_noise_keys") is not None:
bitbox02_config["remote_static_noise_keys"].append(pubkey.hex())
else:
bitbox02_config["remote_static_noise_keys"] = [pubkey.hex()]
self.config.set_key("bitbox02", bitbox02_config)
def get_noise_privkey() -> Optional[bytes]:
bitbox02_config = self.config.get("bitbox02")
privkey = bitbox02_config.get("noise_privkey")
if privkey is not None:
return bytes.fromhex(privkey)
return None
def set_noise_privkey(privkey: bytes) -> None:
bitbox02_config = self.config.get("bitbox02")
bitbox02_config["noise_privkey"] = privkey.hex()
self.config.set_key("bitbox02", bitbox02_config)
def attestation_warning() -> None:
self.handler.show_error(
"The BitBox02 attestation failed.\nTry reconnecting the BitBox02.\nWarning: The device might not be genuine, if the\n problem persists please contact Shift support.",
blocking=True
)
class NoiseConfig(bitbox_api_protocol.BitBoxNoiseConfig):
"""NoiseConfig extends BitBoxNoiseConfig"""
def show_pairing(self, code: str, device_response: Callable[[], bool]) -> bool:
return pairing_step(code, device_response)
def attestation_check(self, result: bool) -> None:
if not result:
attestation_warning()
def contains_device_static_pubkey(self, pubkey: bytes) -> bool:
return exists_remote_static_pubkey(pubkey)
def add_device_static_pubkey(self, pubkey: bytes) -> None:
return set_remote_static_pubkey(pubkey)
def get_app_static_privkey(self) -> Optional[bytes]:
return get_noise_privkey()
def set_app_static_privkey(self, privkey: bytes) -> None:
return set_noise_privkey(privkey)
if self.bitbox02_device is None:
hid_device = hid.device()
hid_device.open_path(self.bitbox_hid_info["path"])
bitbox02_device = bitbox02.BitBox02(
transport=u2fhid.U2FHid(hid_device),
device_info=self.bitbox_hid_info,
noise_config=NoiseConfig(),
)
try:
bitbox02_device.check_min_version()
except FirmwareVersionOutdatedException:
raise
self.bitbox02_device = bitbox02_device
self.fail_if_not_initialized()
def fail_if_not_initialized(self) -> None:
assert self.bitbox02_device
if not self.bitbox02_device.device_info()["initialized"]:
raise Exception(
"Please initialize the BitBox02 using the BitBox app first before using the BitBox02 in electrum"
)
def coin_network_from_electrum_network(self) -> int:
if constants.net.TESTNET:
return bitbox02.btc.TBTC
return bitbox02.btc.BTC
@runs_in_hwd_thread
def get_password_for_storage_encryption(self) -> str:
derivation = get_derivation_used_for_hw_device_encryption()
derivation_list = bip32.convert_bip32_path_to_list_of_uint32(derivation)
xpub = self.bitbox02_device.electrum_encryption_key(derivation_list)
node = bip32.BIP32Node.from_xkey(xpub, net = constants.BitcoinMainnet()).subkey_at_public_derivation(())
return node.eckey.get_public_key_bytes(compressed=True).hex()
@runs_in_hwd_thread
def get_xpub(self, bip32_path: str, xtype: str, *, display: bool = False) -> str:
if self.bitbox02_device is None:
self.pairing_dialog()
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
self.fail_if_not_initialized()
xpub_keypath = bip32.convert_bip32_path_to_list_of_uint32(bip32_path)
coin_network = self.coin_network_from_electrum_network()
if xtype == "p2wpkh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.ZPUB
else:
out_type = bitbox02.btc.BTCPubRequest.VPUB
elif xtype == "p2wpkh-p2sh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.YPUB
else:
out_type = bitbox02.btc.BTCPubRequest.UPUB
elif xtype == "p2wsh-p2sh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_YPUB
else:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_UPUB
elif xtype == "p2wsh":
if coin_network == bitbox02.btc.BTC:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_ZPUB
else:
out_type = bitbox02.btc.BTCPubRequest.CAPITAL_VPUB
# The other legacy types are not supported
else:
raise Exception("invalid xtype:{}".format(xtype))
return self.bitbox02_device.btc_xpub(
keypath=xpub_keypath,
xpub_type=out_type,
coin=coin_network,
display=display,
)
@runs_in_hwd_thread
def label(self) -> str:
if self.handler is None:
# Can't do the pairing without the handler. This happens at wallet creation time, when
# listing the devices.
return super().label()
if self.bitbox02_device is None:
self.pairing_dialog()
# We add the fingerprint to the label, as if there are two devices with the same label, the
# device manager can mistake one for another and fail.
return "%s (%s)" % (
self.bitbox02_device.device_info()["name"],
self.bitbox02_device.root_fingerprint().hex(),
)
@runs_in_hwd_thread
def request_root_fingerprint_from_device(self) -> str:
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
return self.bitbox02_device.root_fingerprint().hex()
def is_pairable(self) -> bool:
if self.bitbox_hid_info is None:
return False
return True
@runs_in_hwd_thread
def btc_multisig_config(
self, coin, bip32_path: List[int], wallet: Multisig_Wallet, xtype: str,
):
"""
Set and get a multisig config with the current device and some other arbitrary xpubs.
Registers it on the device if not already registered.
xtype: 'p2wsh' | 'p2wsh-p2sh'
"""
assert xtype in ("p2wsh", "p2wsh-p2sh")
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
account_keypath = bip32_path[:-2]
xpubs = wallet.get_master_public_keys()
our_xpub = self.get_xpub(
bip32.convert_bip32_intpath_to_strpath(account_keypath), xtype
)
multisig_config = bitbox02.btc.BTCScriptConfig(
multisig=bitbox02.btc.BTCScriptConfig.Multisig(
threshold=wallet.m,
xpubs=[util.parse_xpub(xpub) for xpub in xpubs],
our_xpub_index=xpubs.index(our_xpub),
script_type={
"p2wsh": bitbox02.btc.BTCScriptConfig.Multisig.P2WSH,
"p2wsh-p2sh": bitbox02.btc.BTCScriptConfig.Multisig.P2WSH_P2SH,
}[xtype]
)
)
is_registered = self.bitbox02_device.btc_is_script_config_registered(
coin, multisig_config, account_keypath
)
if not is_registered:
name = self.handler.name_multisig_account()
try:
self.bitbox02_device.btc_register_script_config(
coin=coin,
script_config=multisig_config,
keypath=account_keypath,
name=name,
)
except bitbox02.DuplicateEntryException:
raise
except:
raise UserFacingException("Failed to register multisig\naccount configuration on BitBox02")
return multisig_config
@runs_in_hwd_thread
def show_address(
self, bip32_path: str, address_type: str, wallet: Deterministic_Wallet
) -> str:
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
address_keypath = bip32.convert_bip32_path_to_list_of_uint32(bip32_path)
coin_network = self.coin_network_from_electrum_network()
if address_type == "p2wpkh":
script_config = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH
)
elif address_type == "p2wpkh-p2sh":
script_config = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH_P2SH
)
elif address_type in ("p2wsh-p2sh", "p2wsh"):
if type(wallet) is Multisig_Wallet:
script_config = self.btc_multisig_config(
coin_network, address_keypath, wallet, address_type,
)
else:
raise Exception("Can only use p2wsh-p2sh or p2wsh with multisig wallets")
else:
raise Exception(
"invalid address xtype: {} is not supported by the BitBox02".format(
address_type
)
)
return self.bitbox02_device.btc_address(
keypath=address_keypath,
coin=coin_network,
script_config=script_config,
display=True,
)
def _get_coin(self):
return bitbox02.btc.TBTC if constants.net.TESTNET else bitbox02.btc.BTC
@runs_in_hwd_thread
def sign_transaction(
self,
keystore: Hardware_KeyStore,
tx: PartialTransaction,
wallet: Deterministic_Wallet,
):
if tx.is_complete():
return
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
coin = self._get_coin()
tx_script_type = None
# Build BTCInputType list
inputs = []
for txin in tx.inputs():
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path is None:
raise Exception(
"A wallet owned pubkey was not found in the transaction input to be signed"
)
prev_tx = txin.utxo
if prev_tx is None:
raise UserFacingException(_('Missing previous tx.'))
prev_inputs: List[bitbox02.BTCPrevTxInputType] = []
prev_outputs: List[bitbox02.BTCPrevTxOutputType] = []
for prev_txin in prev_tx.inputs():
prev_inputs.append(
{
"prev_out_hash": prev_txin.prevout.txid[::-1],
"prev_out_index": prev_txin.prevout.out_idx,
"signature_script": prev_txin.script_sig,
"sequence": prev_txin.nsequence,
}
)
for prev_txout in prev_tx.outputs():
prev_outputs.append(
{
"value": prev_txout.value,
"pubkey_script": prev_txout.scriptpubkey,
}
)
inputs.append(
{
"prev_out_hash": txin.prevout.txid[::-1],
"prev_out_index": txin.prevout.out_idx,
"prev_out_value": txin.value_sats(),
"sequence": txin.nsequence,
"keypath": full_path,
"script_config_index": 0,
"prev_tx": {
"version": prev_tx.version,
"locktime": prev_tx.locktime,
"inputs": prev_inputs,
"outputs": prev_outputs,
},
}
)
if tx_script_type is None:
tx_script_type = txin.script_type
elif tx_script_type != txin.script_type:
raise Exception("Cannot mix different input script types")
if tx_script_type == "p2wpkh":
tx_script_type = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH
)
elif tx_script_type == "p2wpkh-p2sh":
tx_script_type = bitbox02.btc.BTCScriptConfig(
simple_type=bitbox02.btc.BTCScriptConfig.P2WPKH_P2SH
)
elif tx_script_type in ("p2wsh-p2sh", "p2wsh"):
if type(wallet) is Multisig_Wallet:
tx_script_type = self.btc_multisig_config(coin, full_path, wallet, tx_script_type)
else:
raise Exception("Can only use p2wsh-p2sh or p2wsh with multisig wallets")
else:
raise UserFacingException(
"invalid input script type: {} is not supported by the BitBox02".format(
tx_script_type
)
)
# Build BTCOutputType list
outputs = []
for txout in tx.outputs():
assert txout.address
# check for change
if txout.is_change:
my_pubkey, change_pubkey_path = keystore.find_my_pubkey_in_txinout(txout)
outputs.append(
bitbox02.BTCOutputInternal(
keypath=change_pubkey_path, value=txout.value, script_config_index=0,
)
)
else:
addrtype, pubkey_hash = bitcoin.address_to_hash(txout.address)
if addrtype == OnchainOutputType.P2PKH:
output_type = bitbox02.btc.P2PKH
elif addrtype == OnchainOutputType.P2SH:
output_type = bitbox02.btc.P2SH
elif addrtype == OnchainOutputType.WITVER0_P2WPKH:
output_type = bitbox02.btc.P2WPKH
elif addrtype == OnchainOutputType.WITVER0_P2WSH:
output_type = bitbox02.btc.P2WSH
else:
raise UserFacingException(
"Received unsupported output type during transaction signing: {} is not supported by the BitBox02".format(
addrtype
)
)
outputs.append(
bitbox02.BTCOutputExternal(
output_type=output_type,
output_hash=pubkey_hash,
value=txout.value,
)
)
keypath_account = full_path[:-2]
sigs = self.bitbox02_device.btc_sign(
coin,
[bitbox02.btc.BTCScriptConfigWithKeypath(
script_config=tx_script_type,
keypath=keypath_account,
)],
inputs=inputs,
outputs=outputs,
locktime=tx.locktime,
version=tx.version,
)
# Fill signatures
if len(sigs) != len(tx.inputs()):
raise Exception("Incorrect number of inputs signed.") # Should never occur
signatures = [bh2u(ecc.der_sig_from_sig_string(x[1])) + "01" for x in sigs]
tx.update_signatures(signatures)
def sign_message(self, keypath: str, message: bytes, xtype: str) -> bytes:
if self.bitbox02_device is None:
raise Exception(
"Need to setup communication first before attempting any BitBox02 calls"
)
try:
simple_type = {
"p2wpkh-p2sh":bitbox02.btc.BTCScriptConfig.P2WPKH_P2SH,
"p2wpkh": bitbox02.btc.BTCScriptConfig.P2WPKH,
}[xtype]
except KeyError:
raise UserFacingException("The BitBox02 does not support signing messages for this address type: {}".format(xtype))
_, _, signature = self.bitbox02_device.btc_sign_msg(
self._get_coin(),
bitbox02.btc.BTCScriptConfigWithKeypath(
script_config=bitbox02.btc.BTCScriptConfig(
simple_type=simple_type,
),
keypath=bip32.convert_bip32_path_to_list_of_uint32(keypath),
),
message,
)
return signature
class BitBox02_KeyStore(Hardware_KeyStore):
hw_type = "bitbox02"
device = "BitBox02"
plugin: "BitBox02Plugin"
def __init__(self, d: dict):
super().__init__(d)
self.force_watching_only = False
self.ux_busy = False
def get_client(self):
return self.plugin.get_client(self)
def give_error(self, message: Exception, clear_client: bool = False):
self.logger.info(message)
if not self.ux_busy:
self.handler.show_error(message)
else:
self.ux_busy = False
if clear_client:
self.client = None
raise UserFacingException(message)
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(
_(
"Message encryption, decryption and signing are currently not supported for {}"
).format(self.device)
)
def sign_message(self, sequence, message, password):
if password:
raise Exception("BitBox02 does not accept a password from the host")
client = self.get_client()
keypath = self.get_derivation_prefix() + "/%d/%d" % sequence
xtype = self.get_bip32_node_for_xpub().xtype
return client.sign_message(keypath, message.encode("utf-8"), xtype)
@runs_in_hwd_thread
def sign_transaction(self, tx: PartialTransaction, password: str):
if tx.is_complete():
return
client = self.get_client()
assert isinstance(client, BitBox02Client)
try:
try:
self.handler.show_message("Authorize Transaction...")
client.sign_transaction(self, tx, self.handler.get_wallet())
finally:
self.handler.finished()
except Exception as e:
self.logger.exception("")
self.give_error(e, True)
return
@runs_in_hwd_thread
def show_address(
self, sequence: Tuple[int, int], txin_type: str, wallet: Deterministic_Wallet
):
client = self.get_client()
address_path = "{}/{}/{}".format(
self.get_derivation_prefix(), sequence[0], sequence[1]
)
try:
try:
self.handler.show_message(_("Showing address ..."))
dev_addr = client.show_address(address_path, txin_type, wallet)
finally:
self.handler.finished()
except Exception as e:
self.logger.exception("")
self.handler.show_error(e)
class BitBox02Plugin(HW_PluginBase):
keystore_class = BitBox02_KeyStore
minimum_library = (5, 2, 0)
DEVICE_IDS = [(0x03EB, 0x2403)]
SUPPORTED_XTYPES = ("p2wpkh-p2sh", "p2wpkh", "p2wsh", "p2wsh-p2sh")
def __init__(self, parent: HW_PluginBase, config: SimpleConfig, name: str):
super().__init__(parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
self.device_manager().register_devices(self.DEVICE_IDS, plugin=self)
def get_library_version(self):
try:
from bitbox02 import bitbox02
version = bitbox02.__version__
except:
version = "unknown"
if requirements_ok:
return version
else:
raise ImportError()
# handler is a BitBox02_Handler
@runs_in_hwd_thread
def create_client(self, device: Device, handler: Any) -> BitBox02Client:
if not handler:
self.handler = handler
return BitBox02Client(handler, device, self.config, plugin=self)
def setup_device(
self, device_info: DeviceInfo, wizard: BaseWizard, purpose: int
):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
assert isinstance(client, BitBox02Client)
if client.bitbox02_device is None:
wizard.run_task_without_blocking_gui(
task=lambda client=client: client.pairing_dialog())
client.fail_if_not_initialized()
return client
def get_xpub(
self, device_id: str, derivation: str, xtype: str, wizard: BaseWizard
):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(
_("This type of script is not supported with {}: {}").format(self.device, xtype)
)
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
assert isinstance(client, BitBox02Client)
assert client.bitbox02_device is not None
return client.get_xpub(derivation, xtype)
@runs_in_hwd_thread
def show_address(
self,
wallet: Deterministic_Wallet,
address: str,
keystore: BitBox02_KeyStore = None,
):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
txin_type = wallet.get_txin_type(address)
sequence = wallet.get_address_index(address)
keystore.show_address(sequence, txin_type, wallet)
@runs_in_hwd_thread
def show_xpub(self, keystore: BitBox02_KeyStore):
client = keystore.get_client()
assert isinstance(client, BitBox02Client)
derivation = keystore.get_derivation_prefix()
xtype = keystore.get_bip32_node_for_xpub().xtype
client.get_xpub(derivation, xtype, display=True)
def create_device_from_hid_enumeration(self, d: dict, *, product_key) -> 'Device':
device = super().create_device_from_hid_enumeration(d, product_key=product_key)
# The BitBox02's product_id is not unique per device, thus use the path instead to
# distinguish devices.
id_ = str(d['path'])
return device._replace(id_=id_)
| GroestlCoin/electrum-grs | electrum_grs/plugins/bitbox02/bitbox02.py | Python | gpl-3.0 | 27,314 |
# BSD 3-Clause License
#
# Copyright (c) 2016-19, University of Liverpool
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Parent classes for all parser classes
"""
__author__ = "Felix Simkovic"
__date__ = "04 Oct 2016"
__version__ = "0.1"
import abc
ABC = abc.ABCMeta("ABC", (object,), {})
from conkit.core.contact import Contact
from conkit.core.contactmap import ContactMap
from conkit.core.contactfile import ContactFile
from conkit.core.sequence import Sequence
from conkit.core.sequencefile import SequenceFile
class Parser(ABC):
"""Abstract class for all parsers
"""
@abc.abstractmethod
def read(self):
pass
@abc.abstractmethod
def write(self):
pass
@classmethod
def _reconstruct(cls, hierarchy):
"""Wrapper to re-construct full hierarchy when parts are provided"""
if isinstance(hierarchy, ContactFile):
h = hierarchy
elif isinstance(hierarchy, ContactMap):
h = ContactFile("conkit")
h.add(hierarchy)
elif isinstance(hierarchy, Contact):
h = ContactFile("conkit")
m = ContactMap("1")
m.add(hierarchy)
h.add(m)
elif isinstance(hierarchy, SequenceFile):
h = hierarchy
elif isinstance(hierarchy, Sequence):
h = SequenceFile("conkit")
h.add(hierarchy)
return h
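# Illustrative note (not part of the original module): _reconstruct normalises
# whatever callers hand to a reader/writer. Assuming a single Contact `c`:
#
#     h = Parser._reconstruct(c)
#
# returns a ContactFile("conkit") holding a ContactMap("1") that contains `c`,
# so concrete write() implementations can always iterate a full hierarchy.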
class ContactFileParser(Parser):
"""General purpose class for all contact file parsers"""
pass
class SequenceFileParser(Parser):
"""General purpose class for all sequence file parsers"""
pass
| fsimkovic/cptbx | conkit/io/_parser.py | Python | gpl-3.0 | 3,072 |
#-*- coding: utf-8 -*-
import os
import sys
import random
import time
import json
import wikiquote
import tuitear
from threading import Thread
CONFIG_JSON = 'bots.json'
# Local value; to change the real interval, edit the bot configuration
INTERVALO = 1
stop = False
def start_bot(bot):
""" Hilo que inicia el bot pasado como argumento (diccionario) """
citas = []
for pagina in bot['paginas']:
print 'Loading', pagina
quotes = wikiquote.get_quotes(pagina.encode('utf8'))
quotes = [(q, pagina) for q in quotes]
citas += quotes
tiempo = 0
while not stop:
if tiempo >= bot['intervalo']:
quote, pagina = random.choice(citas)
tweet = bot['format'].encode('utf8') % dict(pagina = \
pagina.encode('utf8'), frase = quote.encode('utf8'))
if len(tweet) > 138:
#print 'tweet too long'
continue
print "%s: %s" % (bot['name'], tweet.decode('utf8'))
tuitear.tuitear(tweet, bot['consumer_key'], bot['consumer_secret'],
bot['access_token'], bot['access_token_secret'])
tiempo = 0
tiempo += INTERVALO
time.sleep(INTERVALO)
print 'Thread for', bot['name'], 'stopped'
def main():
path = os.path.dirname(__file__)
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = os.path.join(path, CONFIG_JSON)
print 'Loading bots from', filename
j = json.load(file(filename))
for bot in j['bots']:
if bot.get('disabled'):
continue
thread = Thread(target = start_bot, args=[bot])
thread.daemon = True
thread.start()
print 'Thread for', bot['name'], 'started'
while True:
# Keep the main thread alive so the daemon threads keep running,
# without busy-waiting at 100% CPU
time.sleep(1)
if __name__ == '__main__':
main()
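# Sketch of the expected bots.json layout, inferred from the keys read above
# (all values are placeholders):
#
# {
#   "bots": [
#     {
#       "name": "quote_bot",
#       "paginas": ["Albert Einstein"],
#       "intervalo": 3600,
#       "format": "%(frase)s -- %(pagina)s",
#       "consumer_key": "...",
#       "consumer_secret": "...",
#       "access_token": "...",
#       "access_token_secret": "...",
#       "disabled": false
#     }
#   ]
# }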
| sh4r3m4n/twitter-wikiquote-bot | bot.py | Python | gpl-3.0 | 1,879 |
# Copyright (C) 2013 Canonical Ltd.
# Copyright 2019 Joyent, Inc.
#
# Author: Ben Howard <[email protected]>
#
# This file is part of cloud-init. See LICENSE file for license information.
'''This is a testcase for the SmartOS datasource.
It replicates a serial console and acts like the SmartOS console does in
order to validate return responses.
'''
from __future__ import print_function
from binascii import crc32
import json
import multiprocessing
import os
import os.path
import re
import signal
import stat
import unittest2
import uuid
from cloudinit import serial
from cloudinit.sources import DataSourceSmartOS
from cloudinit.sources.DataSourceSmartOS import (
convert_smartos_network_data as convert_net,
SMARTOS_ENV_KVM, SERIAL_DEVICE, get_smartos_environ,
identify_file)
from cloudinit.event import EventType
import six
from cloudinit import helpers as c_helpers
from cloudinit.util import (
b64e, subp, ProcessExecutionError, which, write_file)
from cloudinit.tests.helpers import (
CiTestCase, mock, FilesystemMockingTestCase, skipIf)
try:
import serial as _pyserial
assert _pyserial # avoid pyflakes error F401: import unused
HAS_PYSERIAL = True
except ImportError:
HAS_PYSERIAL = False
DSMOS = 'cloudinit.sources.DataSourceSmartOS'
SDC_NICS = json.loads("""
[
{
"nic_tag": "external",
"primary": true,
"mtu": 1500,
"model": "virtio",
"gateway": "8.12.42.1",
"netmask": "255.255.255.0",
"ip": "8.12.42.102",
"network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
"gateways": [
"8.12.42.1"
],
"vlan_id": 324,
"mac": "90:b8:d0:f5:e4:f5",
"interface": "net0",
"ips": [
"8.12.42.102/24"
]
},
{
"nic_tag": "sdc_overlay/16187209",
"gateway": "192.168.128.1",
"model": "virtio",
"mac": "90:b8:d0:a5:ff:cd",
"netmask": "255.255.252.0",
"ip": "192.168.128.93",
"network_uuid": "4cad71da-09bc-452b-986d-03562a03a0a9",
"gateways": [
"192.168.128.1"
],
"vlan_id": 2,
"mtu": 8500,
"interface": "net1",
"ips": [
"192.168.128.93/22"
]
}
]
""")
SDC_NICS_ALT = json.loads("""
[
{
"interface": "net0",
"mac": "90:b8:d0:ae:64:51",
"vlan_id": 324,
"nic_tag": "external",
"gateway": "8.12.42.1",
"gateways": [
"8.12.42.1"
],
"netmask": "255.255.255.0",
"ip": "8.12.42.51",
"ips": [
"8.12.42.51/24"
],
"network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
"model": "virtio",
"mtu": 1500,
"primary": true
},
{
"interface": "net1",
"mac": "90:b8:d0:bd:4f:9c",
"vlan_id": 600,
"nic_tag": "internal",
"netmask": "255.255.255.0",
"ip": "10.210.1.217",
"ips": [
"10.210.1.217/24"
],
"network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
"model": "virtio",
"mtu": 1500
}
]
""")
SDC_NICS_DHCP = json.loads("""
[
{
"interface": "net0",
"mac": "90:b8:d0:ae:64:51",
"vlan_id": 324,
"nic_tag": "external",
"gateway": "8.12.42.1",
"gateways": [
"8.12.42.1"
],
"netmask": "255.255.255.0",
"ip": "8.12.42.51",
"ips": [
"8.12.42.51/24"
],
"network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
"model": "virtio",
"mtu": 1500,
"primary": true
},
{
"interface": "net1",
"mac": "90:b8:d0:bd:4f:9c",
"vlan_id": 600,
"nic_tag": "internal",
"netmask": "255.255.255.0",
"ip": "10.210.1.217",
"ips": [
"dhcp"
],
"network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
"model": "virtio",
"mtu": 1500
}
]
""")
SDC_NICS_MIP = json.loads("""
[
{
"interface": "net0",
"mac": "90:b8:d0:ae:64:51",
"vlan_id": 324,
"nic_tag": "external",
"gateway": "8.12.42.1",
"gateways": [
"8.12.42.1"
],
"netmask": "255.255.255.0",
"ip": "8.12.42.51",
"ips": [
"8.12.42.51/24",
"8.12.42.52/24"
],
"network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
"model": "virtio",
"mtu": 1500,
"primary": true
},
{
"interface": "net1",
"mac": "90:b8:d0:bd:4f:9c",
"vlan_id": 600,
"nic_tag": "internal",
"netmask": "255.255.255.0",
"ip": "10.210.1.217",
"ips": [
"10.210.1.217/24",
"10.210.1.151/24"
],
"network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
"model": "virtio",
"mtu": 1500
}
]
""")
SDC_NICS_MIP_IPV6 = json.loads("""
[
{
"interface": "net0",
"mac": "90:b8:d0:ae:64:51",
"vlan_id": 324,
"nic_tag": "external",
"gateway": "8.12.42.1",
"gateways": [
"8.12.42.1"
],
"netmask": "255.255.255.0",
"ip": "8.12.42.51",
"ips": [
"2001:4800:78ff:1b:be76:4eff:fe06:96b3/64",
"8.12.42.51/24"
],
"network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
"model": "virtio",
"mtu": 1500,
"primary": true
},
{
"interface": "net1",
"mac": "90:b8:d0:bd:4f:9c",
"vlan_id": 600,
"nic_tag": "internal",
"netmask": "255.255.255.0",
"ip": "10.210.1.217",
"ips": [
"10.210.1.217/24"
],
"network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
"model": "virtio",
"mtu": 1500
}
]
""")
SDC_NICS_IPV4_IPV6 = json.loads("""
[
{
"interface": "net0",
"mac": "90:b8:d0:ae:64:51",
"vlan_id": 324,
"nic_tag": "external",
"gateway": "8.12.42.1",
"gateways": ["8.12.42.1", "2001::1", "2001::2"],
"netmask": "255.255.255.0",
"ip": "8.12.42.51",
"ips": ["2001::10/64", "8.12.42.51/24", "2001::11/64",
"8.12.42.52/32"],
"network_uuid": "992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
"model": "virtio",
"mtu": 1500,
"primary": true
},
{
"interface": "net1",
"mac": "90:b8:d0:bd:4f:9c",
"vlan_id": 600,
"nic_tag": "internal",
"netmask": "255.255.255.0",
"ip": "10.210.1.217",
"ips": ["10.210.1.217/24"],
"gateways": ["10.210.1.210"],
"network_uuid": "98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
"model": "virtio",
"mtu": 1500
}
]
""")
SDC_NICS_SINGLE_GATEWAY = json.loads("""
[
{
"interface":"net0",
"mac":"90:b8:d0:d8:82:b4",
"vlan_id":324,
"nic_tag":"external",
"gateway":"8.12.42.1",
"gateways":["8.12.42.1"],
"netmask":"255.255.255.0",
"ip":"8.12.42.26",
"ips":["8.12.42.26/24"],
"network_uuid":"992fc7ce-6aac-4b74-aed6-7b9d2c6c0bfe",
"model":"virtio",
"mtu":1500,
"primary":true
},
{
"interface":"net1",
"mac":"90:b8:d0:0a:51:31",
"vlan_id":600,
"nic_tag":"internal",
"netmask":"255.255.255.0",
"ip":"10.210.1.27",
"ips":["10.210.1.27/24"],
"network_uuid":"98657fdf-11f4-4ee2-88a4-ce7fe73e33a6",
"model":"virtio",
"mtu":1500
}
]
""")
MOCK_RETURNS = {
'hostname': 'test-host',
'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
'disable_iptables_flag': None,
'enable_motd_sys_info': None,
'test-var1': 'some data',
'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
'sdc:datacenter_name': 'somewhere2',
'sdc:operator-script': '\n'.join(['bin/true', '']),
'sdc:uuid': str(uuid.uuid4()),
'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
'user-data': '\n'.join(['something', '']),
'user-script': '\n'.join(['/bin/true', '']),
'sdc:nics': json.dumps(SDC_NICS),
}
DMI_DATA_RETURN = 'smartdc'
# Useful for calculating the length of a frame body. A SUCCESS body is
# followed by more characters when a payload is present, and is one character
# shorter when there is no payload. See Section 4.3 of
# https://eng.joyent.com/mdata/protocol.html.
SUCCESS_LEN = len('0123abcd SUCCESS ')
NOTFOUND_LEN = len('0123abcd NOTFOUND')
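# For orientation, a V2 frame as constructed/parsed below is a single line:
#     V2 <body-length> <crc32(body) hex> <body>\n
# where <body> is "<request-id> <command>[ <base64-payload>]". The constants
# above measure a SUCCESS/NOTFOUND body up to the point where the optional
# payload would start.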
class PseudoJoyentClient(object):
def __init__(self, data=None):
if data is None:
data = MOCK_RETURNS.copy()
self.data = data
self._is_open = False
return
def get(self, key, default=None, strip=False):
if key in self.data:
r = self.data[key]
if strip:
r = r.strip()
else:
r = default
return r
def get_json(self, key, default=None):
result = self.get(key, default=default)
if result is None:
return default
return json.loads(result)
def exists(self):
return True
def open_transport(self):
assert(not self._is_open)
self._is_open = True
def close_transport(self):
assert(self._is_open)
self._is_open = False
class TestSmartOSDataSource(FilesystemMockingTestCase):
jmc_cfact = None
get_smartos_environ = None
def setUp(self):
super(TestSmartOSDataSource, self).setUp()
self.add_patch(DSMOS + ".get_smartos_environ", "get_smartos_environ")
self.add_patch(DSMOS + ".jmc_client_factory", "jmc_cfact")
self.legacy_user_d = self.tmp_path('legacy_user_tmp')
os.mkdir(self.legacy_user_d)
self.add_patch(DSMOS + ".LEGACY_USER_D", "m_legacy_user_d",
autospec=False, new=self.legacy_user_d)
self.add_patch(DSMOS + ".identify_file", "m_identify_file",
return_value="text/plain")
def _get_ds(self, mockdata=None, mode=DataSourceSmartOS.SMARTOS_ENV_KVM,
sys_cfg=None, ds_cfg=None):
self.jmc_cfact.return_value = PseudoJoyentClient(mockdata)
self.get_smartos_environ.return_value = mode
tmpd = self.tmp_dir()
dirs = {'cloud_dir': self.tmp_path('cloud_dir', tmpd),
'run_dir': self.tmp_path('run_dir')}
for d in dirs.values():
os.mkdir(d)
paths = c_helpers.Paths(dirs)
if sys_cfg is None:
sys_cfg = {}
if ds_cfg is not None:
sys_cfg['datasource'] = sys_cfg.get('datasource', {})
sys_cfg['datasource']['SmartOS'] = ds_cfg
return DataSourceSmartOS.DataSourceSmartOS(
sys_cfg, distro=None, paths=paths)
def test_no_base64(self):
ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
dsrc = self._get_ds(ds_cfg=ds_cfg)
ret = dsrc.get_data()
self.assertTrue(ret)
def test_uuid(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['sdc:uuid'],
dsrc.metadata['instance-id'])
def test_platform_info(self):
"""All platform-related attributes are properly set."""
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
self.assertEqual('joyent', dsrc.cloud_name)
self.assertEqual('joyent', dsrc.platform_type)
self.assertEqual('serial (/dev/ttyS1)', dsrc.subplatform)
def test_root_keys(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['root_authorized_keys'],
dsrc.metadata['public-keys'])
def test_hostname_b64(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
def test_hostname(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['hostname'],
dsrc.metadata['local-hostname'])
def test_hostname_if_no_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(my_returns['hostname'],
dsrc.metadata['local-hostname'])
def test_sdc_hostname_if_no_hostname(self):
my_returns = MOCK_RETURNS.copy()
my_returns['sdc:hostname'] = 'sdc-' + my_returns['hostname']
del my_returns['hostname']
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(my_returns['sdc:hostname'],
dsrc.metadata['local-hostname'])
def test_sdc_uuid_if_no_hostname_or_sdc_hostname(self):
my_returns = MOCK_RETURNS.copy()
del my_returns['hostname']
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(my_returns['sdc:uuid'],
dsrc.metadata['local-hostname'])
def test_userdata(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['user-data'],
dsrc.metadata['legacy-user-data'])
self.assertEqual(MOCK_RETURNS['cloud-init:user-data'],
dsrc.userdata_raw)
def test_sdc_nics(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(json.loads(MOCK_RETURNS['sdc:nics']),
dsrc.metadata['network-data'])
def test_sdc_scripts(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['user-script'],
dsrc.metadata['user-script'])
legacy_script_f = "%s/user-script" % self.legacy_user_d
print("legacy_script_f=%s" % legacy_script_f)
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
self.assertEqual(user_script_perm, '700')
def test_scripts_shebanged(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['user-script'],
dsrc.metadata['user-script'])
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
with open(legacy_script_f, 'r') as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/bin/bash")
user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:]
self.assertEqual(user_script_perm, '700')
def test_scripts_shebang_not_added(self):
"""
Test that the SmartOS requirement that plain text scripts
are executable. This test makes sure that plain texts scripts
with out file magic have it added appropriately by cloud-init.
"""
my_returns = MOCK_RETURNS.copy()
my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl',
'print("hi")', ''])
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(my_returns['user-script'],
dsrc.metadata['user-script'])
legacy_script_f = "%s/user-script" % self.legacy_user_d
self.assertTrue(os.path.exists(legacy_script_f))
self.assertTrue(os.path.islink(legacy_script_f))
shebang = None
with open(legacy_script_f, 'r') as f:
shebang = f.readlines()[0].strip()
self.assertEqual(shebang, "#!/usr/bin/perl")
def test_userdata_removed(self):
"""
User-data in the SmartOS world is supposed to be written to a file
each and every boot. This tests to make sure that in the event the
legacy user-data is removed, the existing user-data is backed-up
and there is no /var/db/user-data left.
"""
user_data_f = "%s/mdata-user-data" % self.legacy_user_d
with open(user_data_f, 'w') as f:
f.write("PREVIOUS")
my_returns = MOCK_RETURNS.copy()
del my_returns['user-data']
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertFalse(dsrc.metadata.get('legacy-user-data'))
found_new = False
for root, _dirs, files in os.walk(self.legacy_user_d):
for name in files:
name_f = os.path.join(root, name)
permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
if re.match(r'.*\/mdata-user-data$', name_f):
found_new = True
print(name_f)
self.assertEqual(permissions, '400')
self.assertFalse(found_new)
def test_vendor_data_not_default(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['sdc:vendor-data'],
dsrc.metadata['vendor-data'])
def test_default_vendor_data(self):
my_returns = MOCK_RETURNS.copy()
def_op_script = my_returns['sdc:vendor-data']
del my_returns['sdc:vendor-data']
dsrc = self._get_ds(mockdata=my_returns)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertNotEqual(def_op_script, dsrc.metadata['vendor-data'])
# we expect default vendor-data is a boothook
self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook"))
def test_disable_iptables_flag(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['disable_iptables_flag'],
dsrc.metadata['iptables_disable'])
def test_motd_sys_info(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(MOCK_RETURNS['enable_motd_sys_info'],
dsrc.metadata['motd_sys_info'])
def test_default_ephemeral(self):
# Test to make sure that the builtin config has the ephemeral
# configuration.
dsrc = self._get_ds()
cfg = dsrc.get_config_obj()
ret = dsrc.get_data()
self.assertTrue(ret)
assert 'disk_setup' in cfg
assert 'fs_setup' in cfg
self.assertIsInstance(cfg['disk_setup'], dict)
self.assertIsInstance(cfg['fs_setup'], list)
def test_override_disk_aliases(self):
# Test to make sure that the built-in DS is overridden
builtin = DataSourceSmartOS.BUILTIN_DS_CONFIG
mydscfg = {'disk_aliases': {'FOO': '/dev/bar'}}
# expect that these values are in builtin, or this is pointless
for k in mydscfg:
self.assertIn(k, builtin)
dsrc = self._get_ds(ds_cfg=mydscfg)
ret = dsrc.get_data()
self.assertTrue(ret)
self.assertEqual(mydscfg['disk_aliases']['FOO'],
dsrc.ds_cfg['disk_aliases']['FOO'])
self.assertEqual(dsrc.device_name_to_device('FOO'),
mydscfg['disk_aliases']['FOO'])
def test_reconfig_network_on_boot(self):
# Test to ensure that network is configured from metadata on each boot
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
self.assertSetEqual(set([EventType.BOOT_NEW_INSTANCE, EventType.BOOT]),
dsrc.update_events['network'])
class TestIdentifyFile(CiTestCase):
"""Test the 'identify_file' utility."""
@skipIf(not which("file"), "command 'file' not available.")
def test_file_happy_path(self):
"""Test file is available and functional on plain text."""
fname = self.tmp_path("myfile")
write_file(fname, "plain text content here\n")
with self.allow_subp(["file"]):
self.assertEqual("text/plain", identify_file(fname))
@mock.patch(DSMOS + ".util.subp")
def test_returns_none_on_error(self, m_subp):
"""On 'file' execution error, None should be returned."""
m_subp.side_effect = ProcessExecutionError("FILE_FAILED", exit_code=99)
fname = self.tmp_path("myfile")
write_file(fname, "plain text content here\n")
self.assertEqual(None, identify_file(fname))
self.assertEqual(
[mock.call(["file", "--brief", "--mime-type", fname])],
m_subp.call_args_list)
class ShortReader(object):
"""Implements a 'read' interface for bytes provided.
much like io.BytesIO but the 'endbyte' acts as if EOF.
When it is reached a short will be returned."""
def __init__(self, initial_bytes, endbyte=b'\0'):
self.data = initial_bytes
self.index = 0
self.len = len(self.data)
self.endbyte = endbyte
@property
def emptied(self):
return self.index >= self.len
def read(self, size=-1):
"""Read size bytes but not past a null."""
if size == 0 or self.index >= self.len:
return b''
rsize = size
if size < 0 or size + self.index > self.len:
rsize = self.len - self.index
next_null = self.data.find(self.endbyte, self.index, rsize)
if next_null >= 0:
rsize = next_null - self.index + 1
i = self.index
self.index += rsize
ret = self.data[i:i + rsize]
if len(ret) and ret[-1:] == self.endbyte:
ret = ret[:-1]
return ret
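# Worked example of the semantics above (values traced from the code):
#     r = ShortReader(b'ab\0cd')
#     r.read(10)  # -> b'ab': stops at and consumes the endbyte
#     r.read(10)  # -> b'cd': the remainder after the endbyte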
class TestJoyentMetadataClient(FilesystemMockingTestCase):
invalid = b'invalid command\n'
failure = b'FAILURE\n'
v2_ok = b'V2_OK\n'
def setUp(self):
super(TestJoyentMetadataClient, self).setUp()
self.serial = mock.MagicMock(spec=serial.Serial)
self.request_id = 0xabcdef12
self.metadata_value = 'value'
self.response_parts = {
'command': 'SUCCESS',
'crc': 'b5a9ff00',
'length': SUCCESS_LEN + len(b64e(self.metadata_value)),
'payload': b64e(self.metadata_value),
'request_id': '{0:08x}'.format(self.request_id),
}
def make_response():
payloadstr = ''
if 'payload' in self.response_parts:
payloadstr = ' {0}'.format(self.response_parts['payload'])
return ('V2 {length} {crc} {request_id} '
'{command}{payloadstr}\n'.format(
payloadstr=payloadstr,
**self.response_parts).encode('ascii'))
self.metasource_data = None
def read_response(length):
if not self.metasource_data:
self.metasource_data = make_response()
self.metasource_data_len = len(self.metasource_data)
resp = self.metasource_data[:length]
self.metasource_data = self.metasource_data[length:]
return resp
self.serial.read.side_effect = read_response
self.patched_funcs.enter_context(
mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
mock.Mock(return_value=self.request_id)))
def _get_client(self):
return DataSourceSmartOS.JoyentMetadataClient(
fp=self.serial, smartos_type=DataSourceSmartOS.SMARTOS_ENV_KVM)
def _get_serial_client(self):
self.serial.timeout = 1
return DataSourceSmartOS.JoyentMetadataSerialClient(None,
fp=self.serial)
def assertEndsWith(self, haystack, prefix):
self.assertTrue(haystack.endswith(prefix),
"{0} does not end with '{1}'".format(
repr(haystack), prefix))
def assertStartsWith(self, haystack, prefix):
self.assertTrue(haystack.startswith(prefix),
"{0} does not start with '{1}'".format(
repr(haystack), prefix))
def assertNoMoreSideEffects(self, obj):
self.assertRaises(StopIteration, obj)
def test_get_metadata_writes_a_single_line(self):
client = self._get_client()
client.get('some_key')
self.assertEqual(1, self.serial.write.call_count)
written_line = self.serial.write.call_args[0][0]
self.assertEndsWith(written_line.decode('ascii'),
b'\n'.decode('ascii'))
self.assertEqual(1, written_line.count(b'\n'))
def _get_written_line(self, key='some_key'):
client = self._get_client()
client.get(key)
return self.serial.write.call_args[0][0]
def test_get_metadata_writes_bytes(self):
self.assertIsInstance(self._get_written_line(), six.binary_type)
def test_get_metadata_line_starts_with_v2(self):
foo = self._get_written_line()
self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii'))
def test_get_metadata_uses_get_command(self):
parts = self._get_written_line().decode('ascii').strip().split(' ')
self.assertEqual('GET', parts[4])
def test_get_metadata_base64_encodes_argument(self):
key = 'my_key'
parts = self._get_written_line(key).decode('ascii').strip().split(' ')
self.assertEqual(b64e(key), parts[5])
def test_get_metadata_calculates_length_correctly(self):
parts = self._get_written_line().decode('ascii').strip().split(' ')
expected_length = len(' '.join(parts[3:]))
self.assertEqual(expected_length, int(parts[1]))
def test_get_metadata_uses_appropriate_request_id(self):
parts = self._get_written_line().decode('ascii').strip().split(' ')
request_id = parts[3]
self.assertEqual(8, len(request_id))
self.assertEqual(request_id, request_id.lower())
def test_get_metadata_uses_random_number_for_request_id(self):
line = self._get_written_line()
request_id = line.decode('ascii').strip().split(' ')[3]
self.assertEqual('{0:08x}'.format(self.request_id), request_id)
def test_get_metadata_checksums_correctly(self):
parts = self._get_written_line().decode('ascii').strip().split(' ')
expected_checksum = '{0:08x}'.format(
crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
checksum = parts[2]
self.assertEqual(expected_checksum, checksum)
def test_get_metadata_reads_a_line(self):
client = self._get_client()
client.get('some_key')
self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
def test_get_metadata_returns_valid_value(self):
client = self._get_client()
value = client.get('some_key')
self.assertEqual(self.metadata_value, value)
def test_get_metadata_throws_exception_for_incorrect_length(self):
self.response_parts['length'] = 0
client = self._get_client()
self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
client.get, 'some_key')
def test_get_metadata_throws_exception_for_incorrect_crc(self):
self.response_parts['crc'] = 'deadbeef'
client = self._get_client()
self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
client.get, 'some_key')
def test_get_metadata_throws_exception_for_request_id_mismatch(self):
self.response_parts['request_id'] = 'deadbeef'
client = self._get_client()
client._checksum = lambda _: self.response_parts['crc']
self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
client.get, 'some_key')
def test_get_metadata_returns_None_if_value_not_found(self):
self.response_parts['payload'] = ''
self.response_parts['command'] = 'NOTFOUND'
self.response_parts['length'] = NOTFOUND_LEN
client = self._get_client()
client._checksum = lambda _: self.response_parts['crc']
self.assertIsNone(client.get('some_key'))
def test_negotiate(self):
client = self._get_client()
reader = ShortReader(self.v2_ok)
client.fp.read.side_effect = reader.read
client._negotiate()
self.assertTrue(reader.emptied)
def test_negotiate_short_response(self):
client = self._get_client()
# chopped '\n' from v2_ok.
reader = ShortReader(self.v2_ok[:-1] + b'\0')
client.fp.read.side_effect = reader.read
self.assertRaises(DataSourceSmartOS.JoyentMetadataTimeoutException,
client._negotiate)
self.assertTrue(reader.emptied)
def test_negotiate_bad_response(self):
client = self._get_client()
reader = ShortReader(b'garbage\n' + self.v2_ok)
client.fp.read.side_effect = reader.read
self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
client._negotiate)
self.assertEqual(self.v2_ok, client.fp.read())
def test_serial_open_transport(self):
client = self._get_serial_client()
reader = ShortReader(b'garbage\0' + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_failure(self):
client = self._get_serial_client()
reader = ShortReader(b'garbage' + b'\0' + self.failure +
self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_flush_many_timeouts(self):
client = self._get_serial_client()
reader = ShortReader(b'\0' * 100 + self.invalid + self.v2_ok)
client.fp.read.side_effect = reader.read
client.open_transport()
self.assertTrue(reader.emptied)
def test_list_metadata_returns_list(self):
parts = ['foo', 'bar']
value = b64e('\n'.join(parts))
self.response_parts['payload'] = value
self.response_parts['crc'] = '40873553'
self.response_parts['length'] = SUCCESS_LEN + len(value)
client = self._get_client()
self.assertEqual(client.list(), parts)
def test_list_metadata_returns_empty_list_if_no_customer_metadata(self):
del self.response_parts['payload']
self.response_parts['length'] = SUCCESS_LEN - 1
self.response_parts['crc'] = '14e563ba'
client = self._get_client()
self.assertEqual(client.list(), [])
class TestNetworkConversion(CiTestCase):
def test_convert_simple(self):
expected = {
'version': 1,
'config': [
{'name': 'net0', 'type': 'physical',
'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
'address': '8.12.42.102/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:f5:e4:f5'},
{'name': 'net1', 'type': 'physical',
'subnets': [{'type': 'static',
'address': '192.168.128.93/22'}],
'mtu': 8500, 'mac_address': '90:b8:d0:a5:ff:cd'}]}
found = convert_net(SDC_NICS)
self.assertEqual(expected, found)
def test_convert_simple_alt(self):
expected = {
'version': 1,
'config': [
{'name': 'net0', 'type': 'physical',
'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
'address': '8.12.42.51/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
{'name': 'net1', 'type': 'physical',
'subnets': [{'type': 'static',
'address': '10.210.1.217/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
found = convert_net(SDC_NICS_ALT)
self.assertEqual(expected, found)
def test_convert_simple_dhcp(self):
expected = {
'version': 1,
'config': [
{'name': 'net0', 'type': 'physical',
'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
'address': '8.12.42.51/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
{'name': 'net1', 'type': 'physical',
'subnets': [{'type': 'dhcp4'}],
'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
found = convert_net(SDC_NICS_DHCP)
self.assertEqual(expected, found)
def test_convert_simple_multi_ip(self):
expected = {
'version': 1,
'config': [
{'name': 'net0', 'type': 'physical',
'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
'address': '8.12.42.51/24'},
{'type': 'static',
'address': '8.12.42.52/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
{'name': 'net1', 'type': 'physical',
'subnets': [{'type': 'static',
'address': '10.210.1.217/24'},
{'type': 'static',
'address': '10.210.1.151/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
found = convert_net(SDC_NICS_MIP)
self.assertEqual(expected, found)
def test_convert_with_dns(self):
expected = {
'version': 1,
'config': [
{'name': 'net0', 'type': 'physical',
'subnets': [{'type': 'static', 'gateway': '8.12.42.1',
'address': '8.12.42.51/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
{'name': 'net1', 'type': 'physical',
'subnets': [{'type': 'dhcp4'}],
'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'},
{'type': 'nameserver',
'address': ['8.8.8.8', '8.8.8.1'], 'search': ["local"]}]}
found = convert_net(
network_data=SDC_NICS_DHCP, dns_servers=['8.8.8.8', '8.8.8.1'],
dns_domain="local")
self.assertEqual(expected, found)
def test_convert_simple_multi_ipv6(self):
expected = {
'version': 1,
'config': [
{'name': 'net0', 'type': 'physical',
'subnets': [{'type': 'static', 'address':
'2001:4800:78ff:1b:be76:4eff:fe06:96b3/64'},
{'type': 'static', 'gateway': '8.12.42.1',
'address': '8.12.42.51/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:ae:64:51'},
{'name': 'net1', 'type': 'physical',
'subnets': [{'type': 'static',
'address': '10.210.1.217/24'}],
'mtu': 1500, 'mac_address': '90:b8:d0:bd:4f:9c'}]}
found = convert_net(SDC_NICS_MIP_IPV6)
self.assertEqual(expected, found)
def test_convert_simple_both_ipv4_ipv6(self):
expected = {
'version': 1,
'config': [
{'mac_address': '90:b8:d0:ae:64:51', 'mtu': 1500,
'name': 'net0', 'type': 'physical',
'subnets': [{'address': '2001::10/64', 'gateway': '2001::1',
'type': 'static'},
{'address': '8.12.42.51/24',
'gateway': '8.12.42.1',
'type': 'static'},
{'address': '2001::11/64', 'type': 'static'},
{'address': '8.12.42.52/32', 'type': 'static'}]},
{'mac_address': '90:b8:d0:bd:4f:9c', 'mtu': 1500,
'name': 'net1', 'type': 'physical',
'subnets': [{'address': '10.210.1.217/24',
'type': 'static'}]}]}
found = convert_net(SDC_NICS_IPV4_IPV6)
self.assertEqual(expected, found)
def test_gateways_not_on_all_nics(self):
expected = {
'version': 1,
'config': [
{'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
'name': 'net0', 'type': 'physical',
'subnets': [{'address': '8.12.42.26/24',
'gateway': '8.12.42.1', 'type': 'static'}]},
{'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
'name': 'net1', 'type': 'physical',
'subnets': [{'address': '10.210.1.27/24',
'type': 'static'}]}]}
found = convert_net(SDC_NICS_SINGLE_GATEWAY)
self.assertEqual(expected, found)
def test_routes_on_all_nics(self):
routes = [
{'linklocal': False, 'dst': '3.0.0.0/8', 'gateway': '8.12.42.3'},
{'linklocal': False, 'dst': '4.0.0.0/8', 'gateway': '10.210.1.4'}]
expected = {
'version': 1,
'config': [
{'mac_address': '90:b8:d0:d8:82:b4', 'mtu': 1500,
'name': 'net0', 'type': 'physical',
'subnets': [{'address': '8.12.42.26/24',
'gateway': '8.12.42.1', 'type': 'static',
'routes': [{'network': '3.0.0.0/8',
'gateway': '8.12.42.3'},
{'network': '4.0.0.0/8',
'gateway': '10.210.1.4'}]}]},
{'mac_address': '90:b8:d0:0a:51:31', 'mtu': 1500,
'name': 'net1', 'type': 'physical',
'subnets': [{'address': '10.210.1.27/24', 'type': 'static',
'routes': [{'network': '3.0.0.0/8',
'gateway': '8.12.42.3'},
{'network': '4.0.0.0/8',
'gateway': '10.210.1.4'}]}]}]}
found = convert_net(SDC_NICS_SINGLE_GATEWAY, routes=routes)
self.maxDiff = None
self.assertEqual(expected, found)
@unittest2.skipUnless(get_smartos_environ() == SMARTOS_ENV_KVM,
"Only supported on KVM and bhyve guests under SmartOS")
@unittest2.skipUnless(os.access(SERIAL_DEVICE, os.W_OK),
"Requires write access to " + SERIAL_DEVICE)
@unittest2.skipUnless(HAS_PYSERIAL is True, "pyserial not available")
class TestSerialConcurrency(CiTestCase):
"""
This class tests locking on an actual serial port, and as such can only
be run in a kvm or bhyve guest running on a SmartOS host. A test run on
a metadata socket will not be valid because a metadata socket ensures
there is only one session over a connection. In contrast, in the
absence of proper locking multiple processes opening the same serial
port can corrupt each others' exchanges with the metadata server.
This takes on the order of 2 to 3 minutes to run.
"""
allowed_subp = ['mdata-get']
def setUp(self):
self.mdata_proc = multiprocessing.Process(target=self.start_mdata_loop)
self.mdata_proc.start()
super(TestSerialConcurrency, self).setUp()
def tearDown(self):
# os.kill() rather than mdata_proc.terminate() to avoid console spam.
os.kill(self.mdata_proc.pid, signal.SIGKILL)
self.mdata_proc.join()
super(TestSerialConcurrency, self).tearDown()
def start_mdata_loop(self):
"""
The mdata-get command is repeatedly run in a separate process so
that it may try to race with metadata operations performed in the
main test process. Use of mdata-get is better than two processes
using the protocol implementation in DataSourceSmartOS because we
are testing to be sure that cloud-init and mdata-get respect each
other's locks.
"""
rcs = list(range(0, 256))
while True:
subp(['mdata-get', 'sdc:routes'], rcs=rcs)
def test_all_keys(self):
self.assertIsNotNone(self.mdata_proc.pid)
ds = DataSourceSmartOS
keys = [tup[0] for tup in ds.SMARTOS_ATTRIB_MAP.values()]
keys.extend(ds.SMARTOS_ATTRIB_JSON.values())
client = ds.jmc_client_factory(smartos_type=SMARTOS_ENV_KVM)
self.assertIsNotNone(client)
# The behavior that we are testing for was observed with mdata-get
# running 10 times at roughly the same time as cloud-init fetched each
# key once. cloud-init would regularly see failures before making it
# through all keys once.
for _ in range(0, 3):
for key in keys:
# We don't care about the return value, just that it doesn't
# throw any exceptions.
client.get(key)
self.assertIsNone(self.mdata_proc.exitcode)
# vi: ts=4 expandtab
| larsks/cloud-init | tests/unittests/test_datasource/test_smartos.py | Python | gpl-3.0 | 41,930 |
import os
import logging
import tornado.options as opt
from motherbrain.base import conf
from motherbrain.base.conf import get_config
SITE_CONF = conf.site_conf(os.getcwd())
DATADIR = SITE_CONF.get('env.motherbrain_data', '/tmp')
API_URL = SITE_CONF.get('env.api_url')
WEBCLIENT_URL = SITE_CONF.get('env.webclient_url')
_srv_opts = {
'config': {'default': 'api_server.cfg',
'help': 'Configuration File'},
'port': {'default': 8888, 'type': int,
'help': 'Tornado Port'},
'debug': {'default': True, 'type': bool},
'cors_hosts': {'default': '''http://api.urli.st '''
'''http://urli.st '''
'''http://localhost:9999 '''
'''http://next.urli.st '''
'''http://urli.st '''
'''http://next.api.urli.st''',
'help': 'Hosts allowed to perform Cross Domain Request'},
'media_path': {'default': os.path.join(DATADIR, 'urlist_media')},
'media_url': {'default': '/media'},
'static_url': {'default': 'http://static.urli.st'},
'base_url': {'default': API_URL},
'webclient_url': {'default': WEBCLIENT_URL}
}
_motherbrain_opts = {
'dispatcher_classname': {'default': 'MBDispatcherCluster',
'help': 'Motherbrain dispatcher class'},
'addresses': {'default': '''tcp://localhost:5555 tcp://localhost:5556 '''
'''tcp://localhost:5557 tcp://localhost:5558''',
'help': 'A space separated list of addresses'},
'datadir': {'default': os.path.join(DATADIR, 'motherbrain_data')}
}
_oauth_opts = {
'cookie_secret': {'default': 'XXX'},
'cookie_domain': {'default': SITE_CONF.get('oauth.cookie_domain')},
'facebook_secret': {'default': 'XXX'},
'facebook_api_key': {'default': 'XXX'},
'facebook_redirect_uri': {'default': '{}/login/facebook'.format(API_URL)},
'twitter_consumer_key': {'default': 'XXX'},
'twitter_consumer_secret': {'default': 'XXX'},
'urlist_salt': {'default': 'XXX'}
}
_db_opts = {
'dbname': {'default': 'urlist'},
'dbhost': {'default': 'mongo1'},
'dbport': {'default': 27017, 'type': int},
'dbusr': {'default': ''},
'dbpwd': {'default': ''},
}
# Get Tornado default options
_tornado_opts = {k: v.value() for k, v in opt.options.iteritems()}
_options = {'server': _srv_opts,
'database': _db_opts,
'tornado': _tornado_opts,
'oauth': _oauth_opts,
'motherbrain': _motherbrain_opts}
_cli_args = {'server': ['port', 'debug', 'config'],
'motherbrain': ['datadir']}
config = get_config(_options, _cli_args)
if SITE_CONF:
logging.info('CONF::SITE --- Read')
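# With the _cli_args mapping above, the exposed options can be overridden on
# the command line, e.g. (hypothetical invocation; the actual entry point
# lives elsewhere):
#     python api_server.py --port=9000 --debug=False --datadir=/srv/mb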
| urlist/urlist | motherbrain/api_server/conf.py | Python | gpl-3.0 | 2,842 |
#This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
import imp
from flask.config import ConfigAttribute, Config as ConfigBase # noqa
class Config(ConfigBase):
"Configuration without the root_path"
def __init__(self, defaults=None):
dict.__init__(self, defaults or {})
def from_pyfile(self, filename):
"""
Updates the values in the config from a Python file. This function
behaves as if the file were imported as a module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
"""
d = imp.new_module('config')
d.__file__ = filename
try:
execfile(filename, d.__dict__)
except IOError, e:
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
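# Minimal usage sketch (hypothetical paths/keys):
#     config = Config(defaults={'DEBUG': False})
#     config.from_pyfile('/etc/nereid/site_config.py')
# As with Flask's from_object, only uppercase names defined in the file end
# up as configuration keys.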
| NaN-tic/nereid | nereid/config.py | Python | gpl-3.0 | 1,087 |
""" Here, we need some documentation...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import types
import threading
import time
import six
from DIRAC import gLogger
from DIRAC.Core.Utilities.DIRACSingleton import DIRACSingleton
@six.add_metaclass(DIRACSingleton)
class Devloader(object):
def __init__(self):
self.__log = gLogger.getSubLogger("Devloader")
self.__reloaded = False
self.__enabled = True
self.__reloadTask = False
self.__stuffToClose = []
self.__watchedFiles = []
self.__modifyTimes = {}
def addStuffToClose(self, stuff):
self.__stuffToClose.append(stuff)
@property
def enabled(self):
return self.__enabled
def watchFile(self, fp):
if os.path.isfile(fp):
self.__watchedFiles.append(fp)
return True
return False
def __restart(self):
self.__reloaded = True
for stuff in self.__stuffToClose:
try:
self.__log.always("Closing %s" % stuff)
sys.stdout.flush()
stuff.close()
except Exception:
gLogger.exception("Could not close %s" % stuff)
python = sys.executable
os.execl(python, python, * sys.argv)
def bootstrap(self):
if not self.__enabled:
return False
if self.__reloadTask:
return True
self.__reloadTask = threading.Thread(target=self.__reloadOnUpdate)
self.__reloadTask.setDaemon(1)
self.__reloadTask.start()
def __reloadOnUpdate(self):
while True:
time.sleep(1)
if self.__reloaded:
return
for modName in sys.modules:
modObj = sys.modules[modName]
if not isinstance(modObj, types.ModuleType):
continue
path = getattr(modObj, "__file__", None)
if not path:
continue
if path.endswith(".pyc") or path.endswith(".pyo"):
path = path[:-1]
self.__checkFile(path)
for path in self.__watchedFiles:
self.__checkFile(path)
def __checkFile(self, path):
try:
modified = os.stat(path).st_mtime
except Exception:
return
if path not in self.__modifyTimes:
self.__modifyTimes[path] = modified
return
if self.__modifyTimes[path] != modified:
self.__log.always("File system changed (%s). Restarting..." % (path))
self.__restart()
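# Hypothetical usage sketch for development runs:
#     dl = Devloader()
#     dl.bootstrap()                     # start the mtime-polling thread
#     dl.watchFile("etc/local.cfg")      # also restart on config edits
#     dl.addStuffToClose(server_socket)  # closed cleanly before re-exec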
| yujikato/DIRAC | src/DIRAC/Core/Utilities/Devloader.py | Python | gpl-3.0 | 2,365 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Copyright (c) 2010 Jean-Baptiste Denis.
#
# This is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License version 3 or later as published by the Free
# Software Foundation.
#
# A copy of the license has been included in the COPYING file.
import sys
import os
import logging
import threading
try:
# first we try system wide
import treewatcher
except ImportError:
# if it fails, we try it from the project source directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.path.pardir))
import treewatcher
from treewatcher import ThreadedEventsCallbacks, choose_source_tree_monitor
_LOGGER = logging.getLogger('_LOGGER')
_LOGGER.setLevel(logging.INFO)
_LOGGER.addHandler(logging.StreamHandler())
class MonitorCallbacks(ThreadedEventsCallbacks):
"""
    Example callbacks that log each event and its path.
    These are threaded callbacks: they are called from a thread other
    than the monitor's, so we use logging to keep the output tidy.
    Protect any shared state from concurrent access, for example with
    a Lock.
"""
def create(self, path, is_dir):
""" callback called on a 'IN_CREATE' event """
_LOGGER.info("create: %s %s %s" % (path, is_dir, threading.current_thread().name))
def delete(self, path, is_dir):
""" callback called on a 'IN_DELETE' event """
_LOGGER.info("delete: %s %s %s" % (path, is_dir, threading.current_thread().name))
def close_write(self, path, is_dir):
""" callback called on a 'IN_CLOSE_WRITE' event """
_LOGGER.info("close_write: %s %s %s" % (path, is_dir, threading.current_thread().name))
def moved_from(self, path, is_dir):
""" callback called on a 'IN_MOVED_FROM' event """
_LOGGER.info("moved_from: %s %s %s" % (path, is_dir, threading.current_thread().name))
def moved_to(self, path, is_dir):
""" callback called on a 'IN_MOVED_TO' event """
_LOGGER.info("moved_to: %s %s %s" % (path, is_dir, threading.current_thread().name))
def modify(self, path, is_dir):
""" callback called on a 'IN_MODIFY' event """
_LOGGER.info("modify: %s %s %s" % (path, is_dir, threading.current_thread().name))
def attrib(self, path, is_dir):
""" callback called on a 'IN_ATTRIB' event """
_LOGGER.info("attrib: %s %s %s" % (path, is_dir, threading.current_thread().name))
def unmount(self, path, is_dir):
""" callback called on a 'IN_UNMOUNT' event """
_LOGGER.info("unmount: %s %s %s" % (path, is_dir, threading.current_thread().name))
if __name__ == '__main__':
# Yeah, command line parsing
if len(sys.argv) < 2:
print "usage:", sys.argv[0], "directory"
sys.exit(1)
# we check if the provided string is a valid directory
path_to_watch = sys.argv[1]
if not os.path.isdir(path_to_watch):
print path_to_watch, "is not a valid directory."
sys.exit(2)
    # We instantiate our callbacks object
callbacks = MonitorCallbacks()
# we get a source tree monitor
stm = choose_source_tree_monitor()
# we set our callbacks
stm.set_events_callbacks(callbacks)
# we will use two threads to handle callbacks
stm.set_workers_number(2)
# we start the monitor
stm.start()
# after that, we can add the directory we want to watch
stm.add_source_dir(path_to_watch)
print "Watching directory", path_to_watch
print "Open a new terminal, and create/remove some folders and files in the", path_to_watch, "directory"
print "Ctrl-C to exit..."
try:
# without specific arguments, the next call will block forever
# open a terminal, and create/remove some folders and files
# this will last forever. use Ctrl-C to exit.
stm.process_events()
# see monitor-timeout-serial.py for an example with a timeout argument
except KeyboardInterrupt:
print "Stopping monitor."
finally:
# clean stop
stm.stop()
| jbd/treewatcher | examples/monitor-threaded.py | Python | gpl-3.0 | 4,149 |
#!/usr/bin/env python3
import sys
import os
import argparse
import zipfile
import tarfile
def make_zipfile(outname, filenames, prefix):
with zipfile.ZipFile(outname, "w", zipfile.ZIP_DEFLATED) as z:
for filename in filenames:
z.write(filename, prefix+filename)
def make_tarfile(outname, filenames, prefix, mode="w"):
    with tarfile.open(outname, mode) as z:
for filename in filenames:
z.add(filename, prefix+filename)
def make_tarfile_gz(outname, filenames, prefix):
return make_tarfile(outname, filenames, prefix, mode="w:gz")
def make_tarfile_bz2(outname, filenames, prefix):
    return make_tarfile(outname, filenames, prefix, mode="w:bz2")
def make_tarfile_xz(outname, filenames, prefix):
    return make_tarfile(outname, filenames, prefix, mode="w:xz")
formathandlers = [
(".zip", make_zipfile),
(".tar", make_tarfile),
(".tgz", make_tarfile_gz),
(".tar.gz", make_tarfile_gz),
(".tbz", make_tarfile_bz2),
(".tar.bz2", make_tarfile_bz2),
(".txz", make_tarfile_xz),
(".tar.xz", make_tarfile_xz),
]
tophelptext = """
Make a zip or tar archive containing specified files without a tar bomb.
"""
bottomhelptext = """
Supported output formats: """+", ".join(x[0] for x in formathandlers)
def parse_argv(argv):
p = argparse.ArgumentParser(
description=tophelptext, epilog=bottomhelptext
)
p.add_argument("filelist",
help="name of file containing newline-separated relative "
"paths to files to include, or - for standard input")
p.add_argument("foldername",
help="name of folder in archive (e.g. hello-1.2.5)")
p.add_argument("-o", "--output",
help="path of archive (default: foldername + .zip)")
return p.parse_args(argv[1:])
def get_writerfunc(outname):
outbaselower = os.path.basename(outname).lower()
for ext, writerfunc in formathandlers:
if outbaselower.endswith(ext):
return writerfunc
raise KeyError(os.path.splitext(outbaselower)[1])
def main(argv=None):
args = parse_argv(argv or sys.argv)
if args.filelist == '-':
filenames = set(sys.stdin)
else:
with open(args.filelist, "r") as infp:
filenames = set(infp)
filenames = set(x.strip() for x in filenames)
filenames = sorted(x for x in filenames if x)
outname = args.output or args.foldername + ".zip"
writerfunc = get_writerfunc(outname)
writerfunc(outname, filenames, args.foldername+"/")
if __name__=='__main__':
main()
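# Example (hypothetical invocation; assumes files.txt holds one relative
# path per line):
#
#     git ls-files > files.txt
#     ./zipup.py files.txt hello-1.2.5 -o hello-1.2.5.tar.gz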
| pinobatch/thwaite-nes | tools/zipup.py | Python | gpl-3.0 | 2,608 |
# This file is part of the "upq" program used on springfiles.com to manage file
# uploads, mirror distribution etc. It is published under the GPLv3.
#
#Copyright (C) 2011 Daniel Troeder (daniel #at# admin-box #dot# com)
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
# downloads a file
from upqjob import UpqJob
from upqdb import UpqDB
from time import time
import os
import shutil
import requests
class Download(UpqJob):
"""
"download url:$url"
"""
def run(self):
url=self.jobdata['url']
filename=os.path.basename(url)
tmpfile=os.path.join(self.getcfg('temppath', '/tmp'), filename)
self.jobdata['file']=tmpfile
self.logger.debug("going to download %s", url)
try:
response = requests.get(url, stream=True, verify=False)
with open(tmpfile, 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
self.logger.debug("downloaded to %s", tmpfile)
except Exception as e:
self.logger.error(str(e))
return False
return True
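# Example (hypothetical sketch): a job queued as "download url:$url" runs
# roughly like
#
#     job.jobdata = {'url': 'http://example.com/map.sdz'}
#     if job.run():
#         job.jobdata['file']   # e.g. /tmp/map.sdz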
| springfiles/upq | jobs/download.py | Python | gpl-3.0 | 1,078 |
# -*- coding: utf-8 -*-
from bdchecker.api import NewDatabaseTaskChecker
from bdcheckerapp.autograding.zaj5.unit5.utils import Zaj5TaskChecker, UserList
class TaskChecker(NewDatabaseTaskChecker):
display_stdout = True
class TestSuite(Zaj5TaskChecker):
def test_has_procedure(self):
self.assert_has_procedure("add_user")
def test_view_is_empty_at_the_beginning(self):
self.assertEqual(len(list(self.session.query(UserList.username))), 0,
msg="Widok \"LIST_USERS\" powinien być pusty zaraz po stworzeniu schematu")
def test_user_role_can_add_users(self):
user = self.get_session("user")
try:
user.execute("SELECT add_user('foo', 'bar');")
user.flush()
except Exception as e:
raise AssertionError("Rola \"user\" nie mogła wywołać unkcji add_user") from e
            self.assertEqual(list(user.query(self.UserList.username)), [("foo",)], msg="After calling add_user there was no user in the database")
def test_user_is_created_properly(self):
self.session.execute("SELECT add_user('foo', 'bar');")
self.assertEqual(
list(self.session.query(self.Users.username, self.Users.is_admin)), [("foo", 0)],
msg="Po stworzeniu użytkownika za pomocą add_user okazało się że nie został on stworzony poprawnie.")
| jbzdak/data-base-checker | bdcheckerapp/autograding/zaj5/unit5/task3.py | Python | gpl-3.0 | 1,459 |
#! /usr/bin/python3
import os
import requests
canary = 'mwtask111'
serverlist = ['mw101', 'mw102', 'mw111', 'mw112', 'mw121', 'mw122']
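# check_up: request the meta wiki API through the given backend (via the
# X-Miraheze-Debug header) and confirm that backend actually served it.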
def check_up(server: str) -> bool:
headers = {'X-Miraheze-Debug': f'{server}.miraheze.org'}
req = requests.get('https://meta.miraheze.org/w/api.php?action=query&meta=siteinfo&formatversion=2&format=json', headers=headers)
if req.status_code == 200 and 'miraheze' in req.text and server in req.headers['X-Served-By']:
return True
return False
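# check_ro: same debug-routed API request; report whether MediaWiki says
# it is in read-only mode.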
def check_ro(server: str) -> bool:
headers = {'X-Miraheze-Debug': f'{server}.miraheze.org'}
req = requests.get('https://meta.miraheze.org/w/api.php?action=query&meta=siteinfo&formatversion=2&format=json', headers=headers)
response = req.json()
if response['query']['general']['readonly']:
return True
return False
print('Welcome to the MediaWiki Upgrade tool!')
input('Please confirm you are running this script on the canary server: (press enter)')
input('MediaWiki -> RO - Running puppet to sync config')
os.system('sudo puppet agent -tv')
print('Config deployed')
print('Checking RO on Canary Server')
if not check_ro(canary):
input('Stopping deploy - RO check failed - Press enter to resume')
for server in serverlist:
print(f'Confirming RO on {server}')
if not check_ro(server):
input(f'RO check failed on {server} - Press enter to resume')
print('Starting staging update')
input('Press enter when branch updated in puppet: ')
os.system('sudo -u www-data rm -rf /srv/mediawiki-staging/w')
os.system('sudo puppet agent -tv')
print('Will now check mediawiki branch')
os.system('git -C /srv/mediawiki-staging/w rev-parse --abbrev-ref HEAD')
input('Confirm: ')
print('Will now deploy to canary server')
os.system('deploy-mediawiki --world --l10n --force --ignore-time --servers=skip')
if check_up(canary) and check_ro(canary):
print('Canary deploy done')
else:
print('Canary is not online')
input('Press enter to rollout: ')
for server in serverlist:
print(f'Will now deploy to {server}')
os.system(f'deploy-mediawiki --world --l10n --force --ignore-time --servers={server}')
if check_up(server) and check_ro(server):
print(f'{server} deploy done')
else:
input(f'{server} is not online - Proceed? ')
print('Deployment done')
input('Please merge RW change and press enter: ')
print('Running puppet')
os.system('sudo puppet agent -tv')
print('Deployment done')
| miraheze/puppet | modules/mediawiki/files/bin/mwupgradetool.py | Python | gpl-3.0 | 2,456 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Hou Shaohui
#
# Author: Hou Shaohui <[email protected]>
# Maintainer: Hou Shaohui <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gettext
import os
def get_parent_dir(filepath, level=1):
'''Get parent dir.'''
parent_dir = os.path.realpath(filepath)
while(level > 0):
parent_dir = os.path.dirname(parent_dir)
level -= 1
return parent_dir
LOCALE_DIR=os.path.join(get_parent_dir(__file__, 2), "locale")
if not os.path.exists(LOCALE_DIR):
LOCALE_DIR="/usr/share/locale"
_ = None
try:
_ = gettext.translation("deepin-music-player", LOCALE_DIR).gettext
except Exception, e:
_ = lambda i : i
| hillwoodroc/deepin-music-player | src/nls.py | Python | gpl-3.0 | 1,379 |
#!/usr/bin/env python
"""Module for calling Artist related last.fm web services API methods"""
__author__ = "Abhinav Sarkar <[email protected]>"
__version__ = "0.2"
__license__ = "GNU Lesser General Public License"
__package__ = "lastfm"
from lastfm.base import LastfmBase
from lastfm.mixin import mixin
from lastfm.decorators import cached_property, top_property
@mixin("crawlable", "shoutable", "sharable",
"taggable", "searchable", "cacheable", "property_adder")
class Artist(LastfmBase):
"""A class representing an artist."""
class Meta(object):
properties = ["name", "similar", "top_tags"]
fillable_properties = ["mbid", "url", "image",
"streamable", "stats", "bio"]
def init(self, api, subject = None, **kwargs):
"""
Create an Artist object by providing all the data related to it.
@param api: an instance of L{Api}
@type api: L{Api}
@param name: the artist name
@type name: L{str}
@param mbid: MBID of the artist
@type mbid: L{str}
@param url: URL of the artist on last.fm
@type url: L{str}
@param image: the images of the artist in various sizes
@type image: L{dict}
@param streamable: flag indicating if the artist is streamable from last.fm
@type streamable: L{bool}
@param stats: the artist statistics
@type stats: L{Stats}
@param similar: artists similar to the provided artist
@type similar: L{list} of L{Artist}
@param top_tags: top tags for the artist
@type top_tags: L{list} of L{Tag}
@param bio: biography of the artist
@type bio: L{Wiki}
@param subject: the subject to which this instance belongs to
@type subject: L{User} OR L{Artist} OR L{Tag} OR L{Track} OR L{WeeklyChart}
@raise InvalidParametersError: If an instance of L{Api} is not provided as the first
parameter then an Exception is raised.
"""
if not isinstance(api, Api):
raise InvalidParametersError("api reference must be supplied as an argument")
self._api = api
super(Artist, self).init(**kwargs)
self._stats = hasattr(self, "_stats") and Stats(
subject = self,
listeners = self._stats.listeners,
playcount = self._stats.playcount,
weight = self._stats.weight,
match = self._stats.match,
rank = self._stats.rank
) or None
self._bio = hasattr(self, "_bio") and Wiki(
subject = self,
published = self._bio.published,
summary = self._bio.summary,
content = self._bio.content
) or None
self._subject = subject
def get_similar(self, limit = None):
"""
Get the artists similar to this artist.
@param limit: the number of artists returned (optional)
@type limit: L{int}
@return: artists similar to this artist
@rtype: L{list} of L{Artist}
"""
params = self._default_params({'method': 'artist.getSimilar'})
if limit is not None:
params.update({'limit': limit})
data = self._api._fetch_data(params).find('similarartists')
self._similar = [
Artist(
self._api,
subject = self,
name = a.findtext('name'),
mbid = a.findtext('mbid'),
stats = Stats(
subject = a.findtext('name'),
match = float(a.findtext('match')),
),
url = 'http://' + a.findtext('url'),
image = {'large': a.findtext('image')}
)
for a in data.findall('artist')
]
return self._similar[:]
@property
def similar(self):
"""
artists similar to this artist
@rtype: L{list} of L{Artist}
"""
if not hasattr(self, "_similar") or self._similar is None or len(self._similar) < 6:
return self.get_similar()
return self._similar[:]
@top_property("similar")
def most_similar(self):
"""
artist most similar to this artist
@rtype: L{Artist}
"""
pass
@property
def top_tags(self):
"""
top tags for the artist
@rtype: L{list} of L{Tag}
"""
if not hasattr(self, "_top_tags") or self._top_tags is None or len(self._top_tags) < 6:
params = self._default_params({'method': 'artist.getTopTags'})
data = self._api._fetch_data(params).find('toptags')
self._top_tags = [
Tag(
self._api,
subject = self,
name = t.findtext('name'),
url = t.findtext('url')
)
for t in data.findall('tag')
]
return self._top_tags[:]
@top_property("top_tags")
def top_tag(self):
"""
top tag for the artist
@rtype: L{Tag}
"""
pass
@cached_property
def events(self):
"""
events for the artist
@rtype: L{lazylist} of L{Event}
"""
params = self._default_params({'method': 'artist.getEvents'})
data = self._api._fetch_data(params).find('events')
return [
Event.create_from_data(self._api, e)
for e in data.findall('event')
]
@cached_property
def top_albums(self):
"""
top albums of the artist
@rtype: L{list} of L{Album}
"""
params = self._default_params({'method': 'artist.getTopAlbums'})
data = self._api._fetch_data(params).find('topalbums')
return [
Album(
self._api,
subject = self,
name = a.findtext('name'),
artist = self,
mbid = a.findtext('mbid'),
url = a.findtext('url'),
image = dict([(i.get('size'), i.text) for i in a.findall('image')]),
stats = Stats(
subject = a.findtext('name'),
playcount = int(a.findtext('playcount')),
rank = int(a.attrib['rank'])
)
)
for a in data.findall('album')
]
@top_property("top_albums")
def top_album(self):
"""
top album of the artist
@rtype: L{Album}
"""
pass
@cached_property
def top_fans(self):
"""
top fans of the artist
@rtype: L{list} of L{User}
"""
params = self._default_params({'method': 'artist.getTopFans'})
data = self._api._fetch_data(params).find('topfans')
return [
User(
self._api,
subject = self,
name = u.findtext('name'),
url = u.findtext('url'),
image = dict([(i.get('size'), i.text) for i in u.findall('image')]),
stats = Stats(
subject = u.findtext('name'),
weight = int(u.findtext('weight'))
)
)
for u in data.findall('user')
]
@top_property("top_fans")
def top_fan(self):
"""
top fan of the artist
@rtype: L{User}"""
pass
@cached_property
def top_tracks(self):
"""
top tracks of the artist
@rtype: L{list} of L{Track}
"""
params = self._default_params({'method': 'artist.getTopTracks'})
data = self._api._fetch_data(params).find('toptracks')
return [
Track(
self._api,
subject = self,
name = t.findtext('name'),
artist = self,
mbid = t.findtext('mbid'),
stats = Stats(
subject = t.findtext('name'),
playcount = int(t.findtext('playcount')),
rank = int(t.attrib['rank'])
),
streamable = (t.findtext('streamable') == '1'),
full_track = (t.find('streamable').attrib['fulltrack'] == '1'),
image = dict([(i.get('size'), i.text) for i in t.findall('image')]),
)
for t in data.findall('track')
]
@top_property("top_tracks")
def top_track(self):
"""
topmost track of the artist
@rtype: L{Track}
"""
pass
@staticmethod
def get_info(api, artist = None, mbid = None):
"""
Get the data for the artist.
@param api: an instance of L{Api}
@type api: L{Api}
@param artist: the name of the artist
@type artist: L{str}
@param mbid: MBID of the artist
@type mbid: L{str}
@return: an Artist object corresponding the provided artist name
@rtype: L{Artist}
@raise lastfm.InvalidParametersError: Either artist or mbid parameter has to
be provided. Otherwise exception is raised.
@note: Use the L{Api.get_artist} method instead of using this method directly.
"""
data = Artist._fetch_data(api, artist, mbid)
a = Artist(api, name = data.findtext('name'))
a._fill_info()
return a
@staticmethod
def _get_all(seed_artist):
return (seed_artist, ['name'],
lambda api, hsh: Artist(api, **hsh).similar)
def _default_params(self, extra_params = None):
if not self.name:
raise InvalidParametersError("artist has to be provided.")
params = {'artist': self.name}
if extra_params is not None:
params.update(extra_params)
return params
@staticmethod
def _fetch_data(api,
artist = None,
mbid = None):
params = {'method': 'artist.getInfo'}
if not (artist or mbid):
raise InvalidParametersError("either artist or mbid has to be given as argument.")
if artist:
params.update({'artist': artist})
elif mbid:
params.update({'mbid': mbid})
return api._fetch_data(params).find('artist')
def _fill_info(self):
data = Artist._fetch_data(self._api, self.name)
self._name = data.findtext('name')
self._mbid = data.findtext('mbid')
self._url = data.findtext('url')
self._image = dict([(i.get('size'), i.text) for i in data.findall('image')])
        self._streamable = (data.findtext('streamable') == '1')
if not self._stats:
self._stats = Stats(
subject = self,
listeners = int(data.findtext('stats/listeners')),
playcount = int(data.findtext('stats/playcount'))
)
# self._similar = [
# Artist(
# self._api,
# subject = self,
# name = a.findtext('name'),
# url = a.findtext('url'),
# image = dict([(i.get('size'), i.text) for i in a.findall('image')])
# )
# for a in data.findall('similar/artist')
# ]
self._top_tags = [
Tag(
self._api,
subject = self,
name = t.findtext('name'),
url = t.findtext('url')
)
for t in data.findall('tags/tag')
]
self._bio = Wiki(
self,
published = data.findtext('bio/published').strip() and
datetime(*(time.strptime(
data.findtext('bio/published').strip(),
'%a, %d %b %Y %H:%M:%S +0000'
)[0:6])),
summary = data.findtext('bio/summary'),
content = data.findtext('bio/content')
)
@staticmethod
def _search_yield_func(api, artist):
return Artist(
api,
name = artist.findtext('name'),
mbid = artist.findtext('mbid'),
url = artist.findtext('url'),
image = dict([(i.get('size'), i.text) for i in artist.findall('image')]),
streamable = (artist.findtext('streamable') == '1'),
)
@staticmethod
def _hash_func(*args, **kwds):
try:
return hash(kwds['name'].lower())
except KeyError:
try:
return hash(args[1].lower())
except IndexError:
raise InvalidParametersError("name has to be provided for hashing")
def __hash__(self):
return self.__class__._hash_func(name = self.name)
def __eq__(self, other):
if self.mbid and other.mbid:
return self.mbid == other.mbid
if self.url and other.url:
return self.url == other.url
return self.name == other.name
def __lt__(self, other):
return self.name < other.name
def __repr__(self):
return "<lastfm.Artist: %s>" % self._name
from datetime import datetime
import time
from lastfm.album import Album
from lastfm.api import Api
from lastfm.error import InvalidParametersError
from lastfm.event import Event
from lastfm.stats import Stats
from lastfm.tag import Tag
from lastfm.track import Track
from lastfm.user import User
from lastfm.wiki import Wiki
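# Example (hypothetical sketch; the Api constructor arguments are an
# assumption -- see lastfm.api):
#
#     api = Api('YOUR_API_KEY')
#     artist = api.get_artist('Cher')   # recommended entry point, see get_info
#     artist.most_similar, artist.top_track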
| scaidermern/topTracks2playlist | lastfm/artist.py | Python | gpl-3.0 | 15,646 |
__author__ = 'shahbaz'
# ###############################################################################
# Utility functions #
# ###############################################################################
import sys
from functools import wraps
from logging import StreamHandler
from bitstring import BitArray
def singleton(f):
"""
:param f:
:return:
"""
return f()
def cached(f):
"""
:param f:
:return:
"""
@wraps(f)
def wrapper(*args):
"""
:param args:
:return:
"""
try:
return wrapper.cache[args]
except KeyError:
wrapper.cache[args] = v = f(*args)
return v
wrapper.cache = {}
return wrapper
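# Example (hypothetical sketch) of the memoisation above:
#
#     @cached
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(80)   # fast; repeated calls are served from wrapper.cache[(80,)]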
class frozendict(object):
__slots__ = ["_dict", "_cached_hash"]
def __init__(self, new_dict=None, **kwargs):
"""
:param new_dict:
:param kwargs:
:return:
"""
self._dict = dict()
if new_dict is not None:
self._dict.update(new_dict)
self._dict.update(kwargs)
def update(self, new_dict=None, **kwargs):
"""
:param new_dict:
:param kwargs:
:return:
"""
d = self._dict.copy()
if new_dict is not None:
d.update(new_dict)
d.update(kwargs)
return self.__class__(d)
def remove(self, ks):
"""
:param ks:
:return:
"""
d = self._dict.copy()
for k in ks:
if k in d:
del d[k]
return self.__class__(d)
def pop(self, *ks):
"""
:param ks:
:return:
"""
result = []
for k in ks:
result.append(self[k])
        result.append(self.remove(ks))
return result
def __repr__(self):
"""
:return:
"""
return repr(self._dict)
def __iter__(self):
"""
:return:
"""
return iter(self._dict)
def __contains__(self, key):
"""
:param key:
:return:
"""
return key in self._dict
def keys(self):
"""
:return:
"""
return self._dict.keys()
def values(self):
"""
:return:
"""
return self._dict.values()
def items(self):
"""
:return:
"""
return self._dict.items()
def iterkeys(self):
"""
:return:
"""
return self._dict.iterkeys()
def itervalues(self):
"""
:return:
"""
return self._dict.itervalues()
def iteritems(self):
"""
:return:
"""
return self._dict.iteritems()
def get(self, key, default=None):
"""
:param key:
:param default:
:return:
"""
return self._dict.get(key, default)
def __getitem__(self, item):
"""
:param item:
:return:
"""
return self._dict[item]
def __hash__(self):
"""
:return:
"""
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(frozenset(self._dict.items()))
return h
def __eq__(self, other):
"""
:param other:
:return:
"""
return self._dict == other._dict
def __ne__(self, other):
"""
:param other:
:return:
"""
return self._dict != other._dict
def __len__(self):
"""
:return:
"""
return len(self._dict)
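# Example (hypothetical sketch): frozendict is immutable and hashable;
# update() returns a fresh instance and values can serve as dict keys:
#
#     match = frozendict({'in_port': 1})
#     match2 = match.update(out_port=2)   # new frozendict, match unchanged
#     table = {match: 'drop', match2: 'forward'}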
def indent_str(s, indent=4):
"""
:param s:
:param indent:
:return:
"""
return "\n".join(indent * " " + i for i in s.splitlines())
def repr_plus(ss, indent=4, sep="\n", prefix=""):
"""
:param ss:
:param indent:
:param sep:
:param prefix:
:return:
"""
if isinstance(ss, basestring):
ss = [ss]
return indent_str(sep.join(prefix + repr(s) for s in ss), indent)
class LockStreamHandler(StreamHandler):
'''Relies on a multiprocessing.Lock to serialize multiprocess writes to a
stream.'''
def __init__(self, lock, stream=sys.stderr):
"""
:param lock:
:param stream:
:return:
"""
self.lock = lock
        super(LockStreamHandler, self).__init__(stream)
def emit(self, record):
"""
Acquire the lock before emitting the record.
:param record:
:return:
"""
self.lock.acquire()
super(LockStreamHandler, self).emit(record)
self.lock.release()
class QueueStreamHandler(StreamHandler):
"""
    Relies on a multiprocessing.Queue to serialize multiprocess writes to a
stream.
"""
def __init__(self, queue, stream=sys.stderr):
"""
:param queue:
:param stream:
:return:
"""
self.queue = queue
super(QueueStreamHandler, self).__init__(stream)
def emit(self, record):
"""
        Put the record on the queue for another process to emit.
:param record:
:return:
"""
self.queue.put(record)
def get_bitarray(packet, fields):
"""
:param packet:
:param fields:
:return:
"""
o = 0
a = BitArray()
for h in fields:
l = packet[h]['length']
a[o:(o + l)] = packet[h]['value']
o += l
    return a
| NetASM/PyDatapath | pydatapath/utils/__init__.py | Python | gpl-3.0 | 5,607 |
# Irish Dictionary GUI app
# saved as qt_gui.py
# Last edit by Davis Sandefur 15.07.2015
import sys
import os
from PyQt5 import QtCore, QtWidgets, QtGui, QtMultimedia
from PyQt5 import QtNetwork
from irish_dictionary import irish_dictionary, gaeilge_gaeilge
from audio_grabber import entry_search, related_matches
class Text(QtWidgets.QWidget):
""" This class creates the text widget"""
def __init__(self, parent=None):
super().__init__(parent)
self.text_entry = QtWidgets.QTextEdit(parent)
self.text_entry.setReadOnly(True)
class IrishLabel(QtWidgets.QWidget):
def __init__(self, parent=None):
""" This class creates the Irish language label, entry box, and version switcher """
super().__init__(parent)
self.irish_label = QtWidgets.QLabel("Cuir d'fhocal anseo:")
self.irish_entry = QtWidgets.QLineEdit()
self.english_language_button = QtWidgets.QPushButton("English Version")
self.english_language_button.clicked.connect(lambda: self.irish_to_english())
@staticmethod
def irish_to_english():
""" This method converts the Irish language version to English """
irish_version.hide()
english_version.show()
irish_version.layout().removeWidget(irish_version.text_entry)
english_version.layout().addWidget(english_version.text_entry, 3, 0, 24, 8)
english_version.resize(200, 400)
english_version.center()
class IrishButtons(IrishLabel):
""" this class creates the Irish language buttons"""
def __init__(self, parent=None):
super().__init__(parent)
# Set buttons and enabled status
self.bearla_button = QtWidgets.QPushButton("Béarla")
self.gaeilge_button = QtWidgets.QPushButton("Gaeilge")
self.connacht_button = QtWidgets.QPushButton("Cúige Chonnacht")
self.ulster_button = QtWidgets.QPushButton("Cúige Uladh")
self.munster_button = QtWidgets.QPushButton("Cúige Mhumhan")
self.connacht_button.setEnabled(False)
self.ulster_button.setEnabled(False)
self.munster_button.setEnabled(False)
# Set callbacks
self.bearla_button.clicked.connect(lambda: self.audio_check('English'))
self.gaeilge_button.clicked.connect(lambda: self.audio_check('Irish'))
self.munster_button.clicked.connect(lambda: self.play_audio('Munster'))
self.connacht_button.clicked.connect(lambda: self.play_audio('Connacht'))
self.ulster_button.clicked.connect(lambda: self.play_audio('Ulster'))
def audio_check(self, language):
audio = self.callback(language)
if audio:
self.ulster_button.setEnabled(True)
self.connacht_button.setEnabled(True)
self.munster_button.setEnabled(True)
if not audio:
self.ulster_button.setEnabled(False)
self.connacht_button.setEnabled(False)
self.munster_button.setEnabled(False)
def callback(self, language):
""" Irish version search """
entry = str(self.irish_entry.text()).lower()
entries, suggestions, wordlist, grammatical = irish_dictionary(entry, language, 'gaeilge')
entries2 = None
if language == 'Irish':
entries2 = gaeilge_gaeilge(entry)
audio_exists = entry_search(entry)
if audio_exists:
related = related_matches(entry)
else:
related = 'Níl aon rud ann'
if grammatical is not None:
self.text_entry.moveCursor(QtGui.QTextCursor.End)
self.text_entry.insertPlainText(grammatical + '\n\n')
for i in entries:
self.text_entry.moveCursor(QtGui.QTextCursor.End)
self.text_entry.insertPlainText(i + '\n\n')
self.text_entry.moveCursor(QtGui.QTextCursor.End)
if entries2:
self.text_entry.insertPlainText("As Gaeilge:\n\n")
for i in entries2:
self.text_entry.moveCursor(QtGui.QTextCursor.End)
self.text_entry.insertPlainText(i + '\n\n')
self.text_entry.insertPlainText(suggestions + "\n\nNa focail is déanaí: " + str(wordlist) +
"\n\n" + '(Fuaim) Torthaí gaolmhara:' + str(related) + '\n\n')
self.text_entry.moveCursor(QtGui.QTextCursor.End)
return audio_exists
@staticmethod
def play_audio(dialect):
appdata = os.getenv('APPDATA')
file_names = {'Munster': 'CanM.mp3', 'Connacht': 'CanC.mp3', 'Ulster': 'CanU.mp3'}
if appdata:
url = QtCore.QUrl.fromLocalFile(os.path.abspath(os.path.join(appdata, file_names[dialect])))
else:
url = QtCore.QUrl.fromLocalFile(os.path.abspath(os.path.join("./", file_names[dialect])))
content = QtMultimedia.QMediaContent(url)
player = QtMultimedia.QMediaPlayer()
player.setMedia(content)
player.play()
player.stateChanged.connect(lambda: player.disconnect())
class IrishVersion(IrishButtons, Text):
""" This class brings together all the Irish version widgets and
lays them out in the correct order. Also controls window title and maximize button
"""
def __init__(self, parent=None):
super().__init__(parent)
grid = QtWidgets.QGridLayout()
grid.setSpacing(5)
grid.addWidget(self.irish_label, 0, 0)
grid.addWidget(self.irish_entry, 0, 1, 1, 4)
grid.addWidget(self.english_language_button, 0, 6)
grid.addWidget(self.bearla_button, 1, 2)
grid.addWidget(self.gaeilge_button, 1, 4)
grid.addWidget(self.ulster_button, 2, 2)
grid.addWidget(self.connacht_button, 2, 3)
grid.addWidget(self.munster_button, 2, 4)
self.setLayout(grid)
self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
self.setWindowFlags(QtCore.Qt.WindowCloseButtonHint)
self.setWindowTitle("Foclóir")
self.resize(200, 400)
def center(self):
qr = self.frameGeometry()
cp = QtWidgets.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
class EnglishLabel(QtWidgets.QWidget):
""" This class Creates English labels"""
def __init__(self, parent=None):
super().__init__(parent)
self.english_label = QtWidgets.QLabel("Enter your word here:")
self.english_entry = QtWidgets.QLineEdit()
self.irish_language_button = QtWidgets.QPushButton("Leagan Gaeilge")
self.irish_language_button.clicked.connect(lambda: self.english_to_irish())
@staticmethod
def english_to_irish():
""" This method converts the English language version to Irish"""
english_version.hide()
global irish_version
irish_version = IrishVersion()
irish_version.show()
english_version.layout().removeWidget(english_version.text_entry)
irish_version.layout().addWidget(irish_version.text_entry, 3, 0, 24, 8)
irish_version.resize(200, 400)
irish_version.center()
class EnglishButtons(EnglishLabel):
""" This class creates the English version buttons"""
def __init__(self, parent=None):
super().__init__(parent)
# Define buttons
self.english_button = QtWidgets.QPushButton("English")
self.irish_button = QtWidgets.QPushButton("Irish")
self.audio = False # Initial audio setting
self.ulster_button = QtWidgets.QPushButton("Ulster Dialect")
self.connacht_button = QtWidgets.QPushButton("Connacht Dialect")
self.munster_button = QtWidgets.QPushButton("Munster Dialect")
# Define Callback procedures
self.english_button.clicked.connect(lambda: self.audio_check("English"))
self.irish_button.clicked.connect(lambda: self.audio_check('Irish'))
self.munster_button.clicked.connect(lambda: self.play_audio('Munster'))
self.connacht_button.clicked.connect(lambda: self.play_audio('Connacht'))
self.ulster_button.clicked.connect(lambda: self.play_audio('Ulster'))
# Initial disabling of audio buttons
self.ulster_button.setEnabled(False)
self.munster_button.setEnabled(False)
self.connacht_button.setEnabled(False)
def audio_check(self, language):
""" Runs callback which prints all entries, suggestions, grammatical forms, etc. Callback also determines if
an audio recording exists for the word in <language>. If it doesn't, it disables audio buttons. If audio exists,
it enables buttons.
"""
self.audio = self.callback(language)
if self.audio:
self.ulster_button.setEnabled(True)
self.connacht_button.setEnabled(True)
self.munster_button.setEnabled(True)
if not self.audio:
self.ulster_button.setEnabled(False)
self.connacht_button.setEnabled(False)
self.munster_button.setEnabled(False)
def callback(self, language):
""" Callback function that prints entries, suggestions, etc. and returns a boolean for whether the word(s)
contain(s) audio."""
entry = str(self.english_entry.text()).lower()
entries, suggestions, wordlist, grammatical = irish_dictionary(entry, language, 'english')
entries2 = None
if language == 'Irish':
entries2 = gaeilge_gaeilge(entry)
audio_exists = entry_search(entry)
if audio_exists:
related = related_matches(entry)
else:
related = 'None'
if grammatical is not None:
self.text_entry.moveCursor(QtGui.QTextCursor.End)
self.text_entry.insertPlainText(grammatical + '\n\n')
for i in entries:
self.text_entry.moveCursor(QtGui.QTextCursor.End)
self.text_entry.insertPlainText(i + '\n\n')
if entries2:
self.text_entry.insertPlainText("In Irish:\n\n")
for i in entries2:
self.text_entry.moveCursor(QtGui.QTextCursor.End)
self.text_entry.insertPlainText(i + '\n\n')
self.text_entry.moveCursor(QtGui.QTextCursor.End)
self.text_entry.insertPlainText(suggestions + "\n\nRecently used words: " + str(wordlist) +
"\n\n" + 'Related Audio Matches: ' + str(related) + '\n\n')
self.text_entry.moveCursor(QtGui.QTextCursor.End)
return audio_exists
@staticmethod
def play_audio(dialect):
appdata = os.getenv('APPDATA')
file_names = {'Munster': 'CanM.mp3', 'Connacht': 'CanC.mp3', 'Ulster': 'CanU.mp3'}
if appdata:
url = QtCore.QUrl.fromLocalFile(os.path.abspath(os.path.join(appdata, file_names[dialect])))
else:
url = QtCore.QUrl.fromLocalFile(os.path.abspath(os.path.join("./", file_names[dialect])))
content = QtMultimedia.QMediaContent(url)
player = QtMultimedia.QMediaPlayer()
player.setMedia(content)
player.play()
player.stateChanged.connect(lambda: player.disconnect())
class EnglishVersion(EnglishButtons, Text):
""" This class brings together all the English version widgets and lays them out in the correct
order. Also controls the English version window title and disables the maximize button
"""
def __init__(self, parent=None):
super().__init__(parent)
grid = QtWidgets.QGridLayout()
grid.setSpacing(5)
grid.addWidget(self.english_label, 0, 0)
grid.addWidget(self.english_entry, 0, 1, 1, 4)
grid.addWidget(self.irish_language_button, 0, 6)
grid.addWidget(self.english_button, 1, 2)
grid.addWidget(self.irish_button, 1, 4)
grid.addWidget(self.ulster_button, 2, 2)
grid.addWidget(self.connacht_button, 2, 3)
grid.addWidget(self.munster_button, 2, 4)
grid.addWidget(self.text_entry, 3, 0, 24, 8)
self.setLayout(grid)
self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
self.setWindowFlags(QtCore.Qt.WindowCloseButtonHint)
self.setWindowTitle("teanglann.ie Searcher")
self.resize(200, 400)
def center(self):
qr = self.frameGeometry()
cp = QtWidgets.QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
def main():
app = QtWidgets.QApplication(sys.argv)
global english_version
english_version = EnglishVersion()
english_version.show()
english_version.center()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| davissandefur/Irish-Dictionary-with-GUI | Irish-Dictionary-GUI/qt_gui.py | Python | gpl-3.0 | 12,607 |
# -*- coding: UTF-8 -*-
from HowOldWebsite.estimators.estimator_sex import EstimatorSex
from HowOldWebsite.models import RecordSex
__author__ = 'Hao Yu'
def sex_estimate(database_face_array, feature_jar):
success = False
database_record = None
try:
n_faces = len(database_face_array)
result_estimated = __do_estimate(feature_jar, n_faces)
database_record = \
__do_save_to_database(database_face_array, result_estimated)
success = True
except Exception as e:
# print(e)
pass
return success, database_record
def __do_estimate(feature_jar, n_faces):
feature = EstimatorSex.feature_combine(feature_jar)
feature = EstimatorSex.feature_reduce(feature)
result = EstimatorSex.estimate(feature)
return result
def __do_save_to_database(database_face, sex):
database_record = []
for ith in range(len(database_face)):
record = RecordSex(original_face=database_face[ith],
value_predict=sex[ith])
database_record.append(record)
return database_record
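# Example (hypothetical sketch; the returned records are plain Django model
# instances, so the caller decides when to persist them):
#
#     ok, records = sex_estimate(faces, feature_jar)
#     if ok:
#         for record in records:
#             record.save()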
| jinyu121/HowOldAreYou | HowOldWebsite/process/process_estimate_sex.py | Python | gpl-3.0 | 1,099 |
from FaustBot.Communication import Connection
from FaustBot.Model.RemoteUser import RemoteUser
from FaustBot.Modules.MagicNumberObserverPrototype import MagicNumberObserverPrototype
from FaustBot.Modules.ModuleType import ModuleType
from FaustBot.Modules.PingObserverPrototype import PingObserverPrototype
from FaustBot.Modules.UserList import UserList
class WhoObserver(MagicNumberObserverPrototype, PingObserverPrototype):
@staticmethod
def cmd():
return None
@staticmethod
def help():
return None
def __init__(self, user_list: UserList):
super().__init__()
self.user_list = user_list
self.pings_seen = 1
self.pending_whos = []
@staticmethod
def get_module_types():
return [ModuleType.ON_MAGIC_NUMBER, ModuleType.ON_PING]
def update_on_magic_number(self, data, connection):
if data['number'] == '352': # RPL_WHOREPLY
self.input_who(data, connection)
elif data['number'] == '315': # RPL_ENDOFWHO
self.end_who()
def input_who(self, data, connection: Connection):
# target #channel user host server nick status :0 gecos
target, channel, user, host, server, nick, *ign = data['arguments'].split(' ')
self.pending_whos.append(RemoteUser(nick, user, host))
def end_who(self):
self.user_list.clear_list()
for remuser in self.pending_whos:
self.user_list.add_user(remuser)
self.pending_whos = []
def update_on_ping(self, data, connection: Connection):
        if self.pings_seen % 90 == 0:  # 90 pings * 2 min = 3 hours
connection.raw_send('WHO ' + connection.details.get_channel())
self.pings_seen += 1
| SophieBartmann/Faust-Bot | FaustBot/Modules/WhoObserver.py | Python | gpl-3.0 | 1,729 |
__all__ = ['pleth_analysis', 'ekg_analysis'] | drcgw/bass | modules/__init__.py | Python | gpl-3.0 | 44 |
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#pylint: skip-file
import numpy as np
from nose.tools import assert_raises
from horton import *
def test_typecheck():
m = IOData(coordinates=np.array([[1, 2, 3], [2, 3, 1]]))
assert issubclass(m.coordinates.dtype.type, float)
assert not hasattr(m, 'numbers')
m = IOData(numbers=np.array([2, 3]), coordinates=np.array([[1, 2, 3], [2, 3, 1]]))
m = IOData(numbers=np.array([2.0, 3.0]), pseudo_numbers=np.array([1, 1]), coordinates=np.array([[1, 2, 3], [2, 3, 1]]))
assert issubclass(m.numbers.dtype.type, int)
assert issubclass(m.pseudo_numbers.dtype.type, float)
assert hasattr(m, 'numbers')
del m.numbers
assert not hasattr(m, 'numbers')
m = IOData(cube_data=np.array([[[1, 2], [2, 3], [3, 2]]]), coordinates=np.array([[1, 2, 3]]))
with assert_raises(TypeError):
IOData(coordinates=np.array([[1, 2], [2, 3]]))
with assert_raises(TypeError):
IOData(numbers=np.array([[1, 2], [2, 3]]))
with assert_raises(TypeError):
IOData(numbers=np.array([2, 3]), pseudo_numbers=np.array([1]))
with assert_raises(TypeError):
IOData(numbers=np.array([2, 3]), coordinates=np.array([[1, 2, 3]]))
with assert_raises(TypeError):
IOData(cube_data=np.array([[1, 2], [2, 3], [3, 2]]), coordinates=np.array([[1, 2, 3]]))
with assert_raises(TypeError):
IOData(cube_data=np.array([1, 2]))
def test_copy():
fn_fchk = context.get_fn('test/water_sto3g_hf_g03.fchk')
fn_log = context.get_fn('test/water_sto3g_hf_g03.log')
mol1 = IOData.from_file(fn_fchk, fn_log)
mol2 = mol1.copy()
assert mol1 != mol2
vars1 = vars(mol1)
vars2 = vars(mol2)
assert len(vars1) == len(vars2)
for key1, value1 in vars1.iteritems():
assert value1 is vars2[key1]
def test_dm_water_sto3g_hf():
fn_fchk = context.get_fn('test/water_sto3g_hf_g03.fchk')
mol = IOData.from_file(fn_fchk)
dm = mol.get_dm_full()
assert abs(dm.get_element(0, 0) - 2.10503807) < 1e-7
assert abs(dm.get_element(0, 1) - -0.439115917) < 1e-7
assert abs(dm.get_element(1, 1) - 1.93312061) < 1e-7
def test_dm_lih_sto3g_hf():
fn_fchk = context.get_fn('test/li_h_3-21G_hf_g09.fchk')
mol = IOData.from_file(fn_fchk)
dm = mol.get_dm_full()
assert abs(dm.get_element(0, 0) - 1.96589709) < 1e-7
assert abs(dm.get_element(0, 1) - 0.122114249) < 1e-7
assert abs(dm.get_element(1, 1) - 0.0133112081) < 1e-7
assert abs(dm.get_element(10, 10) - 4.23924688E-01) < 1e-7
dm = mol.get_dm_spin()
assert abs(dm.get_element(0, 0) - 1.40210760E-03) < 1e-9
assert abs(dm.get_element(0, 1) - -2.65370873E-03) < 1e-9
assert abs(dm.get_element(1, 1) - 5.38701212E-03) < 1e-9
assert abs(dm.get_element(10, 10) - 4.23889148E-01) < 1e-7
def test_dm_ch3_rohf_g03():
fn_fchk = context.get_fn('test/ch3_rohf_sto3g_g03.fchk')
mol = IOData.from_file(fn_fchk)
olp = mol.obasis.compute_overlap(mol.lf)
dm = mol.get_dm_full()
assert abs(olp.contract_two('ab,ab', dm) - 9) < 1e-6
dm = mol.get_dm_spin()
assert abs(olp.contract_two('ab,ab', dm) - 1) < 1e-6
| eustislab/horton | horton/io/test/test_molecule.py | Python | gpl-3.0 | 3,917 |
from pyrocko import pz, io, trace
from pyrocko.example import get_example_data
# Download example data
get_example_data('STS2-Generic.polezero.txt')
get_example_data('test.mseed')
# read poles and zeros from SAC format pole-zero file
zeros, poles, constant = pz.read_sac_zpk('STS2-Generic.polezero.txt')
# one more zero to convert from velocity->counts to displacement->counts
zeros.append(0.0j)
rest_sts2 = trace.PoleZeroResponse(
zeros=zeros,
poles=poles,
constant=constant)
traces = io.load('test.mseed')
out_traces = list(traces)
for tr in traces:
displacement = tr.transfer(
1000., # rise and fall of time window taper in [s]
(0.001, 0.002, 5., 10.), # frequency domain taper in [Hz]
transfer_function=rest_sts2,
invert=True) # to change to (counts->displacement)
# change channel id, so we can distinguish the traces in a trace viewer.
displacement.set_codes(channel='D'+tr.channel[-1])
out_traces.append(displacement)
io.save(out_traces, 'displacement.mseed')
| pyrocko/pyrocko | examples/trace_restitution_pz.py | Python | gpl-3.0 | 1,069 |
# $Id$
# installer for pmon
# Copyright 2014 Matthew Wall
from setup import ExtensionInstaller
def loader():
return ProcessMonitorInstaller()
class ProcessMonitorInstaller(ExtensionInstaller):
def __init__(self):
super(ProcessMonitorInstaller, self).__init__(
version="0.2",
name='pmon',
description='Collect and display process memory usage.',
author="Matthew Wall",
author_email="[email protected]",
process_services='user.pmon.ProcessMonitor',
config={
'ProcessMonitor': {
'data_binding': 'pmon_binding',
'process': 'weewxd'},
'DataBindings': {
'pmon_binding': {
'database': 'pmon_sqlite',
'table_name': 'archive',
'manager': 'weewx.manager.DaySummaryManager',
'schema': 'user.pmon.schema'}},
'Databases': {
'pmon_sqlite': {
'database_name': 'pmon.sdb',
'driver': 'weedb.sqlite'}},
'StdReport': {
'pmon': {
'skin': 'pmon',
'HTML_ROOT': 'pmon'}}},
files=[('bin/user', ['bin/user/pmon.py']),
('skins/pmon', ['skins/pmon/skin.conf',
'skins/pmon/index.html.tmpl'])]
)
| sai9/weewx-gitsvn | extensions/pmon/install.py | Python | gpl-3.0 | 1,514 |
from distutils.core import setup
import py2exe
import os, sys
from glob import glob
import PyQt5
data_files=[('',['C:/Python34/DLLs/sqlite3.dll','C:/Python34/Lib/site-packages/PyQt5/icuuc53.dll','C:/Python34/Lib/site-packages/PyQt5/icudt53.dll','C:/Python34/Lib/site-packages/PyQt5/icuin53.dll','C:/Python34/Lib/site-packages/PyQt5/Qt5Gui.dll','C:/Python34/Lib/site-packages/PyQt5/Qt5Core.dll','C:/Python34/Lib/site-packages/PyQt5/Qt5Widgets.dll']),
('data',['data/configure','data/model.sqlite','data/loading.jpg']),
('platforms',['C:/Python34/Lib/site-packages/PyQt5/plugins/platforms/qminimal.dll','C:/Python34/Lib/site-packages/PyQt5/plugins/platforms/qoffscreen.dll','C:/Python34/Lib/site-packages/PyQt5/plugins/platforms/qwindows.dll'])
]
qt_platform_plugins = [("platforms", glob(PyQt5.__path__[0] + r'\plugins\platforms\*.*'))]
data_files.extend(qt_platform_plugins)
msvc_dlls = [('.', glob(r'''C:/Windows/System32/msvc?100.dll'''))]
data_files.extend(msvc_dlls)
setup(
windows = ["ChemDB.py"],
zipfile = None,
data_files = data_files,
options = {
'py2exe': {
'includes' : ['sip','PyQt5.QtCore','PyQt5.QtGui',"sqlite3",'xlrd','xlwt',"_sqlite3","PyQt5"],
}
},
) | dedichan/ChemDB | setup_win.py | Python | gpl-3.0 | 1,223 |
from django.core.cache import cache
from django.conf import settings
from django.template.loader import render_to_string
from tendenci.apps.navs.cache import NAV_PRE_KEY
def cache_nav(nav, show_title=False):
"""
Caches a nav's rendered html code
"""
keys = [settings.CACHE_PRE_KEY, NAV_PRE_KEY, str(nav.id)]
key = '.'.join(keys)
value = render_to_string("navs/render_nav.html", {'nav':nav, "show_title": show_title})
cache.set(key, value, 432000) #5 days
return value
def get_nav(id):
"""
Get the nav from the cache.
"""
keys = [settings.CACHE_PRE_KEY, NAV_PRE_KEY, str(id)]
key = '.'.join(keys)
nav = cache.get(key)
return nav
def clear_nav_cache(nav):
"""
Clear nav cache
"""
keys = [settings.CACHE_PRE_KEY, NAV_PRE_KEY, str(nav.id)]
key = '.'.join(keys)
cache.delete(key)
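# Example (hypothetical sketch): render-through-cache for a nav.
#
#     html = get_nav(nav.id)
#     if html is None:
#         html = cache_nav(nav, show_title=True)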
| alirizakeles/tendenci | tendenci/apps/navs/utils.py | Python | gpl-3.0 | 866 |
from Estructura import espaceado
class Arbol_Sintactico_Abstracto:
def __init__(self,alcance,hijos):
self.hijos = hijos
self.alcance = alcance
self.cont = 1
def imprimir(self,tabulacion):
if (len(self.hijos) > 1):
print tabulacion + "SECUENCIA"
for hijo in self.hijos:
hijo.nivel = 1
hijo.imprimir(espaceado(tabulacion))
def ejecutar(self):
for hijo in self.hijos:
hijo.nivel = 1
hijo.ejecutar() | danmt/NEO | Codigo_Fuente/etapa4/Instrucciones/Arbol_Sintactico_Abstracto.py | Python | gpl-3.0 | 428 |
# Author: Nic Wolfe <[email protected]>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import datetime
import os
import sys
import re
import urllib2
import copy
import itertools
import operator
import collections
import sickbeard
from sickbeard import helpers, classes, logger, db
from sickbeard.common import Quality, MULTI_EP_RESULT, SEASON_RESULT#, SEED_POLICY_TIME, SEED_POLICY_RATIO
from sickbeard import tvcache
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
from lib.hachoir_parser import createParser
from sickbeard.name_parser.parser import NameParser, InvalidNameException
from sickbeard import scene_numbering
from sickbeard.common import Quality, Overview
class GenericProvider:
NZB = "nzb"
TORRENT = "torrent"
def __init__(self, name):
# these need to be set in the subclass
self.providerType = None
self.name = name
self.url = ''
self.supportsBacklog = False
self.cache = tvcache.TVCache(self)
def getID(self):
return GenericProvider.makeID(self.name)
@staticmethod
def makeID(name):
return re.sub("[^\w\d_]", "_", name.strip().lower())
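    # e.g. makeID("My Provider!") -> "my_provider_"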
def imageName(self):
return self.getID() + '.png'
def _checkAuth(self):
return
def isActive(self):
if self.providerType == GenericProvider.NZB and sickbeard.USE_NZBS:
return self.isEnabled()
elif self.providerType == GenericProvider.TORRENT and sickbeard.USE_TORRENTS:
return self.isEnabled()
else:
return False
def isEnabled(self):
"""
This should be overridden and should return the config setting eg. sickbeard.MYPROVIDER
"""
return False
def getResult(self, episodes):
"""
Returns a result of the correct type for this provider
"""
if self.providerType == GenericProvider.NZB:
result = classes.NZBSearchResult(episodes)
elif self.providerType == GenericProvider.TORRENT:
result = classes.TorrentSearchResult(episodes)
else:
result = classes.SearchResult(episodes)
result.provider = self
return result
def getURL(self, url, post_data=None, headers=None):
"""
By default this is just a simple urlopen call but this method should be overridden
for providers with special URL requirements (like cookies)
"""
if not headers:
headers = []
data = helpers.getURL(url, post_data, headers)
if not data:
logger.log(u"Error loading " + self.name + " URL: " + url, logger.ERROR)
return None
return data
def downloadResult(self, result):
"""
Save the result to disk.
"""
logger.log(u"Downloading a result from " + self.name+" at " + result.url)
data = self.getURL(result.url)
        if data is None:
return False
# use the appropriate watch folder
if self.providerType == GenericProvider.NZB:
saveDir = sickbeard.NZB_DIR
writeMode = 'w'
elif self.providerType == GenericProvider.TORRENT:
saveDir = sickbeard.TORRENT_DIR
writeMode = 'wb'
else:
return False
# use the result name as the filename
file_name = ek.ek(os.path.join, saveDir, helpers.sanitizeFileName(result.name) + '.' + self.providerType)
logger.log(u"Saving to " + file_name, logger.DEBUG)
try:
with open(file_name, writeMode) as fileOut:
fileOut.write(data)
helpers.chmodAsParent(file_name)
except EnvironmentError, e:
logger.log("Unable to save the file: " + ex(e), logger.ERROR)
return False
# as long as it's a valid download then consider it a successful snatch
return self._verify_download(file_name)
def _verify_download(self, file_name=None):
"""
Checks the saved file to see if it was actually valid, if not then consider the download a failure.
"""
# primitive verification of torrents, just make sure we didn't get a text file or something
if self.providerType == GenericProvider.TORRENT:
parser = createParser(file_name)
if parser:
mime_type = parser._getMimeType()
try:
parser.stream._input.close()
except:
pass
if mime_type != 'application/x-bittorrent':
logger.log(u"Result is not a valid torrent file", logger.WARNING)
return False
return True
def searchRSS(self):
self._checkAuth()
self.cache.updateCache()
return self.cache.findNeededEpisodes()
def getQuality(self, item):
"""
Figures out the quality of the given RSS item node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns a Quality value obtained from the node's data
"""
(title, url) = self._get_title_and_url(item) # @UnusedVariable
quality = Quality.sceneQuality(title)
return quality
def _doSearch(self):
return []
def _get_season_search_strings(self, show, season, wantedEp, searchSeason=False):
return []
def _get_episode_search_strings(self, ep_obj):
return []
def _get_title_and_url(self, item):
"""
Retrieves the title and URL data from the item XML node
item: An elementtree.ElementTree element representing the <item> tag of the RSS feed
Returns: A tuple containing two strings representing title and URL respectively
"""
title = helpers.get_xml_text(item.find('title'))
if title:
title = title.replace(' ', '.')
url = helpers.get_xml_text(item.find('link'))
if url:
url = url.replace('&', '&')
return (title, url)
def findEpisode(self, episode, manualSearch=False):
self._checkAuth()
# XEM episode scene numbering
sceneEpisode = copy.copy(episode)
sceneEpisode.convertToSceneNumbering()
logger.log(u'Searching "%s" for "%s" as "%s"'
% (self.name, episode.prettyName() , sceneEpisode.prettyName()))
self.cache.updateCache()
results = self.cache.searchCache(episode, manualSearch)
logger.log(u"Cache results: " + str(results), logger.DEBUG)
logger.log(u"manualSearch: " + str(manualSearch), logger.DEBUG)
# if we got some results then use them no matter what.
# OR
# return anyway unless we're doing a manual search
if results or not manualSearch:
return results
itemList = []
for cur_search_string in self._get_episode_search_strings(sceneEpisode):
itemList += self._doSearch(cur_search_string, show=episode.show)
for item in itemList:
(title, url) = self._get_title_and_url(item)
# parse the file name
try:
myParser = NameParser(False)
parse_result = myParser.parse(title, True)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.WARNING)
continue
if episode.show.air_by_date:
if parse_result.air_date != episode.airdate:
logger.log(u"Episode " + title + " didn't air on " + str(episode.airdate) + ", skipping it", logger.DEBUG)
continue
elif parse_result.season_number != episode.season or episode.episode not in parse_result.episode_numbers:
logger.log(u"Episode " + title + " isn't " + str(episode.season) + "x" + str(episode.episode) + ", skipping it", logger.DEBUG)
continue
quality = self.getQuality(item)
if not episode.show.wantEpisode(episode.season, episode.episode, quality, manualSearch):
logger.log(u"Ignoring result " + title + " because we don't want an episode that is " + Quality.qualityStrings[quality], logger.DEBUG)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
result = self.getResult([episode])
result.url = url
result.name = title
result.quality = quality
result.provider = self
result.content = None
results.append(result)
return results
def findSeasonResults(self, show, season):
itemList = []
results = {}
sceneSeasons = {}
searchSeason = False
# convert wanted seasons and episodes to XEM scene numbering
seasonEp = show.getAllEpisodes(season)
wantedEp = [x for x in seasonEp if show.getOverview(x.status) in (Overview.WANTED, Overview.QUAL)]
map(lambda x: x.convertToSceneNumbering(), wantedEp)
for x in wantedEp: sceneSeasons.setdefault(x.season,[]).append(x)
if wantedEp == seasonEp and not show.air_by_date:
searchSeason = True
for sceneSeason,sceneEpisodes in sceneSeasons.iteritems():
for curString in self._get_season_search_strings(show, str(sceneSeason), sceneEpisodes, searchSeason):
itemList += self._doSearch(curString)
for item in itemList:
(title, url) = self._get_title_and_url(item)
quality = self.getQuality(item)
# parse the file name
try:
myParser = NameParser(False)
parse_result = myParser.parse(title, True)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + title + " into a valid episode", logger.WARNING)
continue
if not show.air_by_date:
# this check is meaningless for non-season searches
                if (parse_result.season_number is not None and parse_result.season_number != season) or (parse_result.season_number is None and season != 1):
logger.log(u"The result " + title + " doesn't seem to be a valid episode for season " + str(season) + ", ignoring", logger.DEBUG)
continue
# we just use the existing info for normal searches
actual_season = season
actual_episodes = parse_result.episode_numbers
else:
if not parse_result.air_by_date:
logger.log(u"This is supposed to be an air-by-date search but the result "+title+" didn't parse as one, skipping it", logger.DEBUG)
continue
myDB = db.DBConnection()
sql_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?", [show.tvdbid, parse_result.air_date.toordinal()])
if len(sql_results) != 1:
logger.log(u"Tried to look up the date for the episode "+title+" but the database didn't give proper results, skipping it", logger.WARNING)
continue
actual_season = int(sql_results[0]["season"])
actual_episodes = [int(sql_results[0]["episode"])]
# make sure we want the episode
wantEp = True
for epNo in actual_episodes:
if not show.wantEpisode(actual_season, epNo, quality):
wantEp = False
break
if not wantEp:
logger.log(u"Ignoring result " + title + " because we don't want an episode that is " + Quality.qualityStrings[quality], logger.DEBUG)
continue
logger.log(u"Found result " + title + " at " + url, logger.DEBUG)
# make a result object
epObj = []
for curEp in actual_episodes:
epObj.append(show.getEpisode(actual_season, curEp))
result = self.getResult(epObj)
result.url = url
result.name = title
result.quality = quality
result.provider = self
result.content = None
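# Bucket the result by what it covers: a single-episode result is keyed
# by its episode number, a multi-episode result by MULTI_EP_RESULT, and
# a full-season result (no parsed episodes) by SEASON_RESULT, so they
# can be disambiguated later.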
if len(epObj) == 1:
epNum = epObj[0].episode
elif len(epObj) > 1:
epNum = MULTI_EP_RESULT
logger.log(u"Separating multi-episode result to check for later - result contains episodes: " + str(parse_result.episode_numbers), logger.DEBUG)
elif len(epObj) == 0:
epNum = SEASON_RESULT
result.extraInfo = [show]
logger.log(u"Separating full season result to check for later", logger.DEBUG)
if epNum in results:
results[epNum].append(result)
else:
results[epNum] = [result]
return results
def findPropers(self, search_date=None):
results = self.cache.listPropers(search_date)
return [classes.Proper(x['name'], x['url'], datetime.datetime.fromtimestamp(x['time'])) for x in results]
class NZBProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.NZB
class TorrentProvider(GenericProvider):
def __init__(self, name):
GenericProvider.__init__(self, name)
self.providerType = GenericProvider.TORRENT
# self.option = {SEED_POLICY_TIME : '',
# SEED_POLICY_RATIO: '',
# 'PROCESS_METHOD': ''
# }
# def get_provider_options(self):
# pass
#
# def set_provider_options(self):
# self.option[SEED_POLICY_TIME] + '|' + self.option[SEED_POLICY_RATIO] + '|' + self.option['PROCESS_METHOD']
| vertigo235/Sick-Beard-XEM | sickbeard/providers/generic.py | Python | gpl-3.0 | 15,171 |
#!/usr/bin/env python
# encoding: utf-8
"""
Functional tests of the RabbitMQ Workers
"""
import mock
import json
import unittest
import ADSDeploy.app as app
from ADSDeploy.pipeline.workers import IntegrationTestWorker, \
DatabaseWriterWorker
from ADSDeploy.webapp.views import MiniRabbit
from ADSDeploy.models import Base, Deployment
RABBITMQ_URL = 'amqp://guest:[email protected]:6672/adsdeploy_test?' \
'socket_timeout=10&backpressure_detection=t'
class TestIntegrationTestWorker(unittest.TestCase):
"""
Tests the functionality of the Integration Worker
"""
def setUp(self):
# Create queue
with MiniRabbit(RABBITMQ_URL) as w:
w.make_queue('in', exchange='test')
w.make_queue('out', exchange='test')
w.make_queue('database', exchange='test')
# Create database
app.init_app({
'SQLALCHEMY_URL': 'sqlite://',
'SQLALCHEMY_ECHO': False,
})
Base.metadata.bind = app.session.get_bind()
Base.metadata.create_all()
self.app = app
def tearDown(self):
# Destroy queue
with MiniRabbit(RABBITMQ_URL) as w:
w.delete_queue('in', exchange='test')
w.delete_queue('out', exchange='test')
w.delete_queue('database', exchange='test')
# Destroy database
Base.metadata.drop_all()
self.app.close_app()
@mock.patch('ADSDeploy.pipeline.integration_tester.IntegrationTestWorker.run_test')
def test_workflow_of_integration_worker(self, mock_run_test):
"""
General workflow of the integration worker, from receiving a packet
to finishing with a packet.
"""
# Worker receives a packet, most likely from the deploy worker
# Example packet:
#
# {
# 'application': 'staging',
# 'service': 'adsws',
# 'release': '',
# 'config': {},
# }
#
#
example_packet = {
'application': 'staging',
'service': 'adsws',
'version': 'v1.0.0',
'config': {},
'action': 'test'
}
expected_packet = example_packet.copy()
expected_packet['tested'] = True
# Override the run test returned value. This means the logic of the test
# does not have to be mocked
mock_run_test.return_value = expected_packet
with MiniRabbit(RABBITMQ_URL) as w:
w.publish(route='in', exchange='test', payload=json.dumps(example_packet))
# Worker runs the tests
params = {
'RABBITMQ_URL': RABBITMQ_URL,
'exchange': 'test',
'subscribe': 'in',
'publish': 'out',
'status': 'database',
'TEST_RUN': True
}
test_worker = IntegrationTestWorker(params=params)
test_worker.run()
test_worker.connection.close()
# Worker sends a packet to the next worker
with MiniRabbit(RABBITMQ_URL) as w:
m_in = w.message_count(queue='in')
m_out = w.message_count(queue='out')
p = w.get_packet(queue='out')
self.assertEqual(m_in, 0)
self.assertEqual(m_out, 1)
# Remove values that are not in the starting packet
self.assertTrue(p.pop('tested'))
self.assertEqual(
p,
example_packet
)
@mock.patch('ADSDeploy.pipeline.integration_tester.IntegrationTestWorker.run_test')
def test_db_writes_on_test_pass(self, mocked_run_test):
"""
Check that the database is being written to when a test passes
"""
# Stub data
packet = {
'application': 'adsws',
'environment': 'staging',
'version': 'v1.0.0',
}
expected_packet = packet.copy()
expected_packet['tested'] = True
mocked_run_test.return_value = expected_packet
# Start the IntegrationTester worker
params = {
'RABBITMQ_URL': RABBITMQ_URL,
'exchange': 'test',
'subscribe': 'in',
'publish': 'out',
'status': 'database',
'TEST_RUN': True
}
# Push to rabbitmq
with MiniRabbit(RABBITMQ_URL) as w:
w.publish(route='in', exchange='test', payload=json.dumps(packet))
test_worker = IntegrationTestWorker(params=params)
test_worker.run()
test_worker.connection.close()
# Assert there is a packet on the publish queue
with MiniRabbit(RABBITMQ_URL) as w:
self.assertEqual(w.message_count('out'), 1)
self.assertEqual(w.message_count('database'), 1)
# Start the DB Writer worker
params = {
'RABBITMQ_URL': RABBITMQ_URL,
'exchange': 'test',
'subscribe': 'database',
'TEST_RUN': True
}
db_worker = DatabaseWriterWorker(params=params)
db_worker.app = self.app
db_worker.run()
db_worker.connection.close()
with self.app.session_scope() as session:
all_deployments = session.query(Deployment).all()
self.assertEqual(
len(all_deployments),
1,
msg='More (or less) than 1 deployment entry: {}'
.format(all_deployments)
)
deployment = all_deployments[0]
for key in packet:
self.assertEqual(
packet[key],
getattr(deployment, key)
)
self.assertEqual(deployment.tested, True)
| adsabs/ADSDeploy | ADSDeploy/tests/test_functional/test_integration_tester.py | Python | gpl-3.0 | 5,702 |
from __future__ import unicode_literals
import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
# Create your models here.
@python_2_unicode_compatible # only if you need to support Python 2
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
@python_2_unicode_compatible # only if you need to support Python 2
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| Chasego/nie | proj/libdemo/django/mysite/polls/models.py | Python | gpl-3.0 | 910 |
# Definition for binary tree with next pointer.
# class TreeLinkNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
def find_next(parent, child):
parent = parent.next
while parent:
if parent.left:
child.next = parent.left
return
elif parent.right:
child.next = parent.right
return
else:
parent = parent.next
if not root: return
q = [root]
while q:
nxt = []
for node in q:
if node.left:
if node.right:
node.left.next = node.right
else:
find_next(node, node.left)
nxt.append(node.left)
if node.right:
find_next(node, node.right)
nxt.append(node.right)
q = nxt
| YiqunPeng/Leetcode-pyq | solutions/117PopulatingNextRightPointersInEachNodeII.py | Python | gpl-3.0 | 1,205 |
x0 = 1.0
y0 = 0.1
b = 1.0
p = 1.0
r = 1.0
d = 1.0
T = 30
dt = 0.01
noise = 0.1
import modex
log = modex.log()
import random
t=0
x=x0
y=y0
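# Euler integration of a noisy Lotka-Volterra predator-prey model:
# x is the prey population (per-capita rate f = b - p*y) and y is the
# predator population (per-capita rate g = r*x - d). Gaussian noise is
# added directly to the rates (not sqrt(dt)-scaled as in a formal SDE
# scheme), and populations are clamped at zero so noise cannot drive
# them negative.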
while t<T:
f = b - p*y + random.gauss(0, noise)
g = r*x - d + random.gauss(0, noise)
x += x*f*dt
y += y*g*dt
if x<0: x = 0
if y<0: y = 0
t+=dt
log.time = t
log.x = x
log.y = y
| tcstewar/model-explorer | examples/prey/prey.py | Python | gpl-3.0 | 369 |
class Word:
def __init__(self, data, index):
self.data = data
self.index = index
def printAnagrams(arr):
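# Approach: pair each word with its original index, sort the letters of
# each copy so anagrams collapse to the same key, then sort the pairs by
# key; anagrams end up adjacent and the stored index recovers the
# original spelling for printing.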
dupArray = []
size = len(arr)
for i in range(size):
dupArray.append(Word(arr[i], i))
for i in range(size):
dupArray[i].data = ''.join(sorted(dupArray[i].data))
dupArray = sorted(dupArray, key=lambda x: x.data)
for i in range(size):
print arr[dupArray[i].index]
def main():
print "Hello, world"
arr = ["dog", "act", "cat", "god", "tac"]
printAnagrams(arr)
if __name__ == '__main__':
main() | ruchikd/Algorithms | Python/FindAnagrams/anagrams.py | Python | gpl-3.0 | 521 |
from optparse import make_option
from optparse import OptionParser
import logging
#import os
#import sys
import contextlib
#import hashlib
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
#from django.db.models import Q
import dateutil
import netCDF4
from lizard_neerslagradar import netcdf
logger = logging.getLogger(__name__)
class Command(BaseCommand):
args = ""
help = "Create a geotiff per timestep from the radar.nc file."
option_list = BaseCommand.option_list + (
make_option(
"--from", action="store", type="string",
dest="from_", default="2011-01-07",
help="Generate geotiffs starting from this datetime. "
"Use a string in the format YYYY-MM-DD HH:MM "
"(fuzzy substrings are allowed)"),
make_option("--skip-existing", action="store_true",
dest="skip_existing", default=False,
help="Skip existing geotiffs"),
)
def handle(self, *args, **options):
parser = OptionParser(option_list=self.option_list)
(options, args) = parser.parse_args()
logger.warn("IGNORED from=%s", options.from_)
logger.warn("IGNORED skip_existing=%s", options.skip_existing)
time_from = dateutil.parser.parse('2011-01-07T00:00:00.000Z')
time_to = dateutil.parser.parse('2011-01-08T00:00:00.000Z')
times_list = [time_from]
if time_to:
interval = datetime.timedelta(minutes=5)
time = time_from
while time < time_to:
time += interval
times_list.append(time)
nc = netCDF4.Dataset(settings.RADAR_NC_PATH, 'r')
with contextlib.closing(nc):
for time in times_list:
try:
path = netcdf.time_2_path(time)
netcdf.mk_geotiff(nc, time, path)
logger.info('Created geotiff for {}'.format(time))
except Exception:
logger.exception(
'While creating geotiff for {}'.format(time))
| lizardsystem/lizard-neerslagradar | lizard_neerslagradar/management/commands/create_geotiffs.py | Python | gpl-3.0 | 2,139 |
class DrawingDimensioningWorkbench (Workbench):
# Icon generated using by converting linearDimension.svg to xpm format using Gimp
Icon = '''
/* XPM */
static char * linearDimension_xpm[] = {
"32 32 10 1",
" c None",
". c #000000",
"+ c #0008FF",
"@ c #0009FF",
"# c #000AFF",
"$ c #00023D",
"% c #0008F7",
"& c #0008EE",
"* c #000587",
"= c #000001",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". +@@ + .",
". @+@@+ +@@+@ .",
". +@+@@@@@@ @@@@@@@# .",
"$%@@@@@@@@@+@@@@@@@@@@@@@@@@@@&$",
". #@@@@@@@@ #+@@@@@@@@*=",
". @+@@+ +@@@@@ .",
". +@ #@++ .",
". # .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". .",
". ."};
'''
MenuText = 'Drawing Dimensioning'
def Initialize(self):
import importlib, os
from dimensioning import __dir__, debugPrint, iconPath
import linearDimension
import linearDimension_stack
import deleteDimension
import circularDimension
import grabPointAdd
import textAdd
import textEdit
import textMove
import escapeDimensioning
import angularDimension
import radiusDimension
import centerLines
import noteCircle
import toleranceAdd
commandslist = [
'dd_linearDimension', #where dd is short-hand for drawing dimensioning
'dd_linearDimensionStack',
'dd_circularDimension',
'dd_radiusDimension',
'dd_angularDimension',
'dd_centerLines',
'dd_centerLine',
'dd_noteCircle',
'dd_grabPoint',
'dd_addText',
'dd_editText',
'dd_moveText',
'dd_addTolerance',
'dd_deleteDimension',
'dd_escapeDimensioning',
]
self.appendToolbar('Drawing Dimensioning', commandslist)
import unfold
import unfold_bending_note
import unfold_export_to_dxf
unfold_cmds = [
'dd_unfold',
'dd_bendingNote',
]
if hasattr(os,'uname') and os.uname()[0] == 'Linux' : #this command only works on Linux systems
unfold_cmds.append('dd_exportToDxf')
self.appendToolbar( 'Drawing Dimensioning Folding', unfold_cmds )
import weldingSymbols
if int( FreeCAD.Version()[1] ) > 15 and int( FreeCAD.Version()[2].split()[0] ) > 5165:
weldingCommandList = ['dd_weldingGroupCommand']
else:
weldingCommandList = weldingSymbols.weldingCmds
self.appendToolbar('Drawing Dimensioning Welding Symbols', weldingCommandList)
self.appendToolbar('Drawing Dimensioning Help', [ 'dd_help' ])
FreeCADGui.addIconPath(iconPath)
FreeCADGui.addPreferencePage( os.path.join( __dir__, 'Resources', 'ui', 'drawing_dimensioing_prefs-base.ui'),'Drawing Dimensioning' )
Gui.addWorkbench(DrawingDimensioningWorkbench())
| ulikoehler/FreeCAD_drawing_dimensioning | InitGui.py | Python | gpl-3.0 | 3,832 |
import os, shutil, xbmc, xbmcgui
pDialog = xbmcgui.DialogProgress()
dialog = xbmcgui.Dialog()
Game_Directories = [ "E:\\Games\\", "F:\\Games\\", "G:\\Games\\", "E:\\Applications\\", "F:\\Applications\\", "G:\\Applications\\", "E:\\Homebrew\\", "F:\\Homebrew\\", "G:\\Homebrew\\", "E:\\Apps\\", "F:\\Apps\\", "G:\\Apps\\", "E:\\Ports\\", "F:\\Ports\\", "G:\\Ports\\" ]
for Game_Dir in Game_Directories:
if os.path.isdir( Game_Dir ):
pDialog.create( "PARSING XBOX GAMES","Initializing" )
pDialog.update(0,"Removing _Resources Folders","","This can take some time, please be patient.")
for Items in sorted( os.listdir( Game_Dir ) ):
if os.path.isdir(os.path.join( Game_Dir, Items)):
Game_Directory = os.path.join( Game_Dir, Items )
_Resources = os.path.join( Game_Directory, "_Resources" )
DefaultTBN = os.path.join( Game_Directory, "default.tbn" )
FanartJPG = os.path.join( Game_Directory, "fanart.jpg" )
if os.path.isdir(_Resources):
shutil.rmtree(_Resources)
else:
print "Cannot find: " + _Resources
if os.path.isfile(DefaultTBN):
os.remove(DefaultTBN)
else:
print "Cannot find: " + DefaultTBN
if os.path.isfile(FanartJPG):
os.remove(FanartJPG)
else:
print "Cannot find: " + FanartJPG
pDialog.close()
dialog.ok("COMPLETE","Done, _Resources Folders Removed.") | Rocky5/XBMC-Emustation | Mod Files/emustation/scripts/not used/Other/Remove _Resources.py | Python | gpl-3.0 | 1,405 |
# Copyright (C) 2012 Alex Nitz, Josh Willis, Andrew Miller
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""
This module provides classes that describe banks of waveforms
"""
import types
import logging
import os.path
import h5py
from copy import copy
import numpy as np
from ligo.lw import table, lsctables, utils as ligolw_utils
import pycbc.waveform
import pycbc.pnutils
import pycbc.waveform.compress
from pycbc import DYN_RANGE_FAC
from pycbc.types import FrequencySeries, zeros
import pycbc.io
from pycbc.io.ligolw import LIGOLWContentHandler
import hashlib
def sigma_cached(self, psd):
""" Cache sigma calculate for use in tandem with the FilterBank class
"""
if not hasattr(self, '_sigmasq'):
from pycbc.opt import LimitedSizeDict
self._sigmasq = LimitedSizeDict(size_limit=2**5)
key = id(psd)
if not hasattr(psd, '_sigma_cached_key'):
psd._sigma_cached_key = {}
if key not in self._sigmasq or id(self) not in psd._sigma_cached_key:
psd._sigma_cached_key[id(self)] = True
# If possible, we precalculate the sigmasq vector for all possible waveforms
if pycbc.waveform.waveform_norm_exists(self.approximant):
if not hasattr(psd, 'sigmasq_vec'):
psd.sigmasq_vec = {}
if self.approximant not in psd.sigmasq_vec:
psd.sigmasq_vec[self.approximant] = \
pycbc.waveform.get_waveform_filter_norm(
self.approximant,
psd,
len(psd),
psd.delta_f,
self.min_f_lower
)
if not hasattr(self, 'sigma_scale'):
# Get an amplitude normalization (mass-dependent constant norm)
amp_norm = pycbc.waveform.get_template_amplitude_norm(
self.params, approximant=self.approximant)
amp_norm = 1 if amp_norm is None else amp_norm
self.sigma_scale = (DYN_RANGE_FAC * amp_norm) ** 2.0
curr_sigmasq = psd.sigmasq_vec[self.approximant]
kmin = int(self.f_lower / psd.delta_f)
self._sigmasq[key] = self.sigma_scale * \
(curr_sigmasq[self.end_idx-1] - curr_sigmasq[kmin])
else:
if not hasattr(self, 'sigma_view'):
from pycbc.filter.matchedfilter import get_cutoff_indices
N = (len(self) -1) * 2
kmin, kmax = get_cutoff_indices(
self.min_f_lower or self.f_lower, self.end_frequency,
self.delta_f, N)
self.sslice = slice(kmin, kmax)
self.sigma_view = self[self.sslice].squared_norm() * 4.0 * self.delta_f
if not hasattr(psd, 'invsqrt'):
psd.invsqrt = 1.0 / psd
self._sigmasq[key] = self.sigma_view.inner(psd.invsqrt[self.sslice])
return self._sigmasq[key]
# helper function for parsing approximant strings
def boolargs_from_apprxstr(approximant_strs):
"""Parses a list of strings specifying an approximant and where that
approximant should be used into a list that can be understood by
FieldArray.parse_boolargs.
Parameters
----------
apprxstr : (list of) string(s)
The strings to parse. Each string should be formatted `APPRX:COND`,
where `APPRX` is the approximant and `COND` is a string specifying
where it should be applied (see `FieldArgs.parse_boolargs` for examples
of conditional strings). The last string in the list may exclude a
conditional argument, which is the same as specifying ':else'.
Returns
-------
boolargs : list
A list of tuples giving the approximant and where to apply them. This
can be passed directly to `FieldArray.parse_boolargs`.
"""
if not isinstance(approximant_strs, list):
approximant_strs = [approximant_strs]
return [tuple(arg.split(':')) for arg in approximant_strs]
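# A minimal usage sketch (approximant names are illustrative only):
#
# >>> boolargs_from_apprxstr(['TaylorF2:mtotal < 4', 'IMRPhenomD'])
# [('TaylorF2', 'mtotal < 4'), ('IMRPhenomD',)]
#
# A bare trailing approximant yields a 1-tuple, which parse_boolargs
# treats the same as an ':else' conditional.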
def add_approximant_arg(parser, default=None, help=None):
"""Adds an approximant argument to the given parser.
Parameters
----------
parser : ArgumentParser
The argument parser to add the argument to.
default : {None, str}
Specify a default for the approximant argument. Defaults to None.
help : {None, str}
Provide a custom help message. If None, will use a descriptive message
on how to specify the approximant.
"""
if help is None:
help=str("The approximant(s) to use. Multiple approximants to use "
"in different regions may be provided. If multiple "
"approximants are provided, every one but the last must be "
"be followed by a conditional statement defining where that "
"approximant should be used. Conditionals can be any boolean "
"test understood by numpy. For example, 'Apprx:(mtotal > 4) & "
"(mchirp <= 5)' would use approximant 'Apprx' where total mass "
"is > 4 and chirp mass is <= 5. "
"Conditionals are applied in order, with each successive one "
"only applied to regions not covered by previous arguments. "
"For example, `'TaylorF2:mtotal < 4' 'IMRPhenomD:mchirp < 3'` "
"would result in IMRPhenomD being used where chirp mass is < 3 "
"and total mass is >= 4. The last approximant given may use "
"'else' as the conditional or include no conditional. In either "
"case, this will cause the last approximant to be used in any "
"remaning regions after all the previous conditionals have been "
"applied. For the full list of possible parameters to apply "
"conditionals to, see WaveformArray.default_fields(). Math "
"operations may also be used on parameters; syntax is python, "
"with any operation recognized by numpy.")
parser.add_argument("--approximant", nargs='+', type=str, default=default,
metavar='APPRX[:COND]',
help=help)
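# Hedged example of wiring this into a parser (argument values are
# illustrative only):
#
# >>> import argparse
# >>> parser = argparse.ArgumentParser()
# >>> add_approximant_arg(parser)
# >>> opts = parser.parse_args(['--approximant', 'TaylorF2:mtotal < 4', 'IMRPhenomD'])
# >>> opts.approximant
# ['TaylorF2:mtotal < 4', 'IMRPhenomD']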
def parse_approximant_arg(approximant_arg, warray):
"""Given an approximant arg (see add_approximant_arg) and a field
array, figures out what approximant to use for each template in the array.
Parameters
----------
approximant_arg : list
The approximant argument to parse. Should be the thing returned by
ArgumentParser when parsing the argument added by add_approximant_arg.
warray : FieldArray
The array to parse. Must be an instance of a FieldArray, or a class
that inherits from FieldArray.
Returns
-------
array
A numpy array listing the approximants to use for each element in
the warray.
"""
return warray.parse_boolargs(boolargs_from_apprxstr(approximant_arg))[0]
def tuple_to_hash(tuple_to_be_hashed):
"""
Return a hash for a numpy array, avoiding the salted (run-dependent) python3 builtin hash
Parameters
----------
tuple_to_be_hashed: tuple
The tuple which is being hashed
Must be convertible to a numpy array
Returns
-------
int
an integer representation of the hashed array
"""
h = hashlib.blake2b(np.array(tuple_to_be_hashed).tobytes('C'),
digest_size=8)
return np.frombuffer(h.digest(), dtype=int)[0]
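# For example (parameter values illustrative), hashing a template's
# (mass1, mass2, spin1z, spin2z) tuple gives a stable integer that is
# identical across processes, unlike the salted builtin hash():
#
# >>> tuple_to_hash((1.4, 1.4, 0.0, 0.0))  # doctest: +SKIP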
class TemplateBank(object):
"""Class to provide some basic helper functions and information
about elements of a template bank.
Parameters
----------
filename : string
The name of the file to load. Must end in '.xml[.gz]' or '.hdf'. If an
hdf file, it should have a 'parameters' in its `attrs` which gives a
list of the names of fields to load from the file. If no 'parameters'
are found, all of the top-level groups in the file will assumed to be
parameters (a warning will be printed to stdout in this case). If an
xml file, it must have a `SnglInspiral` table.
approximant : {None, (list of) string(s)}
Specify the approximant(s) for each template in the bank. If None
provided, will try to load the approximant from the file. The
approximant may either be a single string (in which case the same
approximant will be used for all templates) or a list of strings and
conditionals specifying where to use the approximant. See
`boolargs_from_apprxstr` for syntax.
parameters : {None, (list of) string(s)}
Specify what parameters to load from the file. If None, all of the
parameters in the file (if an xml file, this is all of the columns in
the SnglInspiral table, if an hdf file, this is given by the
parameters attribute in the file). The list may include parameters that
are derived from the file's parameters, or functions thereof. For a
full list of possible parameters, see `WaveformArray.default_fields`.
If a derived parameter is specified, only the parameters needed to
compute that parameter will be loaded from the file. For example, if
`parameters='mchirp'`, then only `mass1, mass2` will be loaded from
the file. Note that derived parameters can only be used if the
needed parameters are in the file; e.g., you cannot use `chi_eff` if
`spin1z`, `spin2z`, `mass1`, and `mass2` are in the input file.
\**kwds :
Any additional keyword arguments are stored to the `extra_args`
attribute.
Attributes
----------
table : WaveformArray
An instance of a WaveformArray containing all of the information about
the parameters of the bank.
has_compressed_waveforms : {False, bool}
True if compressed waveforms are present in the the (hdf) file; False
otherwise.
parameters : tuple
The parameters loaded from the input file. Same as `table.fieldnames`.
indoc : {None, xmldoc}
If an xml file was provided, an in-memory representation of the xml.
Otherwise, None.
filehandler : {None, h5py.File}
If an hdf file was provided, the file handler pointing to the hdf file
(left open after initialization). Otherwise, None.
extra_args : {None, dict}
Any extra keyword arguments that were provided on initialization.
"""
def __init__(self, filename, approximant=None, parameters=None,
**kwds):
self.has_compressed_waveforms = False
ext = os.path.basename(filename)
if ext.endswith(('.xml', '.xml.gz', '.xmlgz')):
self.filehandler = None
self.indoc = ligolw_utils.load_filename(
filename, False, contenthandler=LIGOLWContentHandler)
self.table = table.get_table(
self.indoc, lsctables.SnglInspiralTable.tableName)
self.table = pycbc.io.WaveformArray.from_ligolw_table(self.table,
columns=parameters)
# inclination stored in xml alpha3 column
names = list(self.table.dtype.names)
names = tuple([n if n != 'alpha3' else 'inclination' for n in names])
# low frequency cutoff in xml alpha6 column
names = tuple([n if n!= 'alpha6' else 'f_lower' for n in names])
self.table.dtype.names = names
elif ext.endswith(('hdf', '.h5')):
self.indoc = None
f = h5py.File(filename, 'r')
self.filehandler = f
try:
fileparams = list(f.attrs['parameters'])
except KeyError:
# just assume all of the top-level groups are the parameters
fileparams = list(f.keys())
logging.info("WARNING: no parameters attribute found. "
"Assuming that %s " %(', '.join(fileparams)) +
"are the parameters.")
tmp_params = []
# At this point fileparams might be bytes. Fix if it is
for param in fileparams:
try:
param = param.decode()
tmp_params.append(param)
except AttributeError:
tmp_params.append(param)
fileparams = tmp_params
# use WaveformArray's syntax parser to figure out what fields
# need to be loaded
if parameters is None:
parameters = fileparams
common_fields = list(pycbc.io.WaveformArray(1,
names=parameters).fieldnames)
add_fields = list(set(parameters) &
(set(fileparams) - set(common_fields)))
# load
dtype = []
data = {}
for key in common_fields+add_fields:
data[key] = f[key][:]
dtype.append((key, data[key].dtype))
num = f[fileparams[0]].size
self.table = pycbc.io.WaveformArray(num, dtype=dtype)
for key in data:
self.table[key] = data[key]
# add the compressed waveforms, if they exist
self.has_compressed_waveforms = 'compressed_waveforms' in f
else:
raise ValueError("Unsupported template bank file extension %s" %(
ext))
# if approximant is specified, override whatever was in the file
# (if anything was in the file)
if approximant is not None:
# get the approximant for each template
dtype = h5py.string_dtype(encoding='utf-8')
apprxs = np.array(self.parse_approximant(approximant),
dtype=dtype)
if 'approximant' not in self.table.fieldnames:
self.table = self.table.add_fields(apprxs, 'approximant')
else:
self.table['approximant'] = apprxs
self.extra_args = kwds
self.ensure_hash()
@property
def parameters(self):
return self.table.fieldnames
def ensure_hash(self):
"""Ensure that there is a correctly populated template_hash.
Check for a correctly populated template_hash and create if it doesn't
already exist.
"""
fields = self.table.fieldnames
if 'template_hash' in fields:
return
# The fields to use in making a template hash
hash_fields = ['mass1', 'mass2', 'inclination',
'spin1x', 'spin1y', 'spin1z',
'spin2x', 'spin2y', 'spin2z',]
fields = [f for f in hash_fields if f in fields]
template_hash = np.array([tuple_to_hash(v) for v in zip(*[self.table[p]
for p in fields])])
if not np.unique(template_hash).size == template_hash.size:
raise RuntimeError("Some template hashes clash. This should not "
"happen.")
self.table = self.table.add_fields(template_hash, 'template_hash')
def write_to_hdf(self, filename, start_index=None, stop_index=None,
force=False, skip_fields=None,
write_compressed_waveforms=True):
"""Writes self to the given hdf file.
Parameters
----------
filename : str
The name of the file to write to. Must end in '.hdf'.
start_index : If a specific slice of the template bank is to be
written to the hdf file, this would specify the index of the
first template in the slice
stop_index : If a specific slice of the template bank is to be
written to the hdf file, this would specify the index of the
last template in the slice
force : {False, bool}
If the file already exists, it will be overwritten if True.
Otherwise, an OSError is raised if the file exists.
skip_fields : {None, (list of) strings}
Do not write the given fields to the hdf file. Default is None,
in which case all fields in self.table.fieldnames are written.
write_compressed_waveforms : {True, bool}
Write compressed waveforms to the output (hdf) file if this is
True, which is the default setting. If False, do not write the
compressed waveforms group, but only the template parameters to
the output file.
Returns
-------
h5py.File
The file handler to the output hdf file (left open).
"""
if not filename.endswith('.hdf'):
raise ValueError("Unrecoginized file extension")
if os.path.exists(filename) and not force:
raise IOError("File %s already exists" %(filename))
f = h5py.File(filename, 'w')
parameters = self.parameters
if skip_fields is not None:
if not isinstance(skip_fields, list):
skip_fields = [skip_fields]
parameters = [p for p in parameters if p not in skip_fields]
# save the parameters
f.attrs['parameters'] = parameters
write_tbl = self.table[start_index:stop_index]
for p in parameters:
f[p] = write_tbl[p]
if write_compressed_waveforms and self.has_compressed_waveforms:
for tmplt_hash in write_tbl.template_hash:
compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
self.filehandler, tmplt_hash,
load_now=True)
compressed_waveform.write_to_hdf(f, tmplt_hash)
return f
def end_frequency(self, index):
""" Return the end frequency of the waveform at the given index value
"""
if hasattr(self.table[index], 'f_final'):
return self.table[index].f_final
return pycbc.waveform.get_waveform_end_frequency(
self.table[index],
approximant=self.approximant(index),
**self.extra_args)
def parse_approximant(self, approximant):
"""Parses the given approximant argument, returning the approximant to
use for each template in self. This is done by calling
`parse_approximant_arg` using self's table as the array; see that
function for more details."""
return parse_approximant_arg(approximant, self.table)
def approximant(self, index):
""" Return the name of the approximant ot use at the given index
"""
if 'approximant' not in self.table.fieldnames:
raise ValueError("approximant not found in input file and no "
"approximant was specified on initialization")
apx = self.table["approximant"][index]
if hasattr(apx, 'decode'):
apx = apx.decode()
return apx
def __len__(self):
return len(self.table)
def template_thinning(self, inj_filter_rejector):
"""Remove templates from bank that are far from all injections."""
if not inj_filter_rejector.enabled or \
inj_filter_rejector.chirp_time_window is None:
# Do nothing!
return
injection_parameters = inj_filter_rejector.injection_params.table
fref = inj_filter_rejector.f_lower
threshold = inj_filter_rejector.chirp_time_window
m1 = self.table['mass1']
m2 = self.table['mass2']
tau0_temp, _ = pycbc.pnutils.mass1_mass2_to_tau0_tau3(m1, m2, fref)
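# Sort the templates by tau0 (chirp time) once, so that for each
# injection the templates falling inside the chirp-time window can be
# located with two binary searches rather than a linear scan.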
indices = []
sort = tau0_temp.argsort()
tau0_temp = tau0_temp[sort]
for inj in injection_parameters:
tau0_inj, _ = \
pycbc.pnutils.mass1_mass2_to_tau0_tau3(inj.mass1, inj.mass2,
fref)
lid = np.searchsorted(tau0_temp, tau0_inj - threshold)
rid = np.searchsorted(tau0_temp, tau0_inj + threshold)
inj_indices = sort[lid:rid]
indices.append(inj_indices)
indices_combined = np.concatenate(indices)
indices_unique = np.unique(indices_combined)
self.table = self.table[indices_unique]
def ensure_standard_filter_columns(self, low_frequency_cutoff=None):
""" Initialize FilterBank common fields
Parameters
----------
low_frequency_cutoff: {float, None}, Optional
A low frequency cutoff which overrides any given within the
template bank file.
"""
# Make sure we have a template duration field
if not hasattr(self.table, 'template_duration'):
self.table = self.table.add_fields(np.zeros(len(self.table),
dtype=np.float32), 'template_duration')
# Make sure we have a f_lower field
if low_frequency_cutoff is not None:
if not hasattr(self.table, 'f_lower'):
vec = np.zeros(len(self.table), dtype=np.float32)
self.table = self.table.add_fields(vec, 'f_lower')
self.table['f_lower'][:] = low_frequency_cutoff
self.min_f_lower = min(self.table['f_lower'])
if self.f_lower is None and self.min_f_lower == 0.:
raise ValueError('Invalid low-frequency cutoff settings')
class LiveFilterBank(TemplateBank):
def __init__(self, filename, sample_rate, minimum_buffer,
approximant=None, increment=8, parameters=None,
low_frequency_cutoff=None,
**kwds):
self.increment = increment
self.filename = filename
self.sample_rate = sample_rate
self.minimum_buffer = minimum_buffer
self.f_lower = low_frequency_cutoff
super(LiveFilterBank, self).__init__(filename, approximant=approximant,
parameters=parameters, **kwds)
self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)
self.param_lookup = {}
for i, p in enumerate(self.table):
key = (p.mass1, p.mass2, p.spin1z, p.spin2z)
assert(key not in self.param_lookup) # Uh, oh, template confusion!
self.param_lookup[key] = i
def round_up(self, num):
"""Determine the length to use for this waveform by rounding.
Parameters
----------
num : int
Proposed size of the waveform buffer in samples
Returns
-------
size: int
The rounded size to use for the waveform buffer in samples. This
is calculated using an internal `increment` attribute (in seconds),
which determines the discreteness of the rounding.
"""
inc = self.increment
size = np.ceil(num / self.sample_rate / inc) * self.sample_rate * inc
return size
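# Example, assuming sample_rate=2048 and the default increment of 8:
# a proposed length of 10000 samples gives ceil(10000 / 2048 / 8) = 1
# block, i.e. 16384 samples (8 seconds). Rounding this way keeps the
# number of distinct filter lengths small (the likely motivation is
# reuse of fixed-size buffers and FFT plans).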
def getslice(self, sindex):
instance = copy(self)
instance.table = self.table[sindex]
return instance
def id_from_param(self, param_tuple):
"""Get the index of this template based on its param tuple
Parameters
----------
param_tuple : tuple
Tuple of the parameters which uniquely identify this template
Returns
-------
index : int
The ordered index that this template has in the template bank.
"""
return self.param_lookup[param_tuple]
def __getitem__(self, index):
if isinstance(index, slice):
return self.getslice(index)
return self.get_template(index)
def get_template(self, index, min_buffer=None):
approximant = self.approximant(index)
f_end = self.end_frequency(index)
flow = self.table[index].f_lower
# Determine the length of time of the filter, rounded up to
# nearest power of two
if min_buffer is None:
min_buffer = self.minimum_buffer
min_buffer += 0.5
from pycbc.waveform.waveform import props
p = props(self.table[index])
p.pop('approximant')
buff_size = pycbc.waveform.get_waveform_filter_length_in_time(approximant, **p)
tlen = self.round_up((buff_size + min_buffer) * self.sample_rate)
flen = int(tlen / 2 + 1)
delta_f = self.sample_rate / float(tlen)
if f_end is None or f_end >= (flen * delta_f):
f_end = (flen-1) * delta_f
logging.info("Generating %s, %ss, %i, starting from %s Hz",
approximant, 1.0/delta_f, index, flow)
# Get the waveform filter
distance = 1.0 / DYN_RANGE_FAC
htilde = pycbc.waveform.get_waveform_filter(
zeros(flen, dtype=np.complex64), self.table[index],
approximant=approximant, f_lower=flow, f_final=f_end,
delta_f=delta_f, delta_t=1.0/self.sample_rate, distance=distance,
**self.extra_args)
# If available, record the total duration (which may
# include ringdown) and the duration up to merger since they will be
# erased by the type conversion below.
ttotal = template_duration = -1
time_offset = None
if hasattr(htilde, 'length_in_time'):
ttotal = htilde.length_in_time
if hasattr(htilde, 'chirp_length'):
template_duration = htilde.chirp_length
if hasattr(htilde, 'time_offset'):
time_offset = htilde.time_offset
self.table[index].template_duration = template_duration
htilde = htilde.astype(np.complex64)
htilde.f_lower = flow
htilde.min_f_lower = self.min_f_lower
htilde.end_idx = int(f_end / htilde.delta_f)
htilde.params = self.table[index]
htilde.chirp_length = template_duration
htilde.length_in_time = ttotal
htilde.approximant = approximant
htilde.end_frequency = f_end
if time_offset:
htilde.time_offset = time_offset
# Add sigmasq as a method of this instance
htilde.sigmasq = types.MethodType(sigma_cached, htilde)
htilde.id = self.id_from_param((htilde.params.mass1,
htilde.params.mass2,
htilde.params.spin1z,
htilde.params.spin2z))
return htilde
class FilterBank(TemplateBank):
def __init__(self, filename, filter_length, delta_f, dtype,
out=None, max_template_length=None,
approximant=None, parameters=None,
enable_compressed_waveforms=True,
low_frequency_cutoff=None,
waveform_decompression_method=None,
**kwds):
self.out = out
self.dtype = dtype
self.f_lower = low_frequency_cutoff
self.filename = filename
self.delta_f = delta_f
self.N = (filter_length - 1 ) * 2
self.delta_t = 1.0 / (self.N * self.delta_f)
self.filter_length = filter_length
self.max_template_length = max_template_length
self.enable_compressed_waveforms = enable_compressed_waveforms
self.waveform_decompression_method = waveform_decompression_method
super(FilterBank, self).__init__(filename, approximant=approximant,
parameters=parameters, **kwds)
self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)
def get_decompressed_waveform(self, tempout, index, f_lower=None,
approximant=None, df=None):
"""Returns a frequency domain decompressed waveform for the template
in the bank corresponding to the index taken in as an argument. The
decompressed waveform is obtained by interpolating in frequency space,
the amplitude and phase points for the compressed template that are
read in from the bank."""
from pycbc.waveform.waveform import props
from pycbc.waveform import get_waveform_filter_length_in_time
# Get the template hash corresponding to the template index taken in as argument
tmplt_hash = self.table.template_hash[index]
# Read the compressed waveform from the bank file
compressed_waveform = pycbc.waveform.compress.CompressedWaveform.from_hdf(
self.filehandler, tmplt_hash,
load_now=True)
# Get the interpolation method to be used to decompress the waveform
if self.waveform_decompression_method is not None:
decompression_method = self.waveform_decompression_method
else:
decompression_method = compressed_waveform.interpolation
logging.info("Decompressing waveform using %s", decompression_method)
if df is not None:
delta_f = df
else:
delta_f = self.delta_f
# Create memory space for writing the decompressed waveform
decomp_scratch = FrequencySeries(tempout[0:self.filter_length], delta_f=delta_f, copy=False)
# Get the decompressed waveform
hdecomp = compressed_waveform.decompress(out=decomp_scratch, f_lower=f_lower, interpolation=decompression_method)
p = props(self.table[index])
p.pop('approximant')
try:
tmpltdur = self.table[index].template_duration
except AttributeError:
tmpltdur = None
if tmpltdur is None or tmpltdur == 0.0:
tmpltdur = get_waveform_filter_length_in_time(approximant, **p)
hdecomp.chirp_length = tmpltdur
hdecomp.length_in_time = hdecomp.chirp_length
return hdecomp
def generate_with_delta_f_and_max_freq(self, t_num, max_freq, delta_f,
low_frequency_cutoff=None,
cached_mem=None):
"""Generate the template with index t_num using custom length."""
approximant = self.approximant(t_num)
# Don't want to use INTERP waveforms in here
if approximant.endswith('_INTERP'):
approximant = approximant.replace('_INTERP', '')
# Using SPAtmplt here is bad as the stored cbrt and logv get
# recalculated as we change delta_f values. Fall back to TaylorF2
# in lalsimulation.
if approximant == 'SPAtmplt':
approximant = 'TaylorF2'
if cached_mem is None:
wav_len = int(max_freq / delta_f) + 1
cached_mem = zeros(wav_len, dtype=np.complex64)
if self.has_compressed_waveforms and self.enable_compressed_waveforms:
htilde = self.get_decompressed_waveform(cached_mem, t_num,
f_lower=low_frequency_cutoff,
approximant=approximant,
df=delta_f)
else:
htilde = pycbc.waveform.get_waveform_filter(
cached_mem, self.table[t_num], approximant=approximant,
f_lower=low_frequency_cutoff, f_final=max_freq, delta_f=delta_f,
distance=1./DYN_RANGE_FAC, delta_t=1./(2.*max_freq))
return htilde
def __getitem__(self, index):
# Make new memory for templates if we aren't given output memory
if self.out is None:
tempout = zeros(self.filter_length, dtype=self.dtype)
else:
tempout = self.out
approximant = self.approximant(index)
f_end = self.end_frequency(index)
if f_end is None or f_end >= (self.filter_length * self.delta_f):
f_end = (self.filter_length-1) * self.delta_f
# Find the start frequency, if variable
f_low = find_variable_start_frequency(approximant,
self.table[index],
self.f_lower,
self.max_template_length)
logging.info('%s: generating %s from %s Hz' % (index, approximant, f_low))
# Clear the storage memory
poke = tempout.data # pylint:disable=unused-variable
tempout.clear()
# Get the waveform filter
distance = 1.0 / DYN_RANGE_FAC
if self.has_compressed_waveforms and self.enable_compressed_waveforms:
htilde = self.get_decompressed_waveform(tempout, index, f_lower=f_low,
approximant=approximant, df=None)
else:
htilde = pycbc.waveform.get_waveform_filter(
tempout[0:self.filter_length], self.table[index],
approximant=approximant, f_lower=f_low, f_final=f_end,
delta_f=self.delta_f, delta_t=self.delta_t, distance=distance,
**self.extra_args)
# If available, record the total duration (which may
# include ringdown) and the duration up to merger since they will be
# erased by the type conversion below.
ttotal = template_duration = None
if hasattr(htilde, 'length_in_time'):
ttotal = htilde.length_in_time
if hasattr(htilde, 'chirp_length'):
template_duration = htilde.chirp_length
self.table[index].template_duration = template_duration
htilde = htilde.astype(self.dtype)
htilde.f_lower = f_low
htilde.min_f_lower = self.min_f_lower
htilde.end_idx = int(f_end / htilde.delta_f)
htilde.params = self.table[index]
htilde.chirp_length = template_duration
htilde.length_in_time = ttotal
htilde.approximant = approximant
htilde.end_frequency = f_end
# Add sigmasq as a method of this instance
htilde.sigmasq = types.MethodType(sigma_cached, htilde)
htilde._sigmasq = {}
return htilde
def find_variable_start_frequency(approximant, parameters, f_start, max_length,
delta_f = 1):
""" Find a frequency value above the starting frequency that results in a
waveform shorter than max_length.
"""
if f_start is None:
f = parameters.f_lower
elif max_length is not None:
l = max_length + 1
f = f_start - delta_f
while l > max_length:
f += delta_f
l = pycbc.waveform.get_waveform_filter_length_in_time(approximant,
parameters, f_lower=f)
else:
f = f_start
return f
class FilterBankSkyMax(TemplateBank):
def __init__(self, filename, filter_length, delta_f,
dtype, out_plus=None, out_cross=None,
max_template_length=None, parameters=None,
low_frequency_cutoff=None, **kwds):
self.out_plus = out_plus
self.out_cross = out_cross
self.dtype = dtype
self.f_lower = low_frequency_cutoff
self.filename = filename
self.delta_f = delta_f
self.N = (filter_length - 1 ) * 2
self.delta_t = 1.0 / (self.N * self.delta_f)
self.filter_length = filter_length
self.max_template_length = max_template_length
super(FilterBankSkyMax, self).__init__(filename, parameters=parameters,
**kwds)
self.ensure_standard_filter_columns(low_frequency_cutoff=low_frequency_cutoff)
def __getitem__(self, index):
# Make new memory for templates if we aren't given output memory
if self.out_plus is None:
tempoutplus = zeros(self.filter_length, dtype=self.dtype)
else:
tempoutplus = self.out_plus
if self.out_cross is None:
tempoutcross = zeros(self.filter_length, dtype=self.dtype)
else:
tempoutcross = self.out_cross
approximant = self.approximant(index)
# Get the end of the waveform if applicable (only for SPAtmplt atm)
f_end = self.end_frequency(index)
if f_end is None or f_end >= (self.filter_length * self.delta_f):
f_end = (self.filter_length-1) * self.delta_f
# Find the start frequency, if variable
f_low = find_variable_start_frequency(approximant,
self.table[index],
self.f_lower,
self.max_template_length)
logging.info('%s: generating %s from %s Hz', index, approximant, f_low)
# Touching the .data attribute forces the underlying memory of the
# two output series to be allocated before they are cleared (the same
# trick as the poke in FilterBank.__getitem__)
poke1 = tempoutplus.data # pylint:disable=unused-variable
poke2 = tempoutcross.data # pylint:disable=unused-variable
# Clear the storage memory
tempoutplus.clear()
tempoutcross.clear()
# Get the waveform filter
distance = 1.0 / DYN_RANGE_FAC
hplus, hcross = pycbc.waveform.get_two_pol_waveform_filter(
tempoutplus[0:self.filter_length],
tempoutcross[0:self.filter_length], self.table[index],
approximant=approximant, f_lower=f_low,
f_final=f_end, delta_f=self.delta_f, delta_t=self.delta_t,
distance=distance, **self.extra_args)
if hasattr(hplus, 'chirp_length') and hplus.chirp_length is not None:
self.table[index].template_duration = hplus.chirp_length
hplus = hplus.astype(self.dtype)
hcross = hcross.astype(self.dtype)
hplus.f_lower = f_low
hcross.f_lower = f_low
hplus.min_f_lower = self.min_f_lower
hcross.min_f_lower = self.min_f_lower
hplus.end_frequency = f_end
hcross.end_frequency = f_end
hplus.end_idx = int(hplus.end_frequency / hplus.delta_f)
hcross.end_idx = int(hplus.end_frequency / hplus.delta_f)
hplus.params = self.table[index]
hcross.params = self.table[index]
hplus.approximant = approximant
hcross.approximant = approximant
# Add sigmasq as a method of this instance
hplus.sigmasq = types.MethodType(sigma_cached, hplus)
hplus._sigmasq = {}
hcross.sigmasq = types.MethodType(sigma_cached, hcross)
hcross._sigmasq = {}
return hplus, hcross
__all__ = ('sigma_cached', 'boolargs_from_apprxstr', 'add_approximant_arg',
'parse_approximant_arg', 'tuple_to_hash', 'TemplateBank',
'LiveFilterBank', 'FilterBank', 'find_variable_start_frequency',
'FilterBankSkyMax')
| tdent/pycbc | pycbc/waveform/bank.py | Python | gpl-3.0 | 39,353 |
"""
scraping
the utility functions for the actual web scraping
"""
import ssl
import datetime
import requests
import re
# this is the endpoint that my new version of this program will
# abuse with possible store ids. this is a much more reliable "darts at the wall"
# technique than the previous location-based one
QUERY_URL = "https://www.wawa.com/Handlers/LocationByStoreNumber.ashx"
# from testing, I have confirmed certain "series" of store IDs
# 0000 series are all old stores in PA, NJ, MD, DE, and VA
# 5000 series are all stores in FL
# 8000 series are all new stores in PA, NJ, MD, DE, and VA
POSSIBLE_STORE_NUMS = list(range(5000, 6000))
POSSIBLE_STORE_NUMS.extend(list(range(0, 1000)))
POSSIBLE_STORE_NUMS.extend(list(range(8000, 9000)))
# currently only tracking these gas types to keep a consistent csv schema.
# other types are not consistent across all wawas
GAS_TYPES = ["diesel", "plus", "unleaded", "premium"]
def parse_gas_prices(in_location):
"""
Breaks open the json for the gas prices
:param in_location: The Wawa location we are looking at (dict)
:return: The gas price info (dict)
"""
out_data = {}
try:
fuel_data = in_location["fuelTypes"]
for ft in fuel_data:
lowered = ft["description"].lower()
if lowered in GAS_TYPES:
out_data[lowered + "_price"] = ft["price"]
# no gas sold at this Wawa
except KeyError:
for gt in GAS_TYPES:
out_data[gt + "_price"] = ""
return out_data
def camel_to_underscore(in_string):
"""
Basic function that converts a camel-cased word to use underscores
:param in_string: The camel-cased string (str)
:return: The underscore'd string (str)
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', in_string)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
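# e.g. camel_to_underscore("hasFuelStation") -> "has_fuel_station"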
def parse_amenities(in_location):
"""
Breaks open the json for the amenities offered at the Wawa location
:param in_location: The Wawa location (dict)
:return: The amenity info (dict)
"""
out_data = {}
for amenity, value in in_location["amenities"].items():
out_data["has_" + camel_to_underscore(amenity).lower()] = value
return out_data
def get_addresses(in_location):
"""
Parses info for the Wawa address and coordinates
:param in_location: The Wawa location (dict)
:return: The address and coordincate info (dict)
"""
friendly = in_location["addresses"][0]
physical = in_location["addresses"][1]
out_friendly = {
"address": friendly["address"],
"city": friendly["city"],
"state": friendly["state"],
"zip": friendly["zip"]
}
out_physical = {
"longitude": physical["loc"][1],
"latitude": physical["loc"][0],
}
return {"address": out_friendly, "coordinates": out_physical}
def get_wawa_data(limit=None):
"""
Hits the store number url endpoint to pull down Wawa locations and
parse each one's information. We don't know the store numbers as there
is not list of store numbers. Through testing I was able to narrow down
"series" of store numbers, so we iterate through ranges of possible
store numbers, skipping any 404 errors (invalid store id responses
returned by url calls).
:param limit: A cap on the number of Wawa results returned (int) (optional)
:return: Parsed Wawa information (list<dict>)
"""
ssl._create_default_https_context = ssl._create_unverified_context
output = []
for i in POSSIBLE_STORE_NUMS:
response = requests.get(QUERY_URL, params={"storeNumber": i})
if response.status_code != 404:
location = response.json()
geographic_data = get_addresses(location)
address = geographic_data["address"]
coordinates = geographic_data["coordinates"]
gas_prices = parse_gas_prices(location)
amenities = parse_amenities(location)
this_location_output = {
"has_menu": location["hasMenu"],
"last_updated": datetime.datetime.strptime(location["lastUpdated"], "%m/%d/%Y %I:%M %p"),
"location_id": location["locationID"],
"open_24_hours": location["open24Hours"],
"regional_director": location["regionalDirector"],
"store_close": location["storeClose"],
"store_name": location["storeName"],
"store_number": location["storeNumber"],
"store_open": location["storeOpen"],
"telephone": location["telephone"]
}
this_location_output = {**this_location_output, **address}
this_location_output = {**this_location_output, **coordinates}
this_location_output = {**this_location_output, **gas_prices}
this_location_output = {**this_location_output, **amenities}
output.append(this_location_output)
if limit and len(output) == limit:
break
return output
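# Small usage sketch (requires network access; the limit keeps a test
# run short):
#
# >>> stores = get_wawa_data(limit=5)  # doctest: +SKIP
# >>> for store in stores:  # doctest: +SKIP
# ...     print(store["store_name"], store["city"], store["unleaded_price"])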
| cfh294/WawaGeoScraper | utils/scraping/__init__.py | Python | gpl-3.0 | 5,149 |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# Proceed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Proceed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Proceed. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
# ---------------------------------------------------------------------------
import sys
import os
import random
import tempfile
from datetime import date
# ---------------------------------------------------------------------------
class GenName():
"""
@authors: Brigitte Bigi
@contact: [email protected]
@license: GPL
@summary: A class to generates a random file name of a non-existing file.
"""
def __init__(self, extension=""):
self.name = "/"
while os.path.exists(self.name):
self.set_name(extension)
def set_name(self, extension):
"""
Set a new file name.
"""
# random integer in [0, 10000), derived from a random float
randval = str(int(random.random()*10000))
# process pid
pid = str(os.getpid())
# today's date
today = str(date.today())
# filename
filename = "tmp_"+today+"_"+pid+"_"+randval
# final file name is path/filename
self.name = filename + extension
def get_name(self):
"""
Get the current file name.
"""
return str(self.name)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
print GenName().get_name()
# ---------------------------------------------------------------------------
| brigittebigi/proceed | proceed/src/TagPDF/name.py | Python | gpl-3.0 | 3,002 |
# -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| 01001110 01100101 01110100 01111010 01101111 01100010 |
#| |
#| Netzob : Inferring communication protocols |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Frédéric Guihéry |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.netzob.org |
#| @contact : [email protected] |
#| @sponsors : Amossys, http://www.amossys.fr |
#| Supélec, http://www.rennes.supelec.fr/ren/rd/cidre/ |
#+---------------------------------------------------------------------------+
from netzob.Common.Type.Endianess import Endianess
from common.NetzobTestCase import NetzobTestCase
class test_Endianess(NetzobTestCase):
def test_BIG(self):
self.assertEqual(Endianess.BIG, "big-endian")
def test_LITTLE(self):
self.assertEqual(Endianess.LITTLE, "little-endian")
| nagyistoce/netzob | test/src/test_netzob/test_Common/test_Type/test_Endianess.py | Python | gpl-3.0 | 2,233 |
"""
Copyright (C) 2013 Stanislav Bobovych
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
class RDBDATA_data_entry:
def __init__(self, offset, file_pointer):
old_offset = file_pointer.tell()
file_pointer.seek(offset)
self.data_type, = struct.unpack("<I", file_pointer.read(4))
self.RDB_id, = struct.unpack("<I", file_pointer.read(4))
self.data_length, = struct.unpack("<I", file_pointer.read(4))
self.unknown, = struct.unpack("<I", file_pointer.read(4))
self.data = file_pointer.read(self.data_length)
file_pointer.seek(old_offset)
class RDBDATA_file:
def __init__(self, filepath=None):
self.filepath = filepath
self.header = None #RDB0
self.data = None
if self.filepath != None:
self.open(filepath)
def open(self, filepath=None):
if filepath == None and self.filepath == None:
print "File path is empty"
return
if self.filepath == None:
self.filepath = filepath
def dump(self, dest_filepath=os.getcwd(), verbose=False):
with open(self.filepath, "rb") as f:
self.header = struct.unpack("IIII", f.read(16))
self.data = f.read()
| sbobovyc/GameTools | TSW/src/rdbdata.py | Python | gpl-3.0 | 1,836 |