__author__ = 'chimezieogbuji'
import hashlib
from wheezy.http.cache import etag_md5crc32
from wheezy.caching.patterns import Cached
from wheezy.http.middleware import http_cache_middleware_factory
from wheezy.http.middleware import environ_cache_adapter_middleware_factory
from wheezy.http.middleware import wsgi_adapter_middleware_factory
from wheezy.caching.memcache import MemcachedClient
from wheezy.http import bootstrap_http_defaults, CacheProfile, WSGIApplication
from datetime import timedelta
class memcached(object):
"""
Decorator that uses memcache to cache response to
calling the function where the key is based on
the values of the arguments given
modified from http://www.zieglergasse.at/blog/2011/python/memcached-decorator-for-python/
"""
def __init__(self,memcache_socket):
self.memcache_socket = memcache_socket
def __call__(self, f):
def newfn(*args, **kwargs):
mc = MemcachedClient([self.memcache_socket], debug=0)
# generate md5 out of args and function
m = hashlib.md5()
margs = [x.__repr__() for x in args]
mkwargs = [x.__repr__() for x in kwargs.values()]
map(m.update, margs + mkwargs)
m.update(f.__name__)
m.update(f.__class__.__name__)
key = m.hexdigest()
            value = mc.get(key)
            # explicit None check so legitimately falsy cached values
            # (0, '', [], ...) are still served from the cache
            if value is not None:
                return value
            else:
                value = f(*args, **kwargs)
                mc.set(key, value, 60)
                return value
return newfn
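# A minimal usage sketch (hedged): the socket path and function below are
# illustrative; memcached must be listening on the given socket.
#
#   @memcached('unix:/tmp/memcached.sock')
#   def expensive_lookup(*args):
#       ... compute and return a picklable value ...
#
# Repeated calls with the same arguments within the 60 second TTL are then
# served from memcache instead of being recomputed.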
def normalize_to_list(item):
if item is None:
return []
else:
return item if isinstance(item,list) else [item]
class WheezyCachingAdapterSetup(object):
def __init__(self,
cache_location='public',
static_dependency = None,
queries = None,
ttl=15,
debug=False,
max_age=None,
memcache_socket = 'unix:/tmp/memcached.sock'):
"""
Used as an Akara WSGI wrapper, i.e.:
@simple_service(...,wsgi_wrapper=WheezyCachingAdapterSetup( ... ))
Produces a wheezy.http.WSGIApplication that implements HTTP caching
cache_location is used for the CacheProfile
( see: http://pythonhosted.org/wheezy.http/userguide.html#cache-location )
static_dependency indicates the name of a master_key for a CacheDependency associated
with the wrapped Akara service
( see: http://pythonhosted.org/wheezy.caching/userguide.html#cachedependency )
        queries is a list of query keys that will be used to compose the key
        for caching responses to requests
        ttl is the time to live for the content (server) caching, in seconds
        If specified, max_age is used on the policy to set the corresponding
        HTTP cache control header (an indication to accept a response whose age
        is no greater than the specified time in seconds)
memcache_socket is the memcache socket to use for content caching purposes
Within an Akara service decorated in this way, a wheezy.http.HTTPCachePolicy instance can be created:
profile = request.environ['wheezy.http.cache_profile']
policy = profile.cache_policy()
And then its various methods can be used to control cache-specific HTTP headers
of the response:
See: http://packages.python.org/wheezy.http/userguide.html#cache-policy
Then request.environ['wheezy.http.cache_policy'] needs to be set to the policy:
request.environ['wheezy.http.cache_policy'] = policy
As an alternative to providing a static dependency name via the static_dependency
keyword argument, a dependency with a dynamic master key can be provided via:
request.environ['wheezy.http.cache_dependency'] = ['.. cache name ..', ..., ]
Cache can be invalidated by (dependency) name via:
request.environ['akamu.wheezy.invalidate']('..cache name..')
"""
assert cache_location in ['none','server','client','public']
self.cache = MemcachedClient([memcache_socket])
self.cache_location = cache_location
self.debug = debug
self.cached = Cached(self.cache, time=ttl)
self.static_dependency = static_dependency
self.max_age = max_age
self.cache_profile = CacheProfile(
cache_location,
vary_query=queries,
enabled=True,
etag_func=etag_md5crc32,
duration=timedelta(seconds=ttl))
def __call__(self,akara_application):
"""
Called by Akara to provide the akara application
as a WSGI application to be 'wrapped'
        Returns a wsgi_application that wraps the akara service and facilitates
        the use of WSGI environ variables by the HTTP content-caching middleware.
"""
self.akara_application = akara_application
def wsgi_wrapper(environ, start_response):
if self.cache_location != 'none':
environ['wheezy.http.cache_profile'] = self.cache_profile
def InvalidateCacheViaDependency(cacheName):
from wheezy.caching.dependency import CacheDependency
dependency = CacheDependency(
self.cache,
# namespace=self.cache_profile.namespace
)
if self.debug:
print "###","Invalidating cache: ", cacheName,"###"
dependency.delete(cacheName)
# self.cached.delete(cacheName)
#Provide hook for cache dependency invalidation to akara service
environ['akamu.wheezy.invalidate'] = InvalidateCacheViaDependency
if self.debug:
print "###","Calling akara application from wheezy.http","###"
rt = akara_application(environ, start_response)
if 'wheezy.http.cache_dependency' in environ:
item = environ['wheezy.http.cache_dependency']
environ['wheezy.http.cache_dependency'] = normalize_to_list(item)
if self.debug:
print "###","Dependency key(s): ", environ['wheezy.http.cache_dependency'],"###"
elif self.static_dependency:
environ['wheezy.http.cache_dependency'
] = normalize_to_list(self.static_dependency)
if self.debug:
print "###","Dependency key(s): ", self.static_dependency,"###"
if self.max_age is not None:
if self.debug:
print "###","Setting max_age and etag via function","###"
policy = self.cache_profile.cache_policy()
policy.max_age(self.max_age)
policy.etag(self.cache_profile.etag_func(rt))
return rt
return WSGIApplication([
bootstrap_http_defaults,
http_cache_middleware_factory,
environ_cache_adapter_middleware_factory,
wsgi_adapter_middleware_factory
],
{
'wsgi_app' : wsgi_wrapper,
'http_cache': self.cache
}
        )

# File: Akamu-0.7/lib/wheezy/__init__.py
import os, akara, time
from akara import registry
from rdflib import plugin, URIRef, OWL, RDFS, RDF, BNode
from rdflib.store import Store, NO_STORE
from rdflib.Graph import Graph, ConjunctiveGraph
from rdflib.store.SPARQL import GET,POST
OWL_PROPERTIES_QUERY=\
"""
SELECT ?literalProperty ?resourceProperty
WHERE {
{ ?literalProperty a owl:DatatypeProperty }
UNION
{ ?resourceProperty a ?propType
FILTER(
?propType = owl:ObjectProperty ||
?propType = owl:TransitiveProperty ||
?propType = owl:SymmetricProperty ||
?propType = owl:InverseFunctionalProperty ) }
}"""
def GetGraphStoreForProtocol():
configDict = akara.module_config()
return configDict.get('graph_store_name'), configDict.get('graph_store_url')
def GetExternalGraphStoreURL():
configDict = akara.module_config()
return configDict.get('external_graph_store_url')
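# A hedged sketch of the akara module configuration these helpers consume;
# every key and value below is illustrative, not prescriptive:
#
#   graph_store_name         = 'mysqlDataset'
#   graph_store_url          = 'http://localhost:8880/graph-store'
#   external_graph_store_url = 'http://example.com/graph-store'
#   sparqlQueryFiles         = '/path/to/sparql/queries'
#   nsPrefixes               = {'foaf': 'http://xmlns.com/foaf/0.1/'}
#   mysqlDataset             = {
#       'type'         : 'MySQL',
#       'mysqluser'    : 'user',  'mysqlpw'   : 'secret',
#       'mysqldb'      : 'rdf',   'mysqlhost' : 'localhost',
#       'mysqlStoreId' : 'store_id',
#   }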
def ConfigureTriclops(datasetName,nsBindings,litProps,resProps):
"""
Adapts akara configuration to Triclops configuration
"""
#ontGraph,
#ruleSet,
#definingOntology,
#builtinTemplateGraph,
#defaultDerivedPreds):
datasetConfig = akara.module_config().get(datasetName)
connectStr = 'user=%s,password=%s,db=%s,port=%s,host=%s' % (
datasetConfig.get('mysqluser'),
datasetConfig.get('mysqlpw'),
datasetConfig.get('mysqldb'),
datasetConfig.get('mysqlPort',3306),
datasetConfig.get('mysqlhost')
)
triclopsConf = {
'result_xslt_directory' : akara.module_config().get('result_xslt_directory'),
'store_identifier' : datasetConfig.get('mysqlStoreId'),
'connection' : connectStr,
'store' : datasetConfig.get('type'),
'debugQuery' : akara.module_config().get('debugQuery',False),
'NO_BASE_RESOLUTION' : akara.module_config().get('NO_BASE_RESOLUTION',False),
'IgnoreQueryDataset' : akara.module_config().get('IgnoreQueryDataset',False),
'MYSQL_ORDER' : datasetConfig.get('MYSQL_ORDER',False),
'endpointURL' : akara.module_config().get('endpointURL'),
}
    proxy = None  # @@TODO: Add support for proxies: global_conf.get('sparql_proxy')
nsBindings = dict([ (k,URIRef(v)) for k,v in akara.module_config().get("nsPrefixes",{}).items()])
dataStoreOWL = akara.module_config().get('datastore_owl')
dataStoreOntGraph = Graph()
if not proxy and datasetConfig.get('type') == 'MySQL':
litProps.update(OWL.literalProperties)
litProps.update(RDFS.literalProperties)
resProps.update(RDFS.resourceProperties)
litProps.update(
map(URIRef,akara.module_config().get("sqlLiteralProps",[]))
)
resProps.update(
map(URIRef,akara.module_config().get("sqlResourceProps",[]))
)
if dataStoreOWL:
for dsOwl in dataStoreOWL.split(','):
dataStoreOntGraph.parse(dsOwl)
        for litProp,resProp in dataStoreOntGraph.query(OWL_PROPERTIES_QUERY,
                                                       initNs={u'owl':OWL}):
if litProp:
litProps.add(litProp)
if resProp:
#Need to account for OWL Full, where datatype properties
#can be IFPs
if (resProp,
RDF.type,
OWL.DatatypeProperty) not in dataStoreOntGraph:
resProps.add(resProp)
else:
triclopsConf['datastore_owl'] = 'N/A'
print "Registered %s owl:DatatypeProperties"%len(litProps)
print "Registered %s owl:ObjectProperties"%len(resProps)
if False:# @@TODO support for SPARQL RIF Core entailment global_conf.get('topDownEntailment',False):
pass
# from FuXi.DLP.DLNormalization import NormalFormReduction
# from FuXi.DLP import DisjunctiveNormalForm
# from FuXi.Horn.HornRules import HornFromDL, HornFromN3, Ruleset
# from FuXi.Syntax.InfixOWL import *
# from FuXi.Horn import DATALOG_SAFETY_STRICT
# from FuXi.Rete.Magic import IdentifyDerivedPredicates
# complementExpanded =[]
# _ruleSet = Ruleset()
# if global_conf.get('SkipComplementExpansion'):
# for kvStr in global_conf.get('SkipComplementExpansion').split('|') :
# pref,uri=kvStr.split(':')
# complementExpanded.append(URIRef(nsBindings[pref]+uri))
#
# definingOntology = global_conf.get('entailment_owl')
# for ont in definingOntology.split(','):
# if os.path.exists(ont):
# ontGraphPath = OsPathToUri(ont)
# else:
# ontGraphPath = ont
# print >>sys.stderr, "Parsing Semantic Web root Graph.. ", ontGraphPath
# for owlImport in ontGraph.parse(ontGraphPath).objects(predicate=OWL_NS.imports):
# ontGraph.parse(owlImport)
# print >>sys.stderr, "Parsed Semantic Web Graph.. ", owlImport
#
# for prefix,uri in nsBindings.items():
# ontGraph.bind(prefix,uri)
#
# builtins = global_conf.get('builtins')
# if global_conf.get('entailment_n3'):
# #setup rules / builtins
# if builtins:
# import imp
# userFuncs = imp.load_source('builtins', builtins)
# rs = HornFromN3(global_conf.get('entailment_n3'),
# additionalBuiltins=userFuncs.ADDITIONAL_FILTERS)
# else:
# rs = HornFromN3(global_conf.get('entailment_n3'))
# print "Parsed %s rules from %s"%(len(rs.formulae),global_conf.get('entailment_n3'))
# _ruleSet.formulae.extend(rs)
#
# #Setup builtin template graph
# builtinTemplates = global_conf.get('builtinTemplates',False)
# if builtinTemplates:
# builtinTemplateGraph.parse(builtinTemplates,format='n3')
# #setup ddl graph
# ddlGraph = global_conf.get('ddlGraph')
# if ddlGraph:
# ddlGraph = Graph().parse(ddlGraph,
# format='n3')
# print "Registering DDL metadata"
# defaultDerivedPreds.extend(
# IdentifyDerivedPredicates(
# ddlGraph,
# ontGraph,
# _ruleSet))
# #Reduce the DL expressions to a normal form
# NormalFormReduction(ontGraph)
# #extract rules form normalized ontology graph
# dlp=HornFromDL(ontGraph,
# derivedPreds=defaultDerivedPreds,
# complSkip=complementExpansion(ontGraph))
# _ruleSet.formulae.extend(dlp)
# #normalize the ruleset
# ruleSet.formulae.extend(set(DisjunctiveNormalForm(_ruleSet,safety=DATALOG_SAFETY_STRICT)))
return triclopsConf
def ReplaceGraph(datasetOrName,
graphUri,
srcStream,
format='xml',
storeName=True,
baseUri=None,
smartDiff=False,
debug=False):
#TODO: do a lazy replace (only the diff - ala 4Suite repository)
store = ConnectToDataset(datasetOrName) if storeName else datasetOrName
g = Graph(store, graphUri)
if smartDiff:
def hasBNodes(triple):
return filter(lambda term:isinstance(term,BNode),triple)
new_graph = Graph().parse(srcStream,publicID=baseUri)
stmsToAdd = [ s for s in new_graph
if s not in g or hasBNodes(s) ]
stmsToDel = [ s for s in g
if s not in new_graph or hasBNodes(s) ]
for s in stmsToDel:
g.remove(s)
for s in stmsToAdd:
g.add(s)
if debug:
print "Removed %s triples and added %s from/to %s"%(
len(stmsToDel),
len(stmsToAdd),
graphUri
)
else:
g.remove((None, None, None))
g.parse(srcStream,publicID=baseUri)
store.commit()
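# A hedged usage sketch; the dataset name, graph IRI and file are illustrative:
#
#   ReplaceGraph('mysqlDataset',
#                URIRef('http://example.com/graph/1'),
#                open('new_content.rdf'),
#                smartDiff=True)
#
# With smartDiff=True only the computed difference is applied, except that
# statements involving BNodes are always rewritten since they cannot be
# reliably matched between the old and new graphs.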
def ClearGraph(datasetOrName,graphUri,storeName=True):
store = ConnectToDataset(datasetOrName) if storeName else datasetOrName
g = Graph(store, graphUri)
g.remove((None, None, None))
store.commit()
def DestroyOrCreateDataset(datasetName):
"""
Initialize dataset (if exists) or create it if it doesn't
"""
datasetConfig = akara.module_config().get(datasetName)
assert datasetConfig is not None, datasetName
if datasetConfig['type'] == 'MySQL':
configStr = 'user=%s,password=%s,db=%s,port=%s,host=%s' % (
datasetConfig.get('mysqluser'),
datasetConfig.get('mysqlpw'),
datasetConfig.get('mysqldb'),
datasetConfig.get('mysqlPort',3306),
datasetConfig.get('mysqlhost')
)
store = plugin.get('MySQL', Store)(datasetConfig.get('mysqlStoreId'))
rt = store.open(configStr,create=False)
if rt == NO_STORE:
store.open(configStr,create=True)
else:
store.destroy(configStr)
store.open(configStr,create=True)
return store
    else:
        raise NotImplementedError(
            "The only dataset type Akamu can (re)create is MySQL")
def ConnectToDataset(datasetName):
"""
Return rdflib store corresponding to the named dataset, whose connection
parameters are specified in the configuration file
"""
datasetConfig = akara.module_config().get(datasetName)
assert datasetConfig is not None
if datasetConfig['type'] == 'MySQL':
configStr = 'user=%s,password=%s,db=%s,port=%s,host=%s' % (
datasetConfig.get('mysqluser'),
datasetConfig.get('mysqlpw'),
datasetConfig.get('mysqldb'),
datasetConfig.get('mysqlPort',3306),
datasetConfig.get('mysqlhost')
)
store = plugin.get('MySQL', Store)(datasetConfig.get('mysqlStoreId'))
store.open(configStr, create=False)
store.literal_properties.update(
map(URIRef,akara.module_config().get("sqlLiteralProps",[]))
)
store.resource_properties.update(
map(URIRef,akara.module_config().get("sqlResourceProps",[]))
)
return store
elif datasetConfig['type'] == 'SPARQLService':
if 'endpoint' not in datasetConfig:
raise SyntaxError('Missing "endpoint" directive')
sparql_store = plugin.get('SPARQL', Store)(datasetConfig.get('endpoint'))
for k,v in datasetConfig.get('extraQueryParams',{}).items():
sparql_store._querytext.append((k,v))
sparql_store.method = POST if datasetConfig.get(
'method','GET').lower() == 'post' else GET
return sparql_store
    else:
        raise NotImplementedError(
            "Supported dataset types are MySQL and SPARQLService")
def Ask(queryFile,datasetName,graphUri=None,params=None,debug=False):
"""
Same as Query but where query is ASK (returns boolean)
"""
store = ConnectToDataset(datasetName)
g = ConjunctiveGraph(store) if graphUri is None else Graph(store,graphUri)
qFile = os.path.join(akara.module_config().get("sparqlQueryFiles"),queryFile)
query = open(qFile).read()
query = query if params is None else query % params
if debug:
print query
initNs = dict([ (k,URIRef(v)) for k,v in akara.module_config().get("nsPrefixes",{}).items()])
if debug:
then = time.time()
rt = g.query(query,initNs=initNs,DEBUG=debug).serialize(format='python')
print "Query time", time.time() - then
else:
rt = g.query(query,initNs=initNs,DEBUG=debug).serialize(format='python')
return rt
def Query(queryFile,datasetName,graphUri=None,params=None,debug=False):
"""
Evaluate a query (stored in a SPARQL file in the location indicated in the
configuration) against the given dataset (and optional named graph within it)
using the optional parameters given
"""
store = ConnectToDataset(datasetName)
g = ConjunctiveGraph(store) if graphUri is None else Graph(store,graphUri)
qFile = os.path.join(akara.module_config().get("sparqlQueryFiles"),queryFile)
query = open(qFile).read()
query = query if params is None else query % params
if debug:
print query
initNs = dict([ (k,URIRef(v)) for k,v in akara.module_config().get("nsPrefixes",{}).items()])
for rt in g.query(
query,
initNs=initNs,
DEBUG=debug):
yield rt
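# A hedged usage sketch; the query file, dataset name and %-interpolated
# parameters are illustrative.  The query file is resolved against the
# configured sparqlQueryFiles directory:
#
#   for name, mbox in Query('foaf-people.rq', 'mysqlDataset',
#                           params=('http://example.com/graph/1',)):
#       print name, mbox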
def GetParameterizedQuery(queryFile,params=None):
qFile = os.path.join(akara.module_config().get("sparqlQueryFiles"),queryFile)
query = open(qFile).read()
    return query if params is None else query % params

# File: Akamu-0.7/lib/config/dataset.py
import inspect
from webob import Request
from akara import request, response, global_config, registry
from akamu.config.dataset import ConnectToDataset
from rdflib.Graph import ConjunctiveGraph
from rdflib import plugin, URIRef, OWL, RDFS, RDF, Namespace
FOAF_NS = Namespace('http://xmlns.com/foaf/0.1/')
WEB_ACL_NS = Namespace('http://www.w3.org/ns/auth/acl#')
WORLD = FOAF_NS.Agent
AKAMU_WEB_ACL_NS = Namespace('https://code.google.com/p/akamu/WebACL#')
EXECUTE_MODE = AKAMU_WEB_ACL_NS.Execute
ACL_WORLD_QUERY=\
"""
PREFIX acl: <http://www.w3.org/ns/auth/acl#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
ASK {
[] acl:accessTo %s;
acl:mode %s;
acl:agentClass foaf:Agent .
}"""
ACL_CHECK_DIRECTLY_QUERY= \
"""
PREFIX acl: <http://www.w3.org/ns/auth/acl#>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
ASK {
[] a ?class;
foaf:name "%s" .
[] acl:accessTo %s;
acl:mode %s;
acl:agentClass ?class .
}"""
ACCESS_MAP = {
"GET" : WEB_ACL_NS.Read,
"PUT" : WEB_ACL_NS.Write,
"POST" : EXECUTE_MODE,#WEB_ACL_NS.Append,
"DELETE" : WEB_ACL_NS.Write,
"PATCH" : WEB_ACL_NS.Write,
}
class web_acl(object):
"""
Decorator of Akara services which is expected to be used with repoze.who middleware
and manages access to the decorated service using a (configured) AkamuGraphStore
(specified by the first argument) comprising assertions with terms from the
WAC vocabulary:
http://www.w3.org/wiki/WebAccessControl/Vocabulary
as well as assertions about users:
[] a .. agent class ..;
foaf:name "ikenna" .
The classes associated with the user via rdf:type statements correspond to
agentClasses used in statements such as:
[] acl:accessTo <http://example.com/service/1>;
acl:agentClass .. agent class .. .
Then a service decorated this way
@simple_service('GET', '<http://example.com/service/1>','service.1','text/plain',wsgi_wrapper=..)
    @web_acl('.. akamu graph store ..')
def service_2():
..snip..
(where .. akamu graph store .. is a graph store with the assertions above) will control
access, ensuring that the request has been properly authenticated by repoze.who and
that the WAC assertions indicate the user has access to the service, returning a 403 or 401
otherwise, depending on the circumstance.
    (Note: the ACL queries bind acl:mode to the access mode mapped from the
    HTTP method via ACCESS_MAP / the accessMap keyword argument.)
"""
def __init__(self,acl_dataset,accessMap = None,simple_service=True,debug=False):
self.simple_service = simple_service
self.cache = {}
self.acl_dataset = acl_dataset
self.accessMap = ACCESS_MAP.copy()
self.debug = debug
if accessMap:
self.accessMap.update(accessMap)
def __call__(self, func):
def innerHandler(*args, **kwds):
req = Request(request.environ)
_path = req.path[1:] if req.path[0] == '/' else req.path
service_uri = URIRef(
registry._current_registry._registered_services[_path].ident
)
user = request.environ.get('REMOTE_USER')
if not user:
if self.simple_service:
response.code = 401
return "Not authorized to access this resource"
else:
environ, start_response = args
start_response("401 Unauthorized",[])
return "Not authorized to access this resource"
else:
if '_' in kwds:
del kwds['_']
                # key the cache on (user, method): permitted access depends on
                # the HTTP method as well as the user
                allowed = self.cache.get((user, req.method))
if allowed is None:
accessMode = self.accessMap.get(req.method)
if accessMode is None:
allowed = False
                        if self.debug:
print "HTTP method not mapped, no access granted!"
else:
cg = ConjunctiveGraph(
ConnectToDataset(self.acl_dataset)
)
query = ACL_WORLD_QUERY%(service_uri.n3(),accessMode.n3())
any_user = cg.query(query).serialize(format='python')
if self.debug:
print query, any_user
query = ACL_CHECK_DIRECTLY_QUERY%(
user,
service_uri.n3(),
accessMode.n3()
)
allowed_by_group = cg.query(query).serialize(format='python')
if self.debug:
print query, allowed_by_group
allowed = allowed_by_group or any_user
                    self.cache[(user, req.method)] = allowed
if allowed:
if self.simple_service:
argInfo = inspect.getargspec(func)
vargs = argInfo.varargs
keywords = argInfo.keywords
if keywords is None and argInfo.defaults:
keywords = argInfo.args[-len(argInfo.defaults):]
vargs = argInfo.args[:-len(argInfo.defaults)]
if vargs and keywords:
return func(*args, **kwds)
elif vargs:
return func(*args)
elif keywords:
return func(**kwds)
else:
return func()
else:
environ, start_response = args
return func(environ, start_response)
else:
if self.simple_service:
response.code = 403
return "The authenticated user is forbidden from accessing this resource"
else:
environ, start_response = args
start_response("403 Forbidden",[])
return "The authenticated user is forbidden from accessing this resource"
        return innerHandler

# File: Akamu-0.7/lib/security/acl.py
import httplib2, os,cgi,urllib2,datetime
from akara import request, logger, module_config as config
from rdflib import URIRef
from rdflib.Graph import Graph
from rdflib_tools.GraphIsomorphism import IsomorphicTestableGraph
from amara.lib import iri
from amara.lib.util import *
from amara.xslt import transform
from amara.writers.struct import structwriter, E, NS, ROOT, RAW
from akara.services import simple_service, service
from akara import response
from akamu.xslt import xslt_rest, NOOPXML
from akamu.config.dataset import GetParameterizedQuery
from akamu.diglot import layercake_mimetypes, XML_MT, layercake_parse_mimetypes
from cStringIO import StringIO
from urlparse import urlparse
from webob import Request
XHTML_IMT = 'application/xhtml+xml'
HTML_IMT = 'text/html'
XML_IMT = 'application/xml'
SERVICE_ID = 'http://code.google.com/p/akamu/wiki/GraphStoreProtocol'
TEST_NS = 'http://www.w3.org/2009/sparql/docs/tests/data-sparql11/http-rdf-update/tests.html'
@simple_service('GET', SERVICE_ID, 'gsp.validator.form',HTML_IMT+';charset=utf-8')
@xslt_rest(
os.path.join(
config().get('demo_path'),
'gsp_validator.xslt'))
def validator_form(message=None):
if message:
return NOOPXML, { u'message':message }
else:
return NOOPXML
def post_multipart(host, selector, files):
"""
from http://code.activestate.com/recipes/146306/
Post fields and files to an http host as multipart/form-data.
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return the server's response page.
"""
import httplib
content_type, body = encode_multipart_formdata(files)
h = httplib.HTTPSConnection(host)
    header = {
        'Content-Type'  : content_type,
        # header values must be strings, not ints
        'Content-Length': str(len(body))
    }
h.request('POST', selector, body, header)
res = h.getresponse()
return res.status, res.reason, res.read()
def encode_multipart_formdata(files):
"""
from http://code.activestate.com/recipes/146306/
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY1 = 'ThIs_Is_tHe_outer_bouNdaRY_$'
BOUNDARY2 = 'ThIs_Is_tHe_inner_bouNdaRY_$'
CRLF = '\r\n'
L = []
L.append('--' + BOUNDARY1)
L.append('Content-Disposition: form-data; name="graphs"')
L.append('Content-Type: multipart/mixed; boundary=%s'%BOUNDARY2)
L.append('')
for (filename, mtype, value) in files:
L.append('--' + BOUNDARY2)
L.append('Content-Disposition: file; filename="%s"' % (filename,))
L.append('Content-Type: %s' % mtype)
L.append('')
L.append(value)
    # RFC 2046: closing delimiters carry a trailing '--'
    L.append('--' + BOUNDARY2 + '--')
    L.append('--' + BOUNDARY1 + '--')
    L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY1
return content_type, body
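# The generated body nests an inner multipart/mixed payload (one part per
# uploaded graph) inside the outer multipart/form-data envelope; roughly:
#
#   --ThIs_Is_tHe_outer_bouNdaRY_$
#   Content-Disposition: form-data; name="graphs"
#   Content-Type: multipart/mixed; boundary=ThIs_Is_tHe_inner_bouNdaRY_$
#
#   --ThIs_Is_tHe_inner_bouNdaRY_$
#   Content-Disposition: file; filename="lastName.ttl"
#   Content-Type: text/turtle; charset=utf-8
#
#   ... graph content ...
#   --ThIs_Is_tHe_inner_bouNdaRY_$--
#   --ThIs_Is_tHe_outer_bouNdaRY_$--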
TESTS = [
"GET of replacement for empty graph",
"PUT - replace empty graph",
"PUT - Initial state",
"GET of PUT - Initial state",
"PUT - graph already in store",
"GET of PUT - graph already in store",
"PUT - default graph",
"GET of PUT - default graph",
"PUT - mismatched payload",
"PUT - empty graph",
"GET of PUT - empty graph",
"DELETE - existing graph",
"GET of DELETE - existing graph",
"DELETE - non-existent graph",
"POST - existing graph",
"GET of POST - existing graph",
"POST - multipart/form-data",
"GET of POST - multipart/form-data",
"POST - create new graph",
"GET of POST - create new graph",
"POST - empty graph to existing graph",
"GET of POST - after noop",
"HEAD on an existing graph",
"HEAD on a non-existing graph",
]
GRAPH1=\
u"""<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf ="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:v ="http://www.w3.org/2006/vcard/ns#"
>
<foaf:Person rdf:about="%s">
<foaf:businessCard>
<v:VCard>
<v:fn>%s</v:fn>
</v:VCard>
</foaf:businessCard>
</foaf:Person>
</rdf:RDF>"""
GRAPH2=\
u"""<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf ="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:v ="http://www.w3.org/2006/vcard/ns#"
>
<foaf:Person rdf:about="%s">
<foaf:businessCard>
<v:VCard>
<v:given-name>%s</v:given-name>
</v:VCard>
</foaf:businessCard>
</foaf:Person>
</rdf:RDF>"""
GRAPH3=\
u"""<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf ="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:v ="http://www.w3.org/2006/vcard/ns#"
>
<rdf:Description rdf:about="%s">
<foaf:name>%s</foaf:name>
</rdf:Description>
</rdf:RDF>"""
GRAPH4=\
u"""<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf ="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:v ="http://www.w3.org/2006/vcard/ns#"
>
<foaf:Person rdf:about="%s">
<foaf:name>%s</foaf:name>
<foaf:businessCard>
<v:VCard>
<v:fn>%s</v:fn>
</v:VCard>
</foaf:businessCard>
</foaf:Person>
</rdf:RDF>"""
GRAPH5=\
u"""<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf ="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:v ="http://www.w3.org/2006/vcard/ns#"
>
<rdf:Description rdf:about="%s">
<foaf:familyName>%s</foaf:familyName>
</rdf:Description>
</rdf:RDF>
"""
GRAPH6=\
u"""<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf ="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:v ="http://www.w3.org/2006/vcard/ns#"
>
<rdf:Description rdf:about="%s">
<foaf:givenName>%s</foaf:givenName>
</rdf:Description>
</rdf:RDF>
"""
GRAPH7=\
u"""<?xml version="1.0"?>
<rdf:RDF
xmlns:rdf ="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:v ="http://www.w3.org/2006/vcard/ns#"
>
<foaf:Person rdf:about="%s">
<foaf:name>%s</foaf:name>
<foaf:givenName>Jane</foaf:givenName>
<foaf:familyName>Doe</foaf:familyName>
<foaf:businessCard>
<v:VCard>
<v:fn>%s</v:fn>
</v:VCard>
</foaf:businessCard>
</foaf:Person>
</rdf:RDF>"""
TEST_GRAPHS = {
"PUT - Initial state" : (GRAPH1,"person/1",'xml',"John Doe"),
"PUT - graph already in store" : (GRAPH1,"person/1",'xml',"Jane Doe"),
"PUT - default graph" : (GRAPH2,"",'xml',"Alice"),
"PUT - mismatched payload" : (GRAPH1,"person/1",'xml',"Jane Doe"),
"PUT - empty graph" : (None,None,None,None),
"PUT - replace empty graph" : (GRAPH2,"",'xml',"Alice"),
"POST - existing graph" : (GRAPH3,"person/1","xml","Jane Doe"),
"HEAD on an existing graph" : (GRAPH1,"person/1",'xml',"John Doe"),
"HEAD on a non-existing graph" : (None,None,None,None),
"GET of POST - existing graph" : (GRAPH4,"person/1","xml",("Jane Doe",)*2),
"POST - create new graph" : (GRAPH2,"",'xml',"Alice"),
"POST - empty graph to existing graph" : (None,None,None,None),
"multipart/form-data graph 1" : (GRAPH5,"person/1",'xml',"Doe"),
"multipart/form-data graph 2" : (GRAPH6,"person/1",'xml',"Jane"),
"GET of POST - multipart/form-data" : (GRAPH7,"person/1",'xml',("Jane Doe",)*2)
}
class GraphStoreValidator(object):
def __init__(self,graph_store_url,gs_url_internal):
self.gs_url_internal = gs_url_internal
if gs_url_internal:
self.gs_url_internal = gs_url_internal\
if gs_url_internal[-1] == '/' else gs_url_internal + '/'
self.graph_store_url = graph_store_url
self.graphs = TEST_GRAPHS.copy()
self.graph_store_url_base = graph_store_url \
if graph_store_url[-1] == '/' else graph_store_url + '/'
for testName,(src,relUrl,format,name) in list(TEST_GRAPHS.items()):
if src is None:
self.graphs[testName] = None
else:
graphIri = URIRef(iri.absolutize(relUrl,self.graph_store_url_base))
if isinstance(name,tuple):
params = (graphIri,)+name
src = src%params
else:
src = src%(graphIri,name)
self.graphs[testName] = IsomorphicTestableGraph().parse(StringIO(src),
format=format)
def yieldResultElem(self,testName,successful,url,message=None):
path_and_query = urlparse(url).path
path_and_query = path_and_query + u'?' + urlparse(url
).query if urlparse(url).query else path_and_query
attrs = {
u'path' : path_and_query,
u'name' : testName,
u'result' : u'passed' if successful else u'failed'
}
assert testName in TESTS, testName
testId = testName.replace(' ','_').replace('-','').replace('/','_').lower()
attrs['id'] = testId
if message:
return E(
(TEST_NS,u'Result'),attrs,message
)
else:
return E(
(TEST_NS,u'Result'),attrs
)
def graphSubmit(
self,
h,
url,
testName,
getTestName=None,
expectedStatus=[201,204],
imt='text/turtle; charset=utf-8',
format='n3',
method='PUT',
responseInfo=None,
getUrl=None):
responseInfo = responseInfo if responseInfo is not None else []
graph = self.graphs[testName]
body = graph.serialize(format=format) if graph is not None else u""
hasPayload = False
headers = {'cache-control' : 'no-cache'}
if method in ['PUT','POST']:
hasPayload = True
headers['content-type'] = imt
headers['content-length'] = str(len(body))
resp, content = h.request(
url,
method,
body=body if hasPayload else None,
headers=headers
)
responseInfo.append((resp,content))
if isinstance(expectedStatus,list):
matchingStatus = resp.status in expectedStatus
else:
matchingStatus = resp.status == expectedStatus
if method == 'HEAD':
if content:
yield self.yieldResultElem(
testName,
False,
url,
u'HEAD response should have no content in the body'
)
elif not matchingStatus:
yield self.yieldResultElem(
testName,
False,
url,
u'expected status %s, received %s (%s)'%(
expectedStatus,
resp.status,
content
)
)
elif 'content-length' not in resp:
yield self.yieldResultElem(
testName,
False,
url,
u'expected content-length header in response'
)
elif 'content-type' not in resp:
yield self.yieldResultElem(
testName,
False,
url,
u'expected content-type header in response'
)
else:
yield self.yieldResultElem(testName,True,url)
elif not matchingStatus:
yield self.yieldResultElem(
testName,
False,
url,
u'expected status %s, received %s (%s)'%(
expectedStatus,
resp.status,
content
)
)
else:
yield self.yieldResultElem(testName,True,url)
if getTestName:
_url = getUrl if getUrl else url
for el in self.isomorphCheck(testName,h,_url,getTestName):
yield el
def isomorphCheck(self,testName,h,url,alternativeTestName=None):
try:
resp, content = h.request(url,"GET",headers={
'cache-control': 'no-cache',
'Accept' : 'text/turtle; charset=utf-8'
})
        except Exception, e:
            yield self.yieldResultElem(
                testName,
                False,
                url,
                u'Unable to perform content negotiated GET: %s'%repr(e)
            )
else:
getTestName = alternativeTestName if alternativeTestName else testName
if resp.status == 200:
if 'content-type' not in resp or resp['content-type'].find('text/turtle')+1:
if content is not None and not content.strip():
content = None
g1=IsomorphicTestableGraph().parse(
StringIO(content),
format='n3') if content is not None else None
if g1 != self.graphs[testName]:
print "Unexpected response: "
print content
print resp.status
print resp
print "----"*5
yield self.yieldResultElem(
getTestName,
False,
url,
u'unexpected returned RDF graph'
)
else:
yield self.yieldResultElem(getTestName,True,url)
elif 'content-type' in resp:
yield self.yieldResultElem(
getTestName,
False,
url,
u'expected returned content-type of "text/turtle; charset=utf-8", received %s'%(
resp['content-type']
)
)
else:
yield self.yieldResultElem(
getTestName,
False,
url,
u'expected status %s, received %s (%s)'%(
200,resp.status,content
)
)
def runTests(self):
h = httplib2.Http()
url = iri.absolutize("person/1.ttl",self.graph_store_url_base)
if self.gs_url_internal:
internal_url = iri.absolutize("person/1.ttl",self.gs_url_internal)
indirect_url = self.graph_store_url_base + '?graph=' + urllib2.quote(internal_url)
else:
indirect_url = self.graph_store_url_base + '?graph=' + urllib2.quote(url)
for el in self.graphSubmit(h,url,"PUT - Initial state","GET of PUT - Initial state"):
yield el
h = httplib2.Http()
url = iri.absolutize("person/1.ttl",self.graph_store_url_base)
testName = "HEAD on an existing graph"
for el in self.graphSubmit(h,url,testName,expectedStatus=[200,204],method='HEAD'):
yield el
h = httplib2.Http()
testName = "HEAD on a non-existing graph"
        # report the URL actually requested (person/4.ttl), not a stale one
        url = iri.absolutize("person/4.ttl", self.graph_store_url_base)
        resp, content = h.request(url, "HEAD")
if resp.status == 404:
yield self.yieldResultElem(testName,True,url)
else:
yield self.yieldResultElem(
testName,
False,
url,
u'expected status %s, received %s (%s)'%(
404,
resp.status,
content
)
)
testName = u"PUT - graph already in store"
for el in self.graphSubmit(
h,
url,
testName,
expectedStatus=[200,204],
getTestName="GET of PUT - graph already in store",
getUrl=indirect_url):
yield el
url = self.graph_store_url_base+'?default'
testName = "PUT - default graph"
for el in self.graphSubmit(
h,
url,
testName,
"GET of PUT - default graph",
expectedStatus=204):
yield el
h = httplib2.Http()
url = iri.absolutize("person/1.ttl",self.graph_store_url_base)
testName = "PUT - mismatched payload"
for el in self.graphSubmit(h,url,testName,expectedStatus=400,imt='application/rdf+xml'):
yield el
h = httplib2.Http()
url = iri.absolutize("person/2.ttl",self.graph_store_url_base)
if self.gs_url_internal:
internal_url = iri.absolutize("person/2.ttl",self.gs_url_internal)
indirect_url = self.graph_store_url_base + '?graph=' + urllib2.quote(internal_url)
else:
indirect_url = self.graph_store_url_base + '?graph=' + urllib2.quote(url)
for el in self.graphSubmit(
h,
indirect_url,
"PUT - empty graph",
"GET of PUT - empty graph",
getUrl=url):
yield el
h = httplib2.Http()
url = iri.absolutize("person/2.ttl",self.graph_store_url_base)
for el in self.graphSubmit(
h,
url,
"PUT - replace empty graph",
"GET of replacement for empty graph",
expectedStatus=[201,204]):
yield el
testName = "DELETE - existing graph"
resp, content = h.request(url,"DELETE")
if resp.status not in [200,204]:
yield self.yieldResultElem(
testName,
False,
url,
u'expected status %s, received %s (%s)'%(
200,
resp.status,
content
)
)
else:
yield self.yieldResultElem(testName,True,url)
testName = "GET of DELETE - existing graph"
resp, content = h.request(url,"GET",headers={
'cache-control': 'no-cache',
})
if resp.status == 404:
yield self.yieldResultElem(testName,True,url)
else:
yield self.yieldResultElem(
testName,
False,
url,
u'expected status %s, received %s (%s)'%(
404,
resp.status,
content
)
)
testName = "DELETE - non-existent graph"
resp, content = h.request(url,"DELETE")
if resp.status == 404:
yield self.yieldResultElem(testName,True,url)
else:
yield self.yieldResultElem(
testName,
False,
url,
u'expected status %s, received %s (%s)'%(
404,
resp.status,
content
)
)
h = httplib2.Http()
url = iri.absolutize("person/1.ttl",self.graph_store_url_base)
testName = "POST - existing graph"
for el in self.graphSubmit(
h,
url,
testName,
expectedStatus=[200,204],
method='POST'):
yield el
for el in self.isomorphCheck(
"GET of POST - existing graph",
h,
url,
alternativeTestName="GET of POST - existing graph"):
yield el
try:
from poster.encode import multipart_encode, MultipartParam
from poster.streaminghttp import register_openers
h = httplib2.Http()
testName = "POST - multipart/form-data"
register_openers()
datagen, headers = multipart_encode(
[
MultipartParam(
"lastName.ttl",
self.graphs["multipart/form-data graph 1"].serialize(
format='turtle'
),
filename="lastName.ttl",
filetype='text/turtle; charset=utf-8'
),
MultipartParam(
"firstName.ttl",
self.graphs["multipart/form-data graph 2"].serialize(
format='turtle'
),
filename="firstName.ttl",
filetype='text/turtle; charset=utf-8'
)
]
)
req = urllib2.Request(url, datagen, headers)
resp = urllib2.urlopen(req)
if resp.code not in [200,204]:
yield self.yieldResultElem(
testName,
False,
url,
u'expected status %s, received %s (%s)'%(
[200,204],
resp.code,
resp.read()
)
)
else:
yield self.yieldResultElem(testName,True,url)
for el in self.isomorphCheck(
"GET of POST - multipart/form-data",
h,
url,
"GET of POST - multipart/form-data"):
yield el
except urllib2.HTTPError, e:
yield self.yieldResultElem(
testName,
False,
url,
u'Invalid Server response: %s'%repr(e)
)
except ImportError:
pass
h = httplib2.Http()
url = self.graph_store_url_base
testName = "POST - create new graph"
responseInfo = []
for el in self.graphSubmit(
h,
url,
testName,
expectedStatus=201,
method='POST',
responseInfo=responseInfo):
yield el
if responseInfo:
resp,content = responseInfo[0]
if 'location' in resp:
yield self.yieldResultElem(testName,True,url)
url = resp['location']
for el in self.isomorphCheck(
"POST - create new graph",
h,
url,
alternativeTestName="GET of POST - create new graph"):
yield el
h = httplib2.Http()
for el in self.graphSubmit(
h,
url,
"POST - empty graph to existing graph",
expectedStatus=[200,204],
method='POST'):
yield el
for el in self.isomorphCheck(
"POST - create new graph",
h,
url,
alternativeTestName="GET of POST - after noop"):
yield el
else:
yield self.yieldResultElem(
testName,
False,
url,
u'POST to graph store should return Location header: %s'%repr(resp)
)
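# A hedged sketch of driving the validator directly; the Graph Store URL is
# illustrative:
#
#   validator = GraphStoreValidator('http://localhost:8880/graph-store/', None)
#   for result_element in validator.runTests():
#       ...  # each yielded element reports one test name, path and pass/fail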
class WsgiGSPValidator(object):
def __init__(self,app): pass
def __call__(self, environ, start_response):
req = Request(environ)
if req.method == 'POST':
if req.content_type == 'application/x-www-form-urlencoded':
implementation_url = req.POST.get('doap_project_url')
validator_url = req.POST.get('gs_url')
project_name = req.POST.get('doap_project_name')
gsInternalUrl = req.POST.get('gs_url_internal')
else:
                form = cgi.FieldStorage(
                    fp=StringIO(req.body),
                    # use the environ passed to this WSGI callable rather than
                    # akara's thread-local request environ
                    environ=environ
                )
implementation_url = form.getvalue("doap_project_url")
validator_url = form.getvalue("gs_url")
project_name = form.getvalue('doap_project_name')
gsInternalUrl = form.getvalue('gs_url_internal')
elif req.method == 'GET':
implementation_url = req.params.get('doap_project_url')
validator_url = req.params.get("gs_url")
project_name = req.params.get('doap_project_name')
gsInternalUrl = req.params.get('gs_url_internal')
else:
rt = "Validation HTTP methods supported are POST and GET: received %s"%(
req.method
)
start_response("405 Method Not Allowed",
[("Content-Length", len(rt))])
return rt
if validator_url and implementation_url:
validator = GraphStoreValidator(validator_url,gsInternalUrl)
src = StringIO()
w = structwriter(indent=u"yes", stream=src)
w.feed(
ROOT(
E(
(TEST_NS,u'Results'),
(elem for elem in validator.runTests())
)
)
)
requestedRDF = set(layercake_mimetypes).intersection(req.accept)
if 'HTTP_ACCEPT' not in environ or not requestedRDF:
rt = transform(
src.getvalue(),
os.path.join(
config().get('demo_path'),
'gsp_validation_results.xslt'),
params={
u'project' : project_name,
u'url' : implementation_url})
start_response("200 Ok",
[("Content-Type" , HTML_IMT+';charset=utf-8'),
("Content-Length", len(rt))])
return rt
elif requestedRDF:
preferredMT = req.accept.best_match(layercake_mimetypes)
format = layercake_mimetypes[preferredMT]
rt = transform(
src.getvalue(),
os.path.join(
config().get('demo_path'),
'gsp_validation_results_earl.xslt'),
params={
u'project' : project_name,
u'date' : datetime.date.today().isoformat(),
u'url' : implementation_url})
g=Graph().parse(StringIO(rt),format='xml')
g.bind('validator',URIRef('http://metacognition.info/gsp_validation/'))
g.bind('gsp',URIRef('http://www.w3.org/2009/sparql/docs/tests/data-sparql11/http-rdf-update/manifest#'))
g.bind('test',
URIRef(
'http://www.w3.org/2009/sparql/docs/tests/data-sparql11/http-rdf-update/#')
)
rt=g.serialize(format=format)
start_response("200 Ok",
[("Content-Type" , preferredMT),
("Content-Length", len(rt))])
return rt
else:
msg="Bad request"
start_response("303 See Other",
[("Location","gsp.validator.form?message=Please+provide+a+Graph+Store+URL+to+validate+and+an+implementation+url"),
("Content-Length", len(msg))])
return msg
@service(SERVICE_ID, 'gsp.validator.run',wsgi_wrapper=WsgiGSPValidator)
def validation(): pass

# File: Akamu-0.7/lib/demo/gsp_validator.py
import json, os, cgi, urllib, re
from pprint import pprint
from amara.writers.struct import structwriter, E, ROOT
from cStringIO import StringIO
from akara import module_config as config, request
from akara.services import service, simple_service
from akamu.config.dataset import Query
from akamu.xslt import xslt_rest, NOOPXML
SERVICE_ID_PREFIX = u'http://example.com/service/'
HTML_IMT = 'text/html'
JSON_IMT = 'application/json'
@simple_service(
'GET' ,
SERVICE_ID_PREFIX + 'music_brainz_browse_artist',
'browse.artist',
HTML_IMT)
@xslt_rest(
os.path.join(
config().get('working_directory'),
'test/musicbrainz/music_brainz_browse_artist.xslt'),
)
def browse_artist(artist_uri):
names = set()
tracks = set()
for artistName, track, trackLabel in Query(
'musicbrainz-artist-info.rq',
'musicbrainz',
params=(artist_uri,artist_uri)):
names.add(artistName)
tracks.add(trackLabel)
src = StringIO()
w = structwriter(indent=u"yes", stream=src)
w.feed(ROOT(
E(u'Music',{u'artist': ' / '.join(names)},
E(u'Tracks',
(
E(u'Track',{u'name':trackLabel}) for trackLabel in tracks
)
),
)
))
return src.getvalue()
@simple_service(
'GET' ,
SERVICE_ID_PREFIX + 'music_brainz_form',
'search.form',
HTML_IMT)
@xslt_rest(
os.path.join(
config().get('working_directory'),
'test/musicbrainz/music_brainz_search.xslt'),
source=NOOPXML
)
def search_form(): pass
@simple_service('GET', SERVICE_ID_PREFIX + 'categories', 'categories',JSON_IMT)
def categories(term=None):
return json.dumps([
{ 'label' : label,
'value' : label }
for tag,label in Query('musicbrainz-categories.rq',
'musicbrainz',
params=(term,))
])
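# The service above returns the [{"label": ..., "value": ...}, ...] shape
# consumed by autocomplete widgets such as jQuery UI's; a hedged sample
# response for term='ja':
#
#   [{"label": "jazz", "value": "jazz"}]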
@simple_service(
'POST' ,
SERVICE_ID_PREFIX + 'music_brainz_search',
'search',
HTML_IMT)
@xslt_rest(
os.path.join(
config().get('working_directory'),
'test/musicbrainz/music_brainz_search.xslt'),
)
def search(body,ctype):
form = cgi.FieldStorage(
fp=StringIO(body),
environ=request.environ
)
    category = form.getvalue('category')
    artist   = form.getvalue('artistSearch')
    # guard against a missing search term: re.compile(None) raises TypeError
    pattern  = re.compile(artist) if artist else re.compile('.*')
src = StringIO()
w = structwriter(indent=u"yes", stream=src)
if category is not None and category.strip():
queryFile = 'musicbrainz-artists.rq'
params = (category,)
else:
queryFile = 'musicbrainz-artists-any-category.rq'
params = None
w.feed(ROOT(
E(u'Root',
E(u'Artists',
(
E(u'Artist',
{u'name' : name,
u'url' : urllib.quote(artist_uri)})
for artist_uri,
name in Query(queryFile,'musicbrainz',params=params)
if pattern.match(name)
)
)
)
))
    return src.getvalue()

# File: Akamu-0.7/lib/demo/musicbrainz.py
__author__ = 'chimezieogbuji'
import cgi, amara
from akamu.xslt import xslt_rest
from akara.services import simple_service
from cStringIO import StringIO
from amara.writers.struct import structwriter, E, NS, ROOT
from amara.lib import U
from akara import request
from akamu.diglot import Manager, Resource
from akamu.xslt import TransformWithAkamuExtensions
from akamu.config.dataset import DestroyOrCreateDataset
from akamu.config.diglot import GetDiglotManager
SERVICE_ID = 'http://example.com/xslt_rest'
def MakeFoafGraph(name='Chimezie Ogbuji'):
src = StringIO()
w = structwriter(indent=u"yes", stream=src)
w.feed(
ROOT(
E(u'Patient',{ u'name' : U(name),
u'gender' : u'Male'}
)
)
)
return src.getvalue()
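# MakeFoafGraph yields a document of roughly this shape (sketch):
#
#   <?xml version="1.0" encoding="utf-8"?>
#   <Patient name="Chimezie Ogbuji" gender="Male"/>
#
# which the services below feed through test/foaf.xslt via @xslt_rest.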
@simple_service('GET', SERVICE_ID, 'xslt_rest_get','application/rdf+xml')
@xslt_rest('test/foaf.xslt',source=MakeFoafGraph,srcIsFn=True)
def rest_service(name='Chimezie Ogbuji'): pass
@simple_service('POST', SERVICE_ID, 'xslt_rest_post','application/rdf+xml')
@xslt_rest('test/foaf.xslt',source=MakeFoafGraph,srcIsFn=True)
def rest_service_post(body, ctype): pass
@simple_service('GET', SERVICE_ID, 'diglot_extensions_basic_test')
def test_diglot_extensions_basic(rootPath):
def TestGraphUriFn(path,fName):
return 'http://example.com%s'%path.split('.')[0]
DestroyOrCreateDataset('mysqlDataset')
mgr = GetDiglotManager(TestGraphUriFn)
rt = TransformWithAkamuExtensions(
'<Root/>',
open('test/diglot_extension_test1.xslt').read(),
mgr)
DestroyOrCreateDataset('mysqlDataset')
doc = amara.parse(rt)
assert doc.xml_select(
'/Answer/sparql:sparql/sparql:boolean[text() = "true"]',
prefixes={u'sparql' : u'http://www.w3.org/2005/sparql-results#'}
)
assert doc.xml_select('/Answer/Patient[@name = "Uche Ogbuji"]')
assert doc.xml_select('/Answer/AfterChange/Patient[@name = "Chimezie Ogbuji"]')
assert doc.xml_select('/Answer/AfterChange/FoundPatientViaExtensionFunction')
return "Success" | Akamu | /Akamu-0.7.tar.gz/Akamu-0.7/lib/demo/xslt.py | xslt.py |
import sys
from StringIO import StringIO
from webob import Request
from akamu.security.acl import web_acl, WEB_ACL_NS
from akamu.security.repoze import RepozeWrapper
from akara.services import service, simple_service
from rdflib import plugin, URIRef, OWL, RDFS, RDF, Namespace
ACCESS_NS = Namespace('http://example.com/access_classes/')
SERVICE_ID_PREFIX = 'http://example.com/service/'
def CreateRepozeMiddleWare(app):
from repoze.who.plugins.htpasswd import HTPasswdPlugin
from repoze.who.plugins.basicauth import BasicAuthPlugin
from repoze.who.plugins.auth_tkt import AuthTktCookiePlugin
from repoze.who.plugins.redirector import RedirectorPlugin
from repoze.who.interfaces import IChallenger
from repoze.who.middleware import PluggableAuthenticationMiddleware
io = StringIO()
io.write('admin:admin')
io.seek(0)
def cleartext_check(password, hashed):
return password == hashed
htpasswd = HTPasswdPlugin(io, cleartext_check)
basicauth = BasicAuthPlugin('repoze.who')
redirector = RedirectorPlugin('/login')
redirector.classifications = {IChallenger:['browser'],} # only for browser
identifiers = [('basicauth', basicauth)]
authenticators = [('htpasswd', htpasswd)]
challengers = [('redirector', redirector),
('basicauth', basicauth)]
mdproviders = []
from repoze.who.classifiers import default_request_classifier
from repoze.who.classifiers import default_challenge_decider
import logging
log_stream = sys.stdout
return PluggableAuthenticationMiddleware(
app,
identifiers,
authenticators,
challengers,
mdproviders,
default_request_classifier,
default_challenge_decider,
log_stream = log_stream,
log_level = logging.DEBUG
)
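# With this middleware in place the demo services accept HTTP Basic
# credentials admin/admin; a hedged invocation sketch (host and port are
# illustrative, 8880 being Akara's usual default):
#
#   curl -u admin:admin http://localhost:8880/service.3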
def service_stub(environ, start_response, supported_methods):
req = Request(environ)
if req.method not in supported_methods:
start_response("405 Method Not Allowed", [])
return "Method not allowed for this resource"
else:
rt = 'Success'
start_response("200 Ok",
[("Content-Type", 'text/plain'),
("Content-Length", len(rt))]
)
return rt
@service(SERVICE_ID_PREFIX+'1','service.1',wsgi_wrapper=CreateRepozeMiddleWare)
@web_acl('mysqlDataset',simple_service=False)
def service_1(environ, start_response):
return service_stub(
environ,
start_response,
['POST','GET']
)
@service(SERVICE_ID_PREFIX + '2', 'service.2',wsgi_wrapper=CreateRepozeMiddleWare)
@web_acl('mysqlDataset',
accessMap = { "POST" : WEB_ACL_NS.Append },
simple_service=False)
def service_2(environ, start_response):
return service_stub(
environ,
start_response,
['POST','GET','PUT']
)
@simple_service(
'GET' ,
SERVICE_ID_PREFIX + '3',
'service.3',
'text/plain',
wsgi_wrapper=CreateRepozeMiddleWare)
@web_acl('mysqlDataset')
def service_3(): return "Success"

# File: Akamu-0.7/lib/demo/web_acl.py
__author__ = 'chimezieogbuji'
from cStringIO import StringIO
try:
from Ft.Xml.Xslt import XsltElement, ContentInfo, AttributeInfo
from Ft.Xml.XPath import Conversions
from Ft.Xml.Domlette import NonvalidatingReader
from Ft.Xml.Xslt import OutputParameters
from Ft.Xml.Xslt.CopyOfElement import CopyNode
except ImportError:
import warnings;warnings.warn("unable to import 4Suite, extensions not supported")
from akamu.config.dataset import ConnectToDataset
from rdflib.Graph import Graph, ConjunctiveGraph
from rdflib import plugin, URIRef
NS=u'tag:metacognitionllc.com,2012:AkamuXsltExtensions'
class SPARQLQueryElement(XsltElement):
"""
This extension evaluates a SPARQL query either against the Akamu RDF data
set(s) identified by name either within the targetGraph or across the entire
dataset. The schema is a path within the Akamu Diglot FS to an OWL or RDFS
documentation of the vocabulary for use in optimizing queries over
MySQL/layercake-python datasets
The body of this element is a template; when the template is instantiated,
it is processed as a SPARQL query string evaluated using the given
extension attributes
"""
content = ContentInfo.Template
legalAttrs = {
'targetGraph' : AttributeInfo.StringAvt(default=''),
'datasetName' : AttributeInfo.StringAvt(default=''),
'schema' : AttributeInfo.StringAvt(default='')
}
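    # A hedged stylesheet usage sketch; the dataset name and graph IRI are
    # illustrative:
    #
    #   <akamu:sparql-query
    #       xmlns:akamu="tag:metacognitionllc.com,2012:AkamuXsltExtensions"
    #       datasetName="mysqlDataset"
    #       targetGraph="http://example.com/graph">
    #     ASK { ?s ?p ?o }
    #   </akamu:sparql-query>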
def instantiate(self, context, processor):
try:
# Set the output to an RTF writer, which will create an RTF for
# us.
processor.pushResultTree(self.baseUri)
# The template is manifested as children of the extension element
# node. Instantiate each in turn.
for child in self.children:
child.instantiate(context, processor)
# You want to be sure you re-balance the stack even in case of
# error.
finally:
# Retrieve the resulting RTF.
result_rtf = processor.popResult()
query=result_rtf.childNodes[0].nodeValue
context.setProcessState(self)
context.processorNss = self.namespaces
targetGraph = Conversions.StringValue(self._targetGraph.evaluate(context))
datasetName = Conversions.StringValue(self._datasetName.evaluate(context))
schema = self._schema.evaluate(context)
store = ConnectToDataset(datasetName)
dataset = Graph(store,URIRef(targetGraph)) if targetGraph else ConjunctiveGraph(store)
print "SPARQL: \n%s" % query
if schema:
pass
# ontGraph=Graph().parse(StringIO(repo.fetchResource(schema).getContent()))
# for litProp,resProp in ontGraph.query(OWL_PROPERTIES_QUERY,
# initNs={u'owl':OWL_NS}):
# if litProp:
# print "\t Registering literal property ", litProp
# dataset.store.literal_properties.add(litProp)
# if resProp:
# print "\t Registering resource property ", resProp
# dataset.store.resource_properties.add(resProp)
try:
rt = dataset.query(
query.encode('utf-8'),
initNs=self.namespaces).serialize(format='xml')
doc = NonvalidatingReader.parseString(
rt.encode('utf-8'),
'tag:[email protected]:2007:meaninglessURI')
# doc = _dispatchSPARQLQuery(connection,
# storeKind,
# storeIdentifier,
# dataset,
# targetGraph,
# query,
# self.namespaces)
except:
#Error
import traceback
st = StringIO()
traceback.print_exc(file=st)
processor.xslMessage(st.getvalue())
raise
processor.writers[-1].copyNodes(doc.documentElement)
class CreateResourceElement(XsltElement):
content = ContentInfo.Template
legalAttrs = {
'path' : AttributeInfo.StringAvt(default=''),
'literal-content' : AttributeInfo.YesNo(
default='no',
description=('If yes, treat the content literally, i.e. do not'
' execute any XSLT instructions or extensions')),
'method' : AttributeInfo.QNameAvt(),
'version' : AttributeInfo.NMTokenAvt(),
'encoding' : AttributeInfo.StringAvt(),
'omit-xml-declaration' : AttributeInfo.YesNoAvt(),
'standalone' : AttributeInfo.YesNoAvt(),
'doctype-public' : AttributeInfo.StringAvt(),
'doctype-system' : AttributeInfo.StringAvt(),
'cdata-section-elements' : AttributeInfo.QNamesAvt(),
'indent' : AttributeInfo.YesNoAvt(),
'media-type' : AttributeInfo.StringAvt(),
}
doesSetup = True
def setup(self):
self._output_parameters = OutputParameters.OutputParameters()
return
def instantiate(self, context, processor):
context.setProcessState(self)
stream = StringIO()
self._output_parameters.avtParse(self, context)
processor.addHandler(self._output_parameters, stream)
literal_content = self._literal_content
if literal_content:
for child in self.children:
CopyNode(processor, child)
else:
self.processChildren(context, processor)
processor.removeHandler()
text = stream.getvalue()
path = Conversions.StringValue(self._path.evaluate(context))
processor.manager.createResource(path,text.encode('utf-8'))
class GetResourceContentElement(XsltElement):
content = ContentInfo.Empty
legalAttrs = {
'path' : AttributeInfo.StringAvt(default='')
}
def instantiate(self, context, processor):
context.setProcessState(self)
path = Conversions.StringValue(self._path.evaluate(context))
content = processor.manager.getResource(path).getContent()
doc = NonvalidatingReader.parseString(content,NS)
processor.writers[-1].copyNodes(doc.documentElement)
def GetResourceFunction(context,path):
content = context.processor.manager.getResource(path).getContent()
doc = NonvalidatingReader.parseString(content,NS)
return [ doc.documentElement ]
class UpdateResourceElement(XsltElement):
legalAttrs = {
'path' : AttributeInfo.StringAvt(default=''),
'literal-content' : AttributeInfo.YesNo(
default='no',
description=('If yes, treat the content literally, i.e. do not'
' execute any XSLT instructions or extensions')),
'method' : AttributeInfo.QNameAvt(),
'version' : AttributeInfo.NMTokenAvt(),
'encoding' : AttributeInfo.StringAvt(),
'omit-xml-declaration' : AttributeInfo.YesNoAvt(),
'standalone' : AttributeInfo.YesNoAvt(),
'doctype-public' : AttributeInfo.StringAvt(),
'doctype-system' : AttributeInfo.StringAvt(),
'cdata-section-elements' : AttributeInfo.QNamesAvt(),
'indent' : AttributeInfo.YesNoAvt(),
'media-type' : AttributeInfo.StringAvt(),
}
doesSetup = True
def setup(self):
self._output_parameters = OutputParameters.OutputParameters()
return
def instantiate(self, context, processor):
context.setProcessState(self)
stream = StringIO()
self._output_parameters.avtParse(self, context)
processor.addHandler(self._output_parameters, stream)
literal_content = self._literal_content
if literal_content:
for child in self.children:
CopyNode(processor, child)
else:
self.processChildren(context, processor)
processor.removeHandler()
text = stream.getvalue()
path = Conversions.StringValue(self._path.evaluate(context))
res = processor.manager.getResource(path)
res.update(text)
ExtFunctions = {
# (NS, 'sparql-query') : SPARQLQueryFunction,
(NS, 'get-resource') : GetResourceFunction
}
ExtElements = {
(NS, 'sparql-query') : SPARQLQueryElement,
(NS, 'create-resource') : CreateResourceElement,
(NS, 'get-resource') : GetResourceContentElement,
(NS, 'update-resource') : UpdateResourceElement
}

# File: Akamu-0.7/lib/xslt/extensions.py
__author__ = 'chimezieogbuji'
import cgi, inspect, re, hashlib
from cStringIO import StringIO as c_string_io
from StringIO import StringIO as regular_string_io
from amara.xslt import transform
from akara import request, response, global_config
from amara.xslt.processor import processor as amara_processor
from amara.lib import inputsource
# from akamu.xslt.extensions import NS
FIREFOX_PATTERN = re.compile(r'15\..+')
IE_9_PATTERN = re.compile(r'9.0.+')
IE_8_PATTERN = re.compile(r'8.0.+')
OPERA_PATTERN = re.compile(r'11.62.+')
SAFARI_PATTERN = re.compile(r'5.1.3')
CHROME_PATTERN = re.compile(r'20\..*')
#See: http://greenbytes.de/tech/tc/xslt/
CLIENT_SIDE_BROWSERS = [
('Firefox',FIREFOX_PATTERN),
('Microsoft Internet Explorer', IE_9_PATTERN),
('Microsoft Internet Explorer',IE_8_PATTERN),
('Opera',OPERA_PATTERN),
('Safari',SAFARI_PATTERN),
('Chrome',CHROME_PATTERN),
]
def ClientSideXSLTCapable(environ):
import httpagentparser
agentInfo = httpagentparser.detect(environ.get('HTTP_USER_AGENT', ''))
browserInfo = agentInfo['browser']
supported = filter(lambda (name,versionPattn):
browserInfo['name'] == name and
versionPattn.match(browserInfo['version']),
CLIENT_SIDE_BROWSERS)
return bool(supported)
def TransformWithAkamuExtensions(src,xform,manager,params=None,baseUri=None):
    import warnings;warnings.warn("Akamu XSLT extensions are not fully supported!")
    # NS lives in akamu.xslt.extensions (its module-level import is commented
    # out above); import it here so the name is actually defined
    from akamu.xslt.extensions import NS
    baseUri = baseUri if baseUri else NS
from amara.xslt.result import streamresult
from amara.xpath.util import parameterize
params = parameterize(params) if params else {}
processor = amara_processor(ignore_pis=True)#Processor.Processor()
processor.manager = manager
processor.registerExtensionModules(['akamu.xslt.extensions'])
    # plain StringIO is undefined here: this module imports it as
    # regular_string_io
    result = streamresult(regular_string_io())
    source = inputsource(src, baseUri)
    isrc = inputsource(xform, baseUri)
processor.append_transform(isrc)
    processor.run(source, params, result)
# processor.run(
# source,
# result=result,
# ignorePis=True,
# topLevelParams=params
# )
return result.getvalue()
NOOPXML = u'<Root/>'
class xslt_rest(object):
"""
    Decorator of Akara services that routes all HTTP (query or form)
    parameters of an invocation into the transform as XSLT parameters.
    The source document (a string) is produced by applying a user-specified
    function to the parameters, and the result of transforming that source
    (with a user-specified transform) is used as the response of the service.
"""
def __init__(self,
transform,
source = None,
argRemap = None,
parameters = None,
clientSide = False,
srcIsFn = False,
etag_result = False,
kwdArgumentsToDel = None):
self.etag_result = etag_result
self.clientSide = clientSide
self.argRemap = argRemap if argRemap else {}
self.transform = transform
self.params = parameters if parameters else {}
self.source = source if source else None
self.srcIsFn = srcIsFn
self.kwdArgumentsToDel = kwdArgumentsToDel if kwdArgumentsToDel else ['_']
def setEtagToResultTreeHash(self, src):
#If user specifies, set hash of result as ETag of response
if 'wheezy.http.cache_policy' in request.environ:
            #Reuse any policy set by the decorated Akara service
policy = request.environ['wheezy.http.cache_policy']
else:
            #Set the policy so the wheezy.http caching middleware can use it
policy = request.environ['wheezy.http.cache_profile'].cache_policy()
request.environ['wheezy.http.cache_policy'] = policy
policy.etag(hashlib.sha1(src).hexdigest())
def __call__(self, func):
def innerHandler(*args, **kwds):
argNames = inspect.getargspec(func).args
parameters = {}
parameters.update(self.params)
isaPost = len(args) == 2 and list(argNames) == ['body','ctype']
if isaPost:
#Parameters in POST body
fields = cgi.FieldStorage(
fp=regular_string_io(args[0]),
environ=request.environ
)
for k in fields:
val = fields.getvalue(k)
if k in self.argRemap:
parameters[self.argRemap[k]] = val
else:
parameters[k] = val
else:
#parameters to service method are GET query arguments
                for idx,argName in enumerate(argNames):
                    if argName in self.argRemap:
                        parameters[self.argRemap[argName]] = args[idx] if len(args) > idx else kwds[argName]
                    elif len(args) > idx or argName in kwds:
                        parameters[argName] = args[idx] if len(args) > idx else kwds[argName]
for k,v in kwds.items():
if k in self.argRemap:
parameters[self.argRemap[k]] = v
else:
parameters[k] = v
#Route non-keyword and keyword arguments and their values to
#XSLT parameters
argInfo = inspect.getargspec(func)
vargs = argInfo.varargs
keywords = argInfo.keywords
#If the source is a function, then the parameters
#are given to it, otherwise, to the decorated service function
srcFn = self.source if self.srcIsFn else func
if keywords is None and argInfo.defaults:
keywords = argInfo.args[-len(argInfo.defaults):]
vargs = argInfo.args[:-len(argInfo.defaults)]
for _arg in self.kwdArgumentsToDel:
if _arg in kwds:
del kwds[_arg]
if isaPost:
src = srcFn(*args) if (
self.source is None or self.srcIsFn
) else self.source
elif vargs and keywords:
src = srcFn(*args, **kwds) if (
self.source is None or self.srcIsFn
) else self.source
elif vargs:
src = srcFn(*args) if (
self.source is None or self.srcIsFn
) else self.source
elif keywords:
src = srcFn(**kwds) if (
self.source is None or self.srcIsFn
) else self.source
elif argInfo.args and parameters:
src = srcFn(*map(lambda i:parameters[i],
argInfo.args)) if (
self.source is None or self.srcIsFn
) else self.source
else:
src = srcFn() if (
self.source is None or self.srcIsFn
) else self.source
            isInfoResource = (isinstance(response.code,int) and
                              response.code == 200
                             ) or (isinstance(response.code,basestring) and
                                   response.code.lower() == '200 ok')
if not isInfoResource:
#If response is not a 200 then we just return it (since we can't
# be invoking an XSLT HTTP operation)
if self.etag_result:
self.setEtagToResultTreeHash(src)
return src
else:
if isinstance(src,tuple) and len(src)==2:
src,newParams = src
parameters.update(newParams)
authenticatedUser = request.environ.get('REMOTE_USER')
if authenticatedUser:
parameters[u'user'] = authenticatedUser
elif u'user' in parameters:
del parameters[u'user']
if self.clientSide:
from amara.bindery import parse
doc = parse(src)
pi = doc.xml_processing_instruction_factory(
u"xml-stylesheet",
u'href="%s" type="text/xsl"'%self.transform)
doc.xml_insert(0,pi)
if self.etag_result:
self.setEtagToResultTreeHash(src)
return doc
else:
rtStream = regular_string_io()
                    #Unicode input must be supplied via the (less efficient) StringIO
                    #stream; see http://docs.python.org/2/library/stringio.html#cStringIO.StringIO
                    #and the corresponding limitation of akara's transform function
# print src, type(src), self.transform
# src = regular_string_io(src) if isinstance(src,unicode) else c_string_io(src)
src = src.encode('utf-8') if isinstance(src,unicode) else src
transform(src,self.transform,params=parameters,output=rtStream)
rt = rtStream.getvalue()
if self.etag_result:
self.setEtagToResultTreeHash(rt)
return rt
return innerHandler | Akamu | /Akamu-0.7.tar.gz/Akamu-0.7/lib/xslt/__init__.py | __init__.py |
import os
from cStringIO import StringIO
from akamu.config.dataset import ConnectToDataset, ReplaceGraph, ClearGraph
from amara.xslt import transform
from rdflib.store import Store
from rdflib.Graph import Graph
from rdflib import URIRef
def GetFNameFromPath(path):
return path.split('/')[-1]
layercake_mimetypes = {
'application/rdf+xml' : 'xml',
'text/n3' : 'n3',
'text/turtle' : 'turtle',
#'text/plain' : 'nt'
}
layercake_parse_mimetypes = {
'application/rdf+xml' : 'xml',
'text/n3' : 'n3',
'text/turtle' : 'n3',
#'text/plain' : 'nt'
}
XML_MT = 'application/xml'
class Manager(object):
def __init__(self,root,datasetName=None,graphUriFn=None,transforms4Dir=None):
self.root = root
self.datasetName = datasetName
self.graphUriFn = graphUriFn
self.transforms4Dir = transforms4Dir
def hasResource(self,path):
return os.path.exists(
os.path.join(
self.root,
path[1:] if path[0] == '/' else path
)
)
def getFullPath(self,path):
return os.path.join(self.root,path[1:] if path[0] == '/' else path)
def findTransform(self,path):
parentDir = '/'.join(path.split('/')[:-1]) if path.find('/')+1 else '/'
rootXform = self.transforms4Dir.get('/')
xform = self.transforms4Dir.get(parentDir)
if xform:
return xform
elif len(path.split('/'))>1:
return self.findTransform(parentDir)
elif rootXform:
return rootXform
def synch(self,path=None,parameters=None):
parameters = parameters if parameters else {}
if path:
graphUri = URIRef(self.graphUriFn(path,GetFNameFromPath(path)))
params = {
u'path' : path,
u'graph-uri': graphUri
}
params.update(parameters)
xFormPath = self.findTransform(path)
rt = transform(
self.getResource(path).getContent(),
self.getResource(xFormPath).getContent(),
params=params
)
ReplaceGraph(
self.datasetName,
graphUri,
StringIO(rt),
format='xml',
storeName=not isinstance(self.datasetName,Store),
baseUri=graphUri
)
else:
            raise NotImplementedError(
                "Synchronizing the entire tree (no path given) is not implemented")
def getResource(self,path):
return Resource(self,path)
def deleteResource(self,path):
self.getResource(path).delete()
def createResource(self,path,content,parameters=None):
parameters = parameters if parameters else {}
res = Resource(self,path)
res.update(content,parameters)
return res
class Resource(object):
def __init__(self,manager,path):
self.manager = manager
self.path = path
def delete(self):
os.remove(self.manager.getFullPath(self.path))
graphUri = URIRef(
self.manager.graphUriFn(
self.path,GetFNameFromPath(self.path))
)
ClearGraph(
self.manager.datasetName,
graphUri,
storeName=not isinstance(self.manager.datasetName,Store))
def update(self,content,parameters=None):
parameters = parameters if parameters else {}
f=open(self.manager.getFullPath(self.path),'w')
f.write(content)
f.close()
self.manager.synch(self.path,parameters)
def getContent(self,mediaType=None):
if mediaType is None or mediaType == XML_MT:
return open(self.manager.getFullPath(self.path)).read()
else:
store = ConnectToDataset(self.manager.datasetName)
            graphUri = URIRef(self.manager.graphUriFn(self.path,GetFNameFromPath(self.path)))
graph = Graph(store,identifier=graphUri)
return graph.serialize(format=layercake_mimetypes[mediaType]) | Akamu | /Akamu-0.7.tar.gz/Akamu-0.7/lib/diglot/__init__.py | __init__.py |
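# A minimal usage sketch, assuming a configured dataset named 'myDataset' and
# a trivial graph-naming function (both hypothetical):
#
#   mgr = Manager('/var/diglot', 'myDataset',
#                 graphUriFn=lambda path, fName: 'http://example.com' + path,
#                 transforms4Dir={'/': '/grddl.xslt'})
#   res = mgr.createResource('/foo.xml', '<doc/>')
#   rdf = res.getContent(mediaType='application/rdf+xml')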
__author__ = 'chimezieogbuji'
import os, imp, random, string, datetime, hashlib
from cStringIO import StringIO
from akamu.config.diglot import GetDiglotManager
from akamu.diglot import layercake_mimetypes, XML_MT, GetFNameFromPath
from wsgiref.util import shift_path_info, request_uri
from akara import request
from amara.lib import U, inputsource
from webob import Request
from amara.xslt import transform
from amara.bindery import parse
from amara.xupdate import reader, XUpdateError, apply_xupdate
from rdflib import URIRef
from rdflib.Graph import Graph
from akamu.config.dataset import ConnectToDataset
from akamu.wheezy import WheezyCachingAdapterSetup
GRDDL_NS = u'http://www.w3.org/2003/g/data-view#'
XUPDATE_NS = u'http://www.xmldb.org/xupdate'
XSLT_MT = u'application/xslt+xml'
def updatePolicy4File(filePath,policy):
mtime_dt = datetime.datetime.fromtimestamp(os.path.getmtime(filePath))
policy.last_modified(mtime_dt)
policy.etag(hashlib.sha1(open(filePath).read()).hexdigest())
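# Illustrative use, following the wheezy.http conventions relied on later in
# this module (the file path is hypothetical):
#
#   policy = environ['wheezy.http.HTTPCachePolicy']('public')
#   updatePolicy4File('/var/diglot/foo.xml', policy)
#   environ['wheezy.http.cache_policy'] = policy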
def HandleGET(req,mgr,environ,root,start_response):
path = request_uri(environ,0).split(root)[-1]
res = mgr.getResource(path)
content = res.getContent()
requestedXML = XML_MT in req.accept
requestedRDF = req.accept.best_match(list(layercake_mimetypes))
if 'HTTP_ACCEPT' not in environ or (requestedXML and not requestedRDF):
start_response("200 Ok",
[("Content-Type", XML_MT),
("Content-Length", len(content))]
)
return content
elif requestedRDF and requestedXML:
#requested both RDF media types and POX
doc=parse(content)
docRoot = doc.xml_select('/*')[0]
transformPath = os.path.join(root,mgr.findTransform(path)[1:])
docRoot.xmlns_attributes[u'grddl'] = GRDDL_NS
docRoot.xml_attributes[(GRDDL_NS,u'transformation')] = transformPath
stream = StringIO()
doc.xml_write(stream=stream)
rt = stream.getvalue()
start_response("200 Ok",
[("Content-Type", XML_MT),
("Content-Length", len(rt))]
)
return rt
elif requestedRDF:
store = ConnectToDataset(mgr.datasetName)
preferredMT = req.accept.best_match(layercake_mimetypes)
graphUri = URIRef(
mgr.graphUriFn(
path,
GetFNameFromPath(path))
)
rt = Graph(store,identifier=graphUri).serialize(layercake_mimetypes[preferredMT])
start_response("200 Ok",
[("Content-Type", preferredMT),
("Content-Length", len(rt))]
)
return rt
    else:
        raise ValueError(
            'Unsupported Accept header: %r' % environ.get('HTTP_ACCEPT'))
def HandlePATCH(req,mgr,environ,root,start_response):
path = request_uri(environ,0).split(root)[-1]
res = mgr.getResource(path)
content = res.getContent()
if req.content_type == XSLT_MT:
newContent = transform(content,req.body)
res.update(newContent,parameters=req.params)
start_response("200 Ok",[])
return ''
else:
payloadDoc = parse(req.body)
if payloadDoc.xml_select('/xu:modifications',prefixes={u'xu' : XUPDATE_NS}):
            source = inputsource(content, 'source')
xupdate = inputsource(req.body, 'xupdate-source')
newContent = apply_xupdate(source, xupdate)
rt=StringIO()
newContent.xml_write(stream=rt)
res.update(rt.getvalue(),parameters=req.params)
start_response("200 Ok",[])
return ''
else:
rt = 'PATCH body must be XSLT (application/xslt+xml) or XUpdate'
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
def HandlePOST(req,mgr,diglotPath,base=None):
targetContent = mgr.getResource(diglotPath).getContent()
if req.content_type == XML_MT:
#@TODO: handle parameters
        return transform(req.body,targetContent), 'application/rdf+xml'
else:
doc=parse(targetContent)
payloadGraph = Graph().parse(
StringIO(req.body),
format = layercake_mimetypes[req.content_type],
publicID = URIRef(base) if base else None
)
for revXFormSrc in doc.xml_select(
'/xsl:stylesheet/ggs:reverse_transform',prefixes={
u'ggs': u'http://code.google.com/p/akamu/wiki/DiglotFileSystemProtocol#Bidirectional_transformations',
u'xsl': u'http://www.w3.org/1999/XSL/Transform'}):
fnCode = revXFormSrc.xml_select('string(text())').strip()
module = imp.new_module('inverseMap')
exec fnCode in module.__dict__
return module.ReverseTransform(payloadGraph), XML_MT
def HandleDirectoryPOST(req,start_response,mgr,newPath,newDiglotPath):
if req.content_type == XML_MT:
mgr.createResource(newPath,req.body,parameters=req.params)
start_response("201 Created",
[("Location", newDiglotPath),
("Content-Length", 0)])
return ''
else:
requestedRDF = req.accept.best_match(list(layercake_mimetypes))
if not requestedRDF:
rt = "Didn't provide an RDF Content-type header"
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
else:
base = req.GET.get('base')
payloadGraph = Graph().parse(
StringIO(req.body),
format = layercake_mimetypes[req.content_type],
publicID = URIRef(base) if base else None
)
xform = mgr.findTransform(newPath)
targetContent = mgr.getResource(xform).getContent()
doc=parse(targetContent)
revXForm = doc.xml_select(
'/xsl:stylesheet/ggs:reverse_transform',prefixes={
u'ggs': u'http://code.google.com/p/akamu/wiki/DiglotFileSystemProtocol#Bidirectional_transformations',
u'xsl': u'http://www.w3.org/1999/XSL/Transform'})
if revXForm:
revXFormSrc = revXForm[0]
fnCode = revXFormSrc.xml_select('string(text())').strip()
module = imp.new_module('inverseMap')
exec fnCode in module.__dict__
xmlDoc = module.ReverseTransform(payloadGraph)
mgr.createResource(newPath,xmlDoc,parameters=req.params)
start_response("201 Created",
[("Location", newDiglotPath),
("Content-Length", 0)])
return ''
else:
rt = "Target XSLT doesn't have a reverse transform"
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
def random_filename(chars=string.hexdigits, length=16, prefix='',
suffix='', verify=True, attempts=10):
"""
From - http://ltslashgt.com/2007/07/23/random-filenames/
"""
    for attempt in range(attempts):
        filename = ''.join([random.choice(chars) for i in range(length)])
        filename = prefix + filename + suffix
        if not verify or not os.path.exists(filename):
            return filename
    raise RuntimeError('Unable to generate an unused filename in %d attempts' % attempts)
class xforms_grddl(object):
"""
Decorator for service for an XForms documents
that manage Diglot resources (served from a
mounted Diglot Filesystem Protocol instance)
Can be bound for use with a particular XForm document or
the document used can be provided as a parameter to the
service invokation.
"""
def __init__(self,
instanceId='diglot-resource',
submissionId='diglot-submission',
hostDocumentBase=None,
hostDocument=None,
diglotRoot=None,
hostDocumentParam='document',
instanceAttribute='src',
instanceParam='src'):
self.submissionId = submissionId
self.diglotRoot = diglotRoot
self.hostDocumentBase = hostDocumentBase
self.instanceParam = instanceParam
self.instanceId = instanceId
self.instanceAttribute = instanceAttribute
self.hostDocument = hostDocument
self.hostDocumentParam = hostDocumentParam
def __call__(self, func):
def innerHandler(*args, **kwds):
req = Request(request.environ)
xformDocument = req.params.get(self.hostDocumentParam,self.hostDocument)
xformDocument = os.path.join(
self.hostDocumentBase,
xformDocument) if self.hostDocumentBase else xformDocument
instancePath = req.params[self.instanceParam]
instancePath = os.path.join(
self.diglotRoot,
instancePath) if self.diglotRoot else instancePath
updateSrc =\
"""<?xml version="1.0"?>
<xupdate:modifications
version="1.0"
xmlns:xhtml="http://www.w3.org/1999/xhtml"
xmlns:xf="http://www.w3.org/2002/xforms"
xmlns:xupdate="http://www.xmldb.org/xupdate">
<xupdate:update select="/xhtml:html/xhtml:head/xf:model/xf:instance[@id = '%s']/@%s">%s</xupdate:update>
<xupdate:update select="/xhtml:html/xhtml:head/xf:model/xf:submission[@id = '%s']/@resource">%s</xupdate:update>
</xupdate:modifications>
"""%(
self.instanceId,
self.instanceAttribute,
instancePath,
self.submissionId,
instancePath
)
source = inputsource(xformDocument, 'source')
xupdate = inputsource(updateSrc, 'xupdate-source')
doc = apply_xupdate(source, xupdate)
aStream = StringIO()
doc.xml_write(stream=aStream)
return aStream.getvalue()
return innerHandler
class grddl_graphstore_resource(object):
"""
Extension of SPARQL Graph Store Protocol for use with Diglot filesystem
Payload are GRDDL source documents,
GET requests without Accept headers or with an 'application/xml' Accept header
returns the XML document, those with RDF media types and application/xml return
GRDDL source documents with references to transformation for the directory, and
Accept headers with RDF media types return the RDF faithful rendition of the
Diglot resource.
XML posted to transforms in the diglot system invoke the transformation of
the document, returning the GRDDL result graph. RDF posted to transforms
with reverse mappings return the corresponding XML document
XUpdate sent via HTTP PATCH request to diglot resources will be applied to them.
Closed XSLT transforms sent via HTTP PATCH to diglot resources will also be applied,
replacing them with the result
HEAD requests, are the same as GET but without any returned content
Mounts an implementation of the protocol at the specified root and using the
given graphUriFn funciton for use with the manager
"""
def __init__(self, root, graphUriFn, caching = False, cacheability = None):
self.root = root
self.graphUriFn = graphUriFn
self.cachingSetup = WheezyCachingAdapterSetup(
queries=['base'],
environ=['HTTP_ACCEPT'],
asFn = True
) if caching else None
        if cacheability and self.cachingSetup:
            self.cachingSetup.name = 'wheezyApp'
        self.cacheability = cacheability if cacheability else 'public'
def introspect_resource(self,req,mgr):
diglotPath = req.path.split(self.root)[-1]
isXslt = diglotPath in mgr.transforms4Dir.values()
return diglotPath, isXslt
def handleResourceCacheability(self,mgr,diglotPath,cacheability,environ):
policy = environ['wheezy.http.HTTPCachePolicy'](cacheability)
updatePolicy4File(
mgr.getFullPath(diglotPath),
policy
)
environ['wheezy.http.cache_policy'] = policy
environ['wheezy.http.cache_dependency'] = diglotPath
def invalidateResourceCache(self,cacheability,environ,diglotPath):
policy = environ['wheezy.http.HTTPCachePolicy'](cacheability)
environ['akamu.wheezy.invalidate'](diglotPath)
environ['wheezy.http.cache_policy'] = policy
def __call__(self, func):
def innerHandler(environ, start_response):
req = Request(environ)
mgr = GetDiglotManager(self.graphUriFn)
diglotPath, isXslt = self.introspect_resource(req,mgr)
try:
if req.method in ['HEAD','GET']:
rt = HandleGET(req,mgr,environ,self.root,start_response)
if req.method == 'GET':
if self.cachingSetup:
self.handleResourceCacheability(
mgr,
diglotPath,
self.cacheability,
environ
)
return rt
else:
return ''
elif req.method == 'PUT':
_path = mgr.getFullPath(diglotPath)
if not os.path.exists(_path):
mgr.createResource(diglotPath,req.body,parameters=req.params)
if self.cachingSetup: environ['wheezy.http.noCache'] = True
start_response("201 Created",[])
else:
mgr.getResource(diglotPath).update(req.body,parameters=req.params)
environ['wheezy.http.noCache'] = True
start_response("204 No Content",[])
if self.cachingSetup:
self.invalidateResourceCache(
self.cacheability,
environ,
diglotPath
)
return ''
elif req.method == 'DELETE':
mgr.getResource(diglotPath).delete()
msg = '%s has been deleted'%diglotPath
start_response("204 No Content",
[("Content-Length", len(msg))]
)
if self.cachingSetup:
self.invalidateResourceCache(
self.cacheability,
environ,
diglotPath
)
environ['wheezy.http.noCache'] = True
return msg
elif req.method == 'PATCH':
rt = HandlePATCH(req,mgr,environ,self.root,start_response)
if self.cachingSetup:
self.invalidateResourceCache(
self.cacheability,
environ,
diglotPath
)
environ['wheezy.http.noCache'] = True
return rt
elif req.method == 'POST' and isXslt:
rt,cont_type = HandlePOST(req,mgr,diglotPath,base=req.GET.get('base'))
start_response("200 Ok",
[("Content-Type" , cont_type),
("Content-Length", len(rt))]
)
if self.cachingSetup: environ['wheezy.http.noCache'] = True
return rt
elif req.method == 'POST' and os.path.isdir(mgr.getFullPath(diglotPath)):
randFileN = random_filename()+'.xml'
newPath = os.path.join(diglotPath,randFileN)
newDiglotPath = os.path.join(self.root,diglotPath[1:],randFileN)
if self.cachingSetup: environ['wheezy.http.noCache'] = True
return HandleDirectoryPOST(
req,
start_response,
mgr,
newPath,
newDiglotPath
)
else:
start_response("405 Method Not Allowed",[])
return "Method not allowed for this resource"
except IOError, e:
                msg = str(e)
                start_response("404 Not Found",[("Content-Length", len(msg))])
return msg
if self.cachingSetup:
return self.cachingSetup(innerHandler)
else:
return innerHandler | Akamu | /Akamu-0.7.tar.gz/Akamu-0.7/lib/protocol/grddlstore.py | grddlstore.py |
__author__ = 'chimezieogbuji'
import os, string, random, pyparsing, time, re
from cStringIO import StringIO
from webob import Request
try:
from Triclops.Server import WsgiApplication, SD_FORMATS, MIME_SERIALIZATIONS
except ImportError: pass
from akamu.config.dataset import ConnectToDataset, GetGraphStoreForProtocol, ConfigureTriclops, GetExternalGraphStoreURL
from akamu.diglot import layercake_mimetypes, XML_MT, layercake_parse_mimetypes
from amara.lib import iri
from amara.xslt import transform
from akara import request
from wsgiref.util import shift_path_info, request_uri
from rdflib.Graph import Graph,ConjunctiveGraph
from rdflib.sparql import parser as sparql_parser
from rdflib import OWL, RDF, RDFS, URIRef, BNode, Namespace, Literal
from akamu.util import enum
from amara.lib.iri import uri_to_os_path
RESULT_FORMAT = enum(CSV='CSV',TSV='TSV',JSON='JSON',XML='XML')
DefaultGraph_NS = Namespace('tag:metacognition.info,2012:DefaultGraphs#')
#Maps ticket values to the id of the thread evaluating the corresponding query
#(referenced via 'global ticketLookup' in sparql_rdf_protocol below)
ticketLookup = {}
def random_filename(chars=string.hexdigits, length=16, prefix='',
suffix='', verify=True, attempts=10):
"""
From - http://ltslashgt.com/2007/07/23/random-filenames/
"""
    for attempt in range(attempts):
        filename = ''.join([random.choice(chars) for i in range(length)])
        filename = prefix + filename + suffix
        if not verify or not os.path.exists(filename):
            return filename
    raise RuntimeError('Unable to generate an unused filename in %d attempts' % attempts)
class NoEmptyGraphSupport(Exception):
def __init__(self):
super(NoEmptyGraphSupport, self).__init__(
"Implementation does not support empty graphs"
)
def RequestedGraphContent(req,store,datasetName):
graphParamValue = req.params.get('graph')
if 'default' in req.params:
graph = Graph(identifier=DefaultGraph_NS[datasetName],store=store)
else:
graphIri = graphParamValue if graphParamValue else req.url
graph = Graph(identifier=URIRef(graphIri),store=store)
return graph
def HandleGET(req,environ,start_response,store,datasetName):
graph = RequestedGraphContent(req,store,datasetName)
if not graph and 'default' not in req.params:
raise NoEmptyGraphSupport()
requestedRDF = req.accept.best_match(list(layercake_mimetypes))
if 'HTTP_ACCEPT' not in environ or not requestedRDF:
requestedMT = 'application/rdf+xml'
format = 'pretty-xml'
elif requestedRDF:
preferredMT = req.accept.best_match(layercake_mimetypes)
requestedMT = preferredMT
format = layercake_mimetypes[preferredMT]
content = graph.serialize(format=format)
start_response("200 Ok",
[("Content-Type" , requestedMT),
("Content-Length", len(content))]
)
return content
def HandlePUT(req,start_response,store,datasetName):
graph = RequestedGraphContent(req,store,datasetName)
nonexistent = not graph and 'default' not in req.params
if not req.content_type:
rt = "Didn't provide an RDF Content-type header"
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
else:
        format = layercake_parse_mimetypes.get(
            req.content_type,
            layercake_mimetypes.get(req.content_type))
try:
payloadGraph = Graph().parse(StringIO(req.body),format = format)
except Exception, e:
rt = e.message
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
print req.url, graph.identifier, req.params,bool(payloadGraph)
if payloadGraph:
graph.remove((None,None,None))
for s,p,o in payloadGraph:
graph.add((s,p,o))
store.commit()
if nonexistent:
start_response("201 Created",[])
else:
start_response("204 No Content",[])
return ""
else:
#Empty graph
# raise NoEmptyGraphSupport()
start_response("200 Ok",[])
return "NOOP: server doesn't support empty graphs"
def HandleDELETE(req,start_response,store,datasetName):
graph = RequestedGraphContent(req,store,datasetName)
if not graph and 'default' not in req.params:
raise NoEmptyGraphSupport()
else:
graph.remove((None,None,None))
store.commit()
start_response("200 Ok",[])
return ""
def handleTrailingSlash(url,strip=True):
if strip:
return url if url[-1]!='/' else url[:-1]
else:
return url+'/' if url[-1]!='/' else url
def HandlePOST(req,start_response,store,graphStore,externalGS,datasetName):
graph = RequestedGraphContent(req,store,datasetName)
if not req.content_type:
rt = "Didn't provide an RDF Content-type header"
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
if handleTrailingSlash(req.url) == handleTrailingSlash(graphStore):
#If the request IRI identifies the underlying Graph Store, the origin
#server MUST create a new RDF graph comprised of the statements in
#the RDF payload and return a designated graph IRI associated with
#the new graph. The new graph IRI should be specified in the Location
#HTTP header along with a 201 Created code and be different from the
#request IRI.
new_filename = random_filename(suffix=req.params.get('suffix',''))
new_location = iri.absolutize(
new_filename,
handleTrailingSlash(graphStore,strip=False)
)
new_location = URIRef(new_location)
external_new_location = iri.absolutize(
new_filename,
handleTrailingSlash(
externalGS if externalGS else graphStore,
strip=False)
)
try:
Graph(identifier=new_location,store=store).parse(
StringIO(req.body),
format = layercake_parse_mimetypes[req.content_type]
)
except Exception, e:
rt = e.message
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
store.commit()
start_response("201 Created",
[("Location", external_new_location),
("Content-Length", 0)])
return ''
else:
toAdd = []
canMerge = True
if req.content_type == 'multipart/form-data':
import cgi
form = cgi.FieldStorage(
fp=StringIO(req.body),
environ=request.environ
)
try:
for multipartEntry in form:
for triple in Graph().parse(
StringIO(form.getvalue(multipartEntry)),
format = layercake_parse_mimetypes[form[multipartEntry].type]
):
s,p,o = triple
if triple not in graph:
toAdd.append((s,p,o,graph))
elif [term for term in triple if isinstance(term,BNode)]:
#Blank node in common is shared, no support for this currently
#See: http://www.w3.org/TR/rdf-mt/#defmerge
canMerge = False
break
except Exception, e:
rt = str(e)
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
else:
try:
for triple in Graph().parse(
StringIO(req.body),
format = layercake_parse_mimetypes[req.content_type]
):
s,p,o = triple
if triple not in graph:
toAdd.append((s,p,o,graph))
elif [term for term in triple if isinstance(term,BNode)]:
#Blank node in common is shared, no support for this currently
#See: http://www.w3.org/TR/rdf-mt/#defmerge
canMerge = False
break
except Exception, e:
rt = str(e)
start_response("400 Bad Request",
[("Content-Length", len(rt))])
return rt
if not canMerge:
rt = "Merge involving shared blank nodes not supported"
start_response("409 Conflict",
[("Content-Length", len(rt))])
return rt
else:
graph.addN(toAdd)
store.commit()
start_response("200 Ok",[])
return ''
class graph_store_protocol(object):
"""
"""
def __init__(self):
self.datasetName,self.gs_url = GetGraphStoreForProtocol()
self.store = ConnectToDataset(self.datasetName)
self.external_gs_url = GetExternalGraphStoreURL()
def __call__(self, func):
def innerHandler(environ, start_response):
req = Request(environ)
try:
if req.method in ['HEAD','GET']:
rt = HandleGET(req,environ,start_response,self.store,self.datasetName)
if req.method == 'GET':
return rt
else:
return ''
elif req.method == 'PUT':
return HandlePUT(req,start_response,self.store,self.datasetName)
elif req.method == 'DELETE':
return HandleDELETE(req,start_response,self.store,self.datasetName)
elif req.method == 'PATCH':
rt="PATCH not supported"
start_response("405 Method Not Allowed",
[("Content-Length", len(rt))]
)
return rt
elif req.method == 'POST':
return HandlePOST(
req,
start_response,
self.store,
self.gs_url,
self.external_gs_url,
self.datasetName)
else:
start_response("405 Method Not Allowed",[])
return "Method not allowed for this resource"
except NoEmptyGraphSupport, e:
rt = "Implementation does not support empty graphs"
start_response("404 Method Not Allowed",
[("Content-Length", len(rt))])
return rt
except NotImplementedError, e:
raise e
return innerHandler
def GetResultFormats(results,xslt_dir,result_format=RESULT_FORMAT.XML):
query_results = results.serialize(format='xml')
if result_format == RESULT_FORMAT.JSON:
serialization = transform(
query_results,
os.path.join(xslt_dir,'sparqlxml2json.xsl'),
params={ u'ignore-bnode' : True }
)
elif result_format == RESULT_FORMAT.TSV:
serialization = transform(
query_results,
os.path.join(xslt_dir,'xml-to-csv-tsv.xslt'),
params={u'standard':True}
)
else:
serialization = transform(
query_results,
os.path.join(xslt_dir,'xml-to-csv-tsv.xslt'),
params={u'standard':True,u'tsv':False}
)
return serialization
class sparql_rdf_protocol(object):
"""
Prepares a Triclops WSGI application for use to wrap
the Akara via the 'wsgi_wrapper' keyword argument of
@simple_service and @service
See: http://code.google.com/p/python-dlp/wiki/Triclops
"""
def __init__(self, root, datasetName):
self.nsBindings = { u'owl' :OWL.OWLNS,
u'rdf' :RDF.RDFNS,
u'rdfs':RDFS.RDFSNS}
self.litProps = set()
self.resProps = set()
self.root = root
self.datasetName = datasetName
self.conf = ConfigureTriclops(
self.datasetName,
self.nsBindings,
self.litProps,
self.resProps)
self.conf['endpoint'] = self.root
def __call__(self, func):
def innerHandler(environ, start_response):
# @@@ Core Application @@@
app = WsgiApplication(
self.conf,
self.nsBindings,
[],
self.litProps,
self.resProps,
Graph(),
Graph(),
set(),
Graph())
req = Request(environ)
d = req.params
query = d.get('query')
ticket = d.get('ticket')
default_graph_uri = d.get('default-graph-uri')
rtFormat = d.get('resultFormat')
if 'query' in d and len(filter(lambda i:i == 'query',d))>1:
rt = "Malformed SPARQL Query: query parameter provided twice"
status = '400 Bad Request'
response_headers = [('Content-type','text/plain'),
('Content-Length',
len(rt))]
start_response(status,response_headers)
return rt
if req.method == 'POST':
if req.content_type == 'application/sparql-query':
query = req.body
elif req.content_type == 'application/x-www-form-urlencoded':
query = req.POST.get('query')
print "## Query ##\n", query, "\n###########"
print "Default graph uri ", default_graph_uri
requestedFormat = environ.get('HTTP_ACCEPT','application/rdf+xml')
if req.method == 'POST':
assert query,"POST can only take an encoded query or a query in the body"
            if req.method == 'GET' and not query:
if requestedFormat not in SD_FORMATS:
requestedFormat = 'application/rdf+xml'
if app.ignoreQueryDataset:
targetGraph = app.buildGraph(default_graph_uri)
else:
targetGraph = app.buildGraph(default_graph_uri=None)
sdGraph = Graph()
SD_NS = Namespace('http://www.w3.org/ns/sparql-service-description#')
SCOVO = Namespace('http://purl.org/NET/scovo#')
VOID = Namespace('http://rdfs.org/ns/void#')
FORMAT = Namespace('http://www.w3.org/ns/formats/')
sdGraph.bind(u'sd',SD_NS)
sdGraph.bind(u'scovo',SCOVO)
sdGraph.bind(u'void',VOID)
sdGraph.bind(u'format',FORMAT)
service = BNode()
datasetNode = BNode()
if app.endpointURL:
sdGraph.add((service,SD_NS.endpoint,URIRef(app.endpointURL)))
sdGraph.add((service,SD_NS.supportedLanguage ,SD_NS.SPARQL10Query))
sdGraph.add((service,RDF.type ,SD_NS.Service))
sdGraph.add((service,SD_NS.defaultDatasetDescription,datasetNode))
sdGraph.add((service,SD_NS.resultFormat,FORMAT['SPARQL_Results_XML']))
sdGraph.add((datasetNode,RDF.type,SD_NS.Dataset))
for graph in targetGraph.store.contexts():
graphNode = BNode()
graphNode2 = BNode()
sdGraph.add((datasetNode,SD_NS.namedGraph,graphNode))
sdGraph.add((graphNode,SD_NS.name,URIRef(graph.identifier)))
sdGraph.add((graphNode,SD_NS.graph,graphNode2))
sdGraph.add((graphNode,RDF.type,SD_NS.NamedGraph))
sdGraph.add((graphNode2,RDF.type,SD_NS.Graph))
noTriples = Literal(len(graph))
sdGraph.add((graphNode2,VOID.triples,noTriples))
doc = sdGraph.serialize(
format=MIME_SERIALIZATIONS[requestedFormat])
status = '200 OK'
response_headers = [
('Content-type' , requestedFormat),
('Content-Length', len(doc))
]
start_response(status,response_headers)
return doc
else:
                assert req.method in ('GET','POST'),"Either POST or GET method!"
if app.ignoreQueryDataset:
app.targetGraph = app.buildGraph(default_graph_uri)
else:
app.targetGraph = app.buildGraph(default_graph_uri=None)
for pref,nsUri in app.nsBindings.items():
app.targetGraph.bind(pref,nsUri)
origQuery = query
describePattern=re.compile(r'DESCRIBE\s+\<(?P<iri>[^\>]+)\>',re.DOTALL)
describeQueryMatch = describePattern.match(query)
if describeQueryMatch:
iri=URIRef(describeQueryMatch.group('iri'))
g=Graph()
for p,u in app.targetGraph.namespaces():
g.bind(p,u)
for t in app.targetGraph.triples((None,None,iri)):
g.add(t)
for t in app.targetGraph.triples((iri,None,None)):
g.add(t)
rt=g.serialize(format='pretty-xml')
status = '200 OK'
response_headers = [('Content-type','application/rdf+xml'),
('Content-Length',
len(rt))]
start_response(status,response_headers)
return rt
try:
query=sparql_parser.parse(query)
except pyparsing.ParseException, e:
rt = "Malformed SPARQL Query: %s"%repr(e)
status = '400 Bad Request'
response_headers = [('Content-type','text/plain'),
('Content-Length',
len(rt))]
start_response(status,response_headers)
return rt
start = time.time()
if app.ignoreBase and hasattr(query,'prolog') and query.prolog:
query.prolog.baseDeclaration=None
if app.ignoreQueryDataset and hasattr(query.query,'dataSets') and query.query.dataSets:
print "Ignoring query-specified datasets: ", query.query.dataSets
query.query.dataSets = []
if not app.proxy and ticket:
#Add entry for current thread in ticket -> thread id lookup
global ticketLookup
ticketLookup[ticket]=app.targetGraph.store._db.thread_id()
#Run the actual query
rt = app.targetGraph.query(origQuery,
initNs=app.nsBindings,
DEBUG=app.debugQuery,
parsedQuery=query)
print "Time to execute SPARQL query: ", time.time() - start
qRT = rt.serialize(format='xml')
app.targetGraph.close()
print "Time to execute and seralize SPARQL query: ", time.time() - start
print "# of bindings: ", rt.noAnswers
if rtFormat in ['xml','csv'] or not rtFormat:
# from amara.bindery import parse
# doc = parse(src)
# pi = doc.xml_processing_instruction_factory(
# u"xml-stylesheet",
# u'href="%s" type="text/xsl"'%self.transform)
# doc.xml_insert(0,pi)
# return doc
rt = qRT
imt='application/sparql-results+xml'
# rtDoc = NonvalidatingReader.parseString(qRT,
# 'tag:nobody@nowhere:2007:meaninglessURI')
# stylesheetPath = rtFormat == 'xml' and '/xslt/xml-to-html.xslt' or '/xslt/xml-to-csv.xslt'
# imt='application/xml'
#
# pi = rtDoc.createProcessingInstruction("xml-stylesheet",
# "type='text/xml' href='%s'"%stylesheetPath)
# #Add a stylesheet instruction to direct browsers how to render the result document
# rtDoc.insertBefore(pi, rtDoc.documentElement)
# out = StringIO()
# PrettyPrint(rtDoc, stream=out)
# rt = out.getvalue()
elif rtFormat == 'csv-pure':
imt='text/plain'
rt=app.csvProcessor.run(InputSource.DefaultFactory.fromString(qRT))
status = '200 OK'
response_headers = [('Content-type',imt),
('Content-Length',len(rt))]
start_response(status, response_headers)
return rt
return innerHandler | Akamu | /Akamu-0.7.tar.gz/Akamu-0.7/lib/protocol/sparql.py | sparql.py |
import atexit
import os
from os.path import abspath, dirname
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import urllib2
import urlparse
import httplib
# XXX I only use one function from here. Bring it into this file?
import python_support
######
# Set 'False' to keep the temporary directory used for the server tests
#DELETE_TEMPORARY_SERVER_DIRECTORY = False
DELETE_TEMPORARY_SERVER_DIRECTORY = True
if "AKARA_SAVE" in os.environ:
DELETE_TEMPORARY_SERVER_DIRECTORY = False
######
ATOM_ENTRIES = os.path.join(dirname(abspath(__file__)), "resource", "atom")
assert os.path.exists(ATOM_ENTRIES), "no atom entries directory?"
# All of the tests use a single server instance.
# This is started the first time it's needed.
# It is stopped during test shutdown.
def _override_server_uri():
server = os.environ.get("AKARA_TEST_SERVER", None)
if not server:
return None
assert "/" not in server
return "http://" + server + "/"
SERVER_URI = _override_server_uri()
config_root = None
config_filename = None
server_pid = None
server_did_not_start = False
# Create a temporary directory structure for Akara.
# Needs a configuration .ini file and the logs subdirectory.
def create_server_dir(port):
global config_root, config_filename
config_root = tempfile.mkdtemp(prefix="akara_test_")
config_filename = os.path.join(config_root, "akara_test.config")
f = open(config_filename, "w")
f.write("""
class Akara:
ConfigRoot = %(config_root)r
ServerRoot = 'http://dalkescientific.com/'
InternalServerRoot = 'http://localhost:%(port)s/'
Listen = 'localhost:%(port)s'
LogLevel = 'DEBUG'
MinSpareServers = 3
# These affect test_server.py:test_restart
MaxServers = 5
MaxRequestsPerServer = 5
class atomtools:
entries = %(atom_entries)r
feed_envelope = '''<feed xmlns="http://www.w3.org/2005/Atom">
<title>Feed me!</title><id>http://example.com/myfeed</id></feed>'''
class static:
paths = {
"resource": %(resource_dir)r,
"static": %(resource_dir)r + '/static',
}
# Let the XSLT test reach directly into the filesystem
class xslt:
uri_space = 'file:///'
MODULES = ["akara.demo.akara_tests",
"akara.demo.akara_tests",
"akara.demo.atomtools",
"akara.demo.calaistools",
"akara.demo.calweb",
"akara.demo.collection",
"akara.demo.echo",
"akara.demo.icaltools",
"akara.demo.luckygoogle",
"akara.demo.markuptools",
"akara.demo.method_dispatcher",
"akara.demo.moin2atomentries",
"akara.demo.moincms",
"akara.demo.moinrest",
"akara.demo.oaitools",
"akara.demo.rdfatools",
"akara.demo.static",
"akara.demo.statstools",
"akara.demo.svntools",
"akara.demo.unicodetools",
"akara.demo.wwwlogviewer",
"akara.demo.xslt",
]
""" % dict(config_root = config_root,
port = port,
atom_entries = os.path.join(ATOM_ENTRIES, "*.atom"),
resource_dir = os.path.join(dirname(abspath(__file__)), "resource"),
))
f.close()
os.mkdir(os.path.join(config_root, "logs"))
#FIXME: add back config for:
#[collection]
#folder=/tmp/collection
# Remove the temporary server configuration directory,
# if I created it
def remove_server_dir():
global server_pid, config_root
if server_pid is not None:
# I created the server, I kill it
os.kill(server_pid, signal.SIGTERM)
server_pid = None
if config_root is not None:
# Very useful when doing development and testing.
# Would like this as a command-line option somehow.
if DELETE_TEMPORARY_SERVER_DIRECTORY:
shutil.rmtree(config_root)
else:
print "Test server configuration and log files are in", config_root
config_root = None
atexit.register(remove_server_dir)
# Start a new Akara server in server mode.
def start_server():
global server_pid, server_did_not_start
# There's a PID if the spawning worked
assert server_pid is None
# Can only tell if the server starts by doing a request
# If the request failed, don't try to restart.
if server_did_not_start:
raise AssertionError("Already tried once to start the server")
port = python_support.find_unused_port()
create_server_dir(port)
args = ['akara', "--config-file", config_filename, "start"]
try:
result = subprocess.call(args)
except:
print "Failed to start", args
raise
# Akara started, but it might have failed during startup.
# Report errors by reading the error log
if result != 0:
f = open(os.path.join(config_root, "logs", "error.log"))
err_text = f.read()
raise AssertionError("Could not start %r:\n%s" % (args, err_text))
# Akara server started in the background. The main
# process will only exit with a success (0) if the
# pid file has been created.
f = open(os.path.join(config_root, "logs", "akara.pid"))
line = f.readline()
f.close()
# Save the pid information now so the server will be shut down
# if there are any problems.
temp_server_pid = int(line)
# Check to see that the server process really exists.
# (Is this overkill? Is this portable for Windows?)
os.kill(temp_server_pid, 0) # Did Akara really start?
server_did_not_start = True
check_that_server_is_available(port)
server_did_not_start = False
# Looks like it's legit!
server_pid = temp_server_pid
return port
# It takes the server a little while to get started.
# In the worst case (trac #6), top-level import failures
# will loop forever, and the server won't hear requests.
def check_that_server_is_available(port):
old_timeout = socket.getdefaulttimeout()
try:
# How long do you want to wait?
socket.setdefaulttimeout(20.0)
try:
urllib2.urlopen("http://localhost:%d/" % port).read()
except urllib2.URLError, err:
print "Current error log is:"
f = open(os.path.join(config_root, "logs", "error.log"))
err_text = f.read()
print err_text
raise
finally:
socket.setdefaulttimeout(old_timeout)
# Get the server URI prefix, like "http://localhost:8880/"
def server():
global SERVER_URI
if SERVER_URI is None:
# No server specified and need to start my own
port = start_server()
SERVER_URI = "http://localhost:%d/" % port
return SERVER_URI
def httplib_server():
url = server()
# <scheme>://<netloc>/<path>?<query>#<fragment>
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
assert path in ("", "/"), "Cannot handle path in %r" % url
assert query == "", "Cannot handle query in %r" % url
assert fragment == "", "Cannot handle fragment in %r" % url
assert "@" not in netloc, "Cannot handle '@' in %r" % url
if ":" in netloc:
host, port = netloc.split(":")
port = int(port)
else:
host = netloc
port = 80
conn = httplib.HTTPConnection(host, port, strict=True)
return conn | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/test/server_support.py | server_support.py |
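# Illustrative use of the helpers above from a test module:
#
#   conn = httplib_server()
#   conn.request("GET", "/")
#   response = conn.getresponse()
#   assert response.status == 200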
# The Python license is available from http://www.python.org/psf/license/
# Note that the source file (test/support.py) contained no copyright statement.
# XXX add copyright here
import socket
HOST = 'localhost'

class TestFailed(Exception):
    """Raised when a test precondition is violated (defined here because this
    file is adapted from CPython's test support, which provides it)."""
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
socket.error will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
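# Illustrative use: grab an ephemeral port for a server subprocess (see
# server_support.start_server for a real caller):
#
#   port = find_unused_port()
#   # ... pass the port to the external server process via its CLI/config ...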
def bind_port(sock, host=HOST):
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
raise TestFailed("tests should never set the SO_REUSEPORT " \
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/test/python_support.py | python_support.py |
#Goes in plugin/formatter under moin data dir
from MoinMoin.formatter import FormatterBase
from MoinMoin import config, wikiutil
from MoinMoin.Page import Page
from amara import tree
from amara.writers.struct import structwriter, E, NS, ROOT, RAW
from amara.bindery.html import markup_fragment
from amara.lib import inputsource
from amara.lib import U
#This check ensures it's OK to just use Amara's U function directly
if config.charset != 'utf-8':
#Note: we should never get here unless the user has messed up their config
raise RuntimeError('Only UTF-8 encoding supported by Moin, and thus by us')
class Formatter(FormatterBase):
"""
Send XML data.
"""
def __init__(self, request, **kw):
FormatterBase.__init__(self, request, **kw)
self._current_depth = 1
self._base_depth = 0
self.in_pre = 0
self._doc = tree.entity()
self._curr = self._doc
#self._writer = structwriter(indent=u"yes", encoding=config.charset)
return
def macro(self, macro_obj, name, args, markup=None):
        #Macro responses are (unescaped) markup. Do what little clean-up we can, and cross fingers
output = FormatterBase.macro(self, macro_obj, name, args, markup=markup)
#response is Unicode
if output:
output_body = markup_fragment(inputsource.text(output.encode(config.charset)))
#print "macro 2", repr(output)
self._curr.xml_append(output_body)
return ''
def startDocument(self, pagename):
self._curr = tree.element(None, u's1')
self._curr.xml_attributes[None, u'title'] = U(pagename)
self._doc.xml_append(self._curr)
return ''
def endDocument(self):
#Yuck! But Moin seems to insist on Unicode object result (see MoinMoin.parser.text_moin_wiki.Parser.scan)
#print "endDocument", repr(self._doc.xml_encode(encoding=config.charset).decode(config.charset))
return U(self._doc.xml_encode(encoding=config.charset))
def _elem(self, name, on, **kw):
if on:
e = tree.element(None, name)
self._curr.xml_append(e)
self._curr = e
elif name == self._curr.xml_local:
self._curr = self._curr.xml_parent
return
def lang(self, on, lang_name):
self._elem(u'div', on)
if on:
self._curr.xml_attributes[None, u'lang'] = U(lang_name)
return ''
def sysmsg(self, on, **kw):
self._elem(u'sysmsg', on)
return ''
def rawHTML(self, markup):
#output = htmlparse(markup).html.body.xml_encode() if markup else ''
if markup:
body = markup_fragment(inputsource.text(markup))
for child in body.xml_children:
self._curr.xml_append(child)
#self._curr.xml_append(tree.text(output.decode(config.charset)))
#print "rawHTML", htmlparse(markup).xml_encode()
return ''
def pagelink(self, on, pagename='', page=None, **kw):
FormatterBase.pagelink(self, on, pagename, page, **kw)
if page is None:
page = Page(self.request, pagename, formatter=self)
link_text = page.link_to(self.request, on=on, **kw)
self._curr.xml_append(tree.text(U(link_text)))
return ''
def interwikilink(self, on, interwiki='', pagename='', **kw):
self._elem(u'interwiki', on)
if on:
self._curr.xml_attributes[None, u'wiki'] = U(interwiki)
self._curr.xml_attributes[None, u'pagename'] = U(pagename)
return ''
def url(self, on, url='', css=None, **kw):
self._elem(u'jump', on)
self._curr.xml_attributes[None, u'url'] = U(url)
if css:
self._curr.xml_attributes[None, u'class'] = U(css)
return ''
def attachment_link(self, on, url=None, **kw):
self._elem(u'attachment', on)
if on:
self._curr.xml_attributes[None, u'href'] = U(url)
return ''
    def attachment_image(self, url, **kw):
        self._elem(u'attachmentimage', True)
        self._curr.xml_attributes[None, u'href'] = U(url)
        self._elem(u'attachmentimage', False)
        return ''
    def attachment_drawing(self, url, text, **kw):
        self._elem(u'attachmentimage', True)
        self._curr.xml_attributes[None, u'href'] = U(url)
        self._curr.xml_append(tree.text(U(text)))
        self._elem(u'attachmentimage', False)
        return ''
def attachment_inlined(self, url, text, **kw):
from MoinMoin.action import AttachFile
import os
_ = self.request.getText
pagename, filename = AttachFile.absoluteName(url, self.page.page_name)
fname = wikiutil.taintfilename(filename)
fpath = AttachFile.getFilename(self.request, pagename, fname)
ext = os.path.splitext(filename)[1]
Parser = wikiutil.getParserForExtension(self.request.cfg, ext)
if Parser is not None:
try:
content = file(fpath, 'r').read()
# Try to decode text. It might return junk, but we don't
# have enough information with attachments.
content = wikiutil.decodeUnknownInput(content)
colorizer = Parser(content, self.request, filename=filename)
colorizer.format(self)
except IOError:
pass
self.attachment_link(1, url)
self.text(text)
self.attachment_link(0)
return ''
def smiley(self, text):
self._curr.xml_append(tree.text(U(text)))
return ''
def text(self, text, **kw):
self._curr.xml_append(tree.text(text))
return ''
def rule(self, size=0, **kw):
e = tree.element(None, u'br') # <hr/> not supported in stylebook
e.xml_append(tree.text((u"-" * 78)))
self._curr.xml_append(e)
return ''
    def icon(self, type_):
        self._elem(u'icon', True)
        self._curr.xml_attributes[None, u'type'] = U(type_)
        self._elem(u'icon', False)
        return ''
def strong(self, on, **kw):
self._elem(u'strong', on)
return ''
def emphasis(self, on, **kw):
self._elem(u'em', on)
return ''
def highlight(self, on, **kw):
self._elem(u'strong', on)
return ''
def number_list(self, on, type=None, start=None, **kw):
self._elem(u'ol', on)
return ''
def bullet_list(self, on, **kw):
self._elem(u'ul', on)
return ''
def listitem(self, on, **kw):
self._elem(u'li', on)
return ''
def code(self, on, **kw):
self._elem(u'code', on)
return ''
def small(self, on, **kw):
self._elem(u'small', on)
return ''
def big(self, on, **kw):
self._elem(u'big', on)
return ''
def sup(self, on, **kw):
self._elem(u'sup', on)
return ''
def sub(self, on, **kw):
self._elem(u'sub', on)
return ''
def strike(self, on, **kw):
self._elem(u'strike', on)
return ''
def preformatted(self, on, **kw):
FormatterBase.preformatted(self, on)
self._elem(u'source', on)
return ''
def paragraph(self, on, **kw):
FormatterBase.paragraph(self, on)
self._elem(u'p', on)
return ''
def linebreak(self, preformatted=1):
e = tree.element(None, u'br')
self._curr.xml_append(e)
return ''
def heading(self, on, depth, id=None, **kw):
# remember depth of first heading, and adapt current depth accordingly
if not self._base_depth:
self._base_depth = depth
depth = max(depth + (2 - self._base_depth), 2)
name = u's%i'%depth
if on:
found = None
parent_depth = depth-1
while not found:
found = self._curr.xml_select(u'ancestor-or-self::' + u's%i'%(parent_depth))
parent_depth -= 1
if found:
break
#print name, found
self._curr = found[0]
e = tree.element(None, name)
id = U(id) if id else u''
e.xml_attributes[None, u'title'] = id
e.xml_attributes[None, u'id'] = id
self._curr.xml_append(e)
self._curr = e
e = tree.element(None, u'title')
self._curr.xml_append(e)
self._curr = e
else:
parent = self._curr.xml_parent
if self._curr.xml_local == u'title':
parent.xml_remove(self._curr)
self._curr = parent
return ''
def table(self, on, attrs={}, **kw):
self._elem(u'table', on)
return ''
def table_row(self, on, attrs={}, **kw):
self._elem(u'tr', on)
return ''
def table_cell(self, on, attrs={}, **kw):
self._elem(u'td', on)
return ''
    def anchordef(self, id):
        e = tree.element(None, u'anchor')
        e.xml_attributes[None, u'id'] = U(id)
        self._curr.xml_append(e)
        return ''
def anchorlink(self, on, name='', **kw):
self._elem(u'link', on)
if on:
id = kw.get('id', None)
if id:
self._curr.xml_attributes[None, u'id'] = U(id)
self._curr.xml_attributes[None, u'anchor'] = U(name)
return ''
def underline(self, on, **kw):
return self.strong(on) # no underline in StyleBook
def definition_list(self, on, **kw):
self._elem(u'gloss', on)
return ''
def definition_term(self, on, compact=0, **kw):
self._elem(u'label', on)
return ''
def definition_desc(self, on, **kw):
self._elem(u'item', on)
return ''
def image(self, src=None, **kw):
e = tree.element(None, u'img')
self._curr.xml_append(e)
valid_attrs = ('src', 'width', 'height', 'alt', 'title')
kw.update({'src': src})
        for key, value in kw.items():
            if key in valid_attrs and value is not None:
                self._curr.xml_attributes[None, U(key)] = U(value)
return ''
def transclusion(self, on, **kw):
# TODO, see text_html formatter
return ''
def transclusion_param(self, **kw):
# TODO, see text_html formatter
return ''
def code_area(self, on, code_id, code_type='code', show=0, start=-1, step=-1, msg=None):
self._elem(u'codearea', on)
if on:
self._curr.xml_attributes[None, u'id'] = U(code_id)
return ''
def code_line(self, on):
self._elem(u'codeline', on)
return ''
def code_token(self, on, tok_type):
self._elem(u'codetoken', on)
if on:
self._curr.xml_attributes[None, u'type'] = U(tok_type)
return '' | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/demo/etc/MoinMoin/formatter/application_xml.py | application_xml.py |
import re, time
from MoinMoin import config, wikiutil
from MoinMoin.logfile import editlog
from MoinMoin.util import timefuncs
from MoinMoin.Page import Page
from MoinMoin.wikixml.util import RssGenerator
from amara.writers.struct import structencoder, E, E_CURSOR, NS, ROOT, RAW
from amara.namespaces import ATOM_NAMESPACE
RSSWIKI_NAMESPACE = u"http://purl.org/rss/1.0/modules/wiki/"
def full_url(request, page, querystr=None, anchor=None):
url = page.url(request, anchor=anchor, querystr=querystr)
url = wikiutil.escape(url)
return request.getQualifiedURL(url)
def execute(pagename, request):
""" Send recent changes as an RSS document
"""
cfg = request.cfg
# get params
items_limit = 100
try:
max_items = int(request.values['items'])
max_items = min(max_items, items_limit) # not more than `items_limit`
except (KeyError, ValueError):
# not more than 15 items in a RSS file by default
max_items = 15
try:
unique = int(request.values.get('unique', 0))
except ValueError:
unique = 0
try:
diffs = int(request.values.get('diffs', 0))
except ValueError:
diffs = 0
## ddiffs inserted by Ralf Zosel <[email protected]>, 04.12.2003
try:
ddiffs = int(request.values.get('ddiffs', 0))
except ValueError:
ddiffs = 0
urlfilter = request.values.get('filter')
if urlfilter:
urlfilter = re.compile(urlfilter)
else:
urlfilter = None
# get data
log = editlog.EditLog(request)
logdata = []
counter = 0
pages = {}
lastmod = 0
for line in log.reverse():
if urlfilter and not(urlfilter.match(line.pagename)):
continue
if not request.user.may.read(line.pagename):
continue
if (not line.action.startswith('SAVE') or
((line.pagename in pages) and unique)): continue
#if log.dayChanged() and log.daycount > _MAX_DAYS: break
line.editor = line.getInterwikiEditorData(request)
line.time = timefuncs.tmtuple(wikiutil.version2timestamp(line.ed_time_usecs)) # UTC
logdata.append(line)
pages[line.pagename] = None
if not lastmod:
lastmod = wikiutil.version2timestamp(line.ed_time_usecs)
counter += 1
if counter >= max_items:
break
del log
timestamp = timefuncs.formathttpdate(lastmod)
etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)
# for 304, we look at if-modified-since and if-none-match headers,
# one of them must match and the other is either not there or must match.
if request.if_modified_since == timestamp:
if request.if_none_match:
if request.if_none_match == etag:
request.status_code = 304
else:
request.status_code = 304
elif request.if_none_match == etag:
if request.if_modified_since:
if request.if_modified_since == timestamp:
request.status_code = 304
else:
request.status_code = 304
else:
# generate an Expires header, using whatever setting the admin
# defined for suggested cache lifetime of the RecentChanges RSS doc
expires = time.time() + cfg.rss_cache
        request.mimetype = 'application/atom+xml'
request.expires = expires
request.last_modified = lastmod
request.headers['Etag'] = etag
# send the generated XML document
baseurl = request.url_root
logo = re.search(r'src="([^"]*)"', cfg.logo_string)
if logo:
logo = request.getQualifiedURL(logo.group(1))
# prepare output
output = structencoder(indent=u"yes")
FEED_HEADER_COMMENT = '''
<!--
Add an "items=nnn" URL parameter to get more than the default 15 items.
You cannot get more than %d items though.
Add "unique=1" to get a list of changes where page names are unique,
i.e. where only the latest change of each page is reflected.
Add "diffs=1" to add change diffs to the description of each items.
Add "ddiffs=1" to link directly to the diff (good for FeedReader).
Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i
-->
''' % (items_limit, max_items, unique, diffs, ddiffs)
# Feed envelope
page = Page(request, pagename)
f = output.cofeed(
ROOT(
NS(u'', ATOM_NAMESPACE),
NS(u'wiki', RSSWIKI_NAMESPACE),
E_CURSOR((ATOM_NAMESPACE, u'feed'),
)
)
)
    f.send(E((ATOM_NAMESPACE, u'id'), full_url(request, page).encode(config.charset)))
    f.send(E((ATOM_NAMESPACE, u'title'), cfg.sitename.encode(config.charset)))
    f.send(E((ATOM_NAMESPACE, u'link'), {u'href': request.url_root.encode(config.charset)}))
    f.send(E((ATOM_NAMESPACE, u'summary'), ('RecentChanges at %s' % cfg.sitename).encode(config.charset)))
#Icon
#E((ATOM_NAMESPACE, u'link'), {u'href': logo.encode(config.charset)}),
#if cfg.interwikiname:
# handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)
for item in logdata:
anchor = "%04d%02d%02d%02d%02d%02d" % item.time[:6]
page = Page(request, item.pagename)
#link = full_url(request, page, anchor=anchor)
if ddiffs:
link = full_url(request, page, querystr={'action': 'diff'})
else:
link = full_url(request, page)
# description
desc_text = item.comment
if diffs:
# TODO: rewrite / extend wikiutil.pagediff
# searching for the matching pages doesn't really belong here
revisions = page.getRevList()
rl = len(revisions)
for idx in range(rl):
rev = revisions[idx]
if rev <= item.rev:
if idx + 1 < rl:
lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
if len(lines) > 20:
lines = lines[:20] + ['...\n']
lines = '\n'.join(lines)
lines = wikiutil.escape(lines)
desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, lines)
break
#if desc_text:
# handler.simpleNode('description', desc_text)
# contributor
edattr = {}
#if cfg.show_hosts:
# edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
if item.editor[0] == 'interwiki':
edname = "%s:%s" % item.editor[1]
##edattr[(None, 'link')] = baseurl + wikiutil.quoteWikiname(edname)
else: # 'ip'
edname = item.editor[1]
##edattr[(None, 'link')] = link + "?action=info"
history_link = full_url(request, page, querystr={'action': 'info'})
f.send(
E((ATOM_NAMESPACE, u'entry'),
E((ATOM_NAMESPACE, u'id'), link.encode(config.charset)),
E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
E((ATOM_NAMESPACE, u'updated'), timefuncs.W3CDate(item.time).encode(config.charset)),
E((ATOM_NAMESPACE, u'link'), {u'href': link.encode(config.charset)}),
E((ATOM_NAMESPACE, u'summary'), desc_text.encode(config.charset)),
E((ATOM_NAMESPACE, u'author'),
E((ATOM_NAMESPACE, u'name'), edname.encode(config.charset))
),
#E((ATOM_NAMESPACE, u'title'), item.pagename.encode(config.charset)),
# wiki extensions
E((RSSWIKI_NAMESPACE, u'wiki:version'), ("%i" % (item.ed_time_usecs)).encode(config.charset)),
E((RSSWIKI_NAMESPACE, u'wiki:status'), (u'deleted', u'updated')[page.exists()]),
E((RSSWIKI_NAMESPACE, u'wiki:diff'), link.encode(config.charset)),
E((RSSWIKI_NAMESPACE, u'wiki:history'), history_link.encode(config.charset)),
# handler.simpleNode(('wiki', 'importance'), ) # ( major | minor )
# handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA )
)
)
# emit logo data
#if logo:
# handler.startNode('image', attr={
# (handler.xmlns['rdf'], 'about'): logo,
# })
# handler.simpleNode('title', cfg.sitename)
# handler.simpleNode('link', baseurl)
# handler.simpleNode('url', logo)
# handler.endNode('image')
f.close()
request.write(output.read()) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/demo/etc/MoinMoin/action/atom_rc.py | atom_rc.py |
import urllib, urllib2
import os
import shutil
import sys
import base64
import hashlib
import cPickle as pickle
import time
from akara import registry
from akara import global_config
# File object returned to clients on cache hit. A real file, but with an info() method
# to mimic that operation on file-like objects returned by urlopen().
class CacheFile(file):
def __init__(self,*args, **kwargs):
file.__init__(self,*args,**kwargs)
self.headers = None
self.code = 200
self.url = None
self.msg = "OK"
def getcode(self):
return self.code
def geturl(self):
return self.url
def info(self):
return self.headers
# Utility function to remove the oldest entry from a cache directory.
# Look at the modification dates for metadata files
def remove_oldest(cachedir):
files = (name for name in os.listdir(cachedir) if name.endswith(".p"))
paths = (os.path.join(cachedir,name) for name in files)
time_and_paths = ((os.path.getmtime(path),path) for path in paths)
oldest_time, path = min(time_and_paths)
# Remove the oldest entry
os.remove(path)
class cache(object):
def __init__(self,ident,maxentries=65536,expires=15*60,opener=None):
"""Create a cache for another Akara service.
ident is the Akara service ID
maxentries is the maximum number of cache entries (approximate)
expires is the time in seconds after which entries expire
opener is an alternative URL opener. By default urllib2.urlopen is used.
"""
self.ident = ident
if opener is None:
opener = urllib2.urlopen
self.opener = opener
self.maxentries = maxentries
self.expires = expires
self.maxperdirectory = maxentries / 256
self.serv = None
self.initialized = False
    # Internal method that locates the Akara service description and sets up
    # a base URL for making requests. This cannot be done in __init__() since
    # not all Akara services have been initialized at the time __init__()
    # executes. Instead, it has to be done lazily, after all services are up
    # and running. In this implementation, the _find_service() method gets
    # invoked upon the first use of the cache.
def _find_service(self):
self.serv = registry.get_a_service_by_id(self.ident)
if not self.serv:
raise KeyError("Nothing known about service %s" % ident)
# print >>sys.stderr,"CACHE: %s at %s\n" % (self.ident, self.serv.path)
hostname,port = global_config.server_address
if not hostname:
hostname = "localhost"
self.baseurl = "http://%s:%d/%s" % (hostname, port, self.serv.path)
# print >>sys.stderr,"CACHE: %s\n" % self.baseurl
# Method that makes the cache directory if it doesn't yet exist
def _make_cache(self):
# Make sure the cache directory exists
if not os.path.exists(global_config.module_cache):
try:
os.mkdir(global_config.module_cache)
except OSError:
pass # Might be a race condition in creating. Ignore errors, but follow up with an assert
assert os.path.exists(global_config.module_cache),"%s directory can't be created" % (global_config.module_cache)
self.cachedir = os.path.join(global_config.module_cache,self.serv.path)
if not os.path.exists(self.cachedir):
try:
os.mkdir(self.cachedir)
except OSError:
# This exception handler is here to avoid a possible race-condition in startup.
# Multiple server instances might enter here at the same time and try to create directory
pass
assert os.path.exists(self.cachedir), "Failed to make module cache directory %s" % self.cachedir
# Method that initializes the cache if needed
def _init_cache(self):
self._find_service()
self._make_cache()
self.initialized = True
def get(self,**kwargs):
"""Make a cached GET request to an Akara service. If a result can be
found in the cache, it is returned. Otherwise, a GET request is issued
to the service using urllib2.urlopen(). The result of this operation
mimics the file-like object returned by urlopen(). Reading from the file
will return raw data. Invoking the .info() method will return HTTP
metadata about the request."""
# Check to see if the cache has been initialized or not. This has to be done here since
# it is currently not possible to fully initialize the cache in __init__().
if not self.initialized:
self._init_cache()
# This is a sanity check. If the cache is gone, might have to rebuild it
if not os.path.exists(self.cachedir):
self._make_cache()
        # Make a canonical query string from the arguments (guaranteed to be
        # the same even if the keyword arguments are specified in an
        # arbitrary order)
#
query = "&".join(name+"="+urllib.quote(str(value)) for name,value in sorted(kwargs.items()))
        # Take the query string and make a SHA-1 hash out of it. The general
        # idea is to come up with an identifier that has a reasonable number
        # of bits, but which is extremely unlikely to collide with the
        # identifier of any other query.
shadigest = hashlib.sha1()
shadigest.update(query)
# Create a unique cache identifier from the digest.
identifier = shadigest.hexdigest()
        # Caching operation. The identifier is split into two parts. The
        # first two hex digits specify a cache subdirectory; the remaining
        # 38 hex digits become the filename. We do this to avoid putting too
        # many files into one big directory (which slows down filesystem
        # operations).
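        # For example, a (hypothetical) identifier "3a7bd3...117c4" would be
        # stored as <cachedir>/3a/7bd3...117c4.p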
subdir = identifier[:2]
filename = identifier[2:]
        # Check for the existence of a cache subdirectory. Make it if needed
cache_subdir = os.path.join(self.cachedir,subdir)
if not os.path.exists(cache_subdir):
# Make the cache subdirectory if it doesn't exist
try:
os.mkdir(cache_subdir)
except OSError:
pass # Here for possible race condition
assert os.path.exists(cache_subdir), "Failed to make directory %s" % cache_subdir
# Check for existence of cache file
cache_file= os.path.join(cache_subdir,filename+".p")
if os.path.exists(cache_file):
# A cache hit. Load the metadata file to get the cache information and return it.
f = CacheFile(cache_file,"rb")
metaquery,timestamp,url,headers = pickle.load(f)
# Check to make sure the query string exactly matches the meta data
# and that the cache data is not too old
if metaquery == query and (timestamp + self.expires > time.time()):
                # A cache hit and the query matches. Just return the file we
                # opened. The file pointer is positioned immediately after the
                # pickled metadata at the front.
f.headers = headers
f.url = url
return f
# There was a cache hit, but the cache metadata is for a different query (a collision)
# or the timestamp is out of date. We're going to remove the cache file and
# proceed as if there was a cache miss
try:
os.remove(cache_file)
except OSError:
pass # Ignore. If the files don't exist, who cares?
# Cache miss
# On a miss, a GET request is issued using the cache opener object
# (by default, urllib2.urlopen). Any HTTP exceptions are left unhandled
# for clients to deal with if they want (HTTP errors are not cached)
        # Before adding a new cache entry, check the number of entries in the
        # cache subdirectory. If there are too many entries, remove the
        # oldest entry to make room.
if len(os.listdir(cache_subdir)) >= self.maxperdirectory:
remove_oldest(cache_subdir)
# Make an akara request
url = self.baseurl + "?" + query
u = self.opener(url)
# If successful, we'll make it here. Read data from u and store in the cache
# This is done by initially creating a file with a different filename, fully
# populating it, and then renaming it to the correct cache file when done.
cache_tempfile = cache_file + ".%d" % os.getpid()
f = open(cache_tempfile,"wb")
pickle.dump((query,time.time(),url,u.info()),f,-1)
# Write content into the file
while True:
chunk = u.read(65536)
if not chunk: break
f.write(chunk)
f.close()
# Rename the file, open, and return
shutil.move(cache_tempfile, cache_file)
# Return a file-like object back to the client
f = CacheFile(cache_file,"rb")
metaquery,timestamp,f.url,f.headers = pickle.load(f)
return f
#
# Function that makes a named cache directory if it doesn't yet exist
def make_named_cache(name):
#serv = registry.get_a_service_by_id(ident)
#if not serv:
# raise KeyError("Nothing known about service %s" % ident)
# Make sure the cache directory exists
if not os.path.exists(global_config.module_cache):
try:
os.mkdir(global_config.module_cache)
except OSError:
pass # Might be a race condition in creating. Ignore errors, but follow up with an assert
assert os.path.exists(global_config.module_cache),"%s directory can't be created" % (global_config.module_cache)
cachedir = os.path.join(global_config.module_cache, name)
if not os.path.exists(cachedir):
try:
os.mkdir(cachedir)
except OSError:
# This exception handler is here to avoid a possible race-condition in startup.
# Multiple server instances might enter here at the same time and try to create directory
pass
assert os.path.exists(cachedir), "Failed to make module cache directory %s" % cachedir
return cachedir | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/caching.py | caching.py |
import inspect
import amara
from amara import tree
from akara import logger
# Take care! This is only initialized when the server starts up and
# after it reads the config file. It's used to generate the full
# template name in list_services().
from akara import global_config
from akara import opensearch
__all__ = ("register_service", "get_service")
#### Simple registry of services
# We had some discussion about whether to use the term 'path' or 'mount_point'.
# Right now we don't really have '/' paths so there's no real difference
# Therefore, you register mount_points, which must not have a "/" in them.
# Incoming requests have a path, and the first segment (before the "/")
# is used to find the mount point.
# We're calling it a 'path' for future compatibility. For more
# complex paths we might want to use Routes or similar system, in
# which case we'll also have pattern matching on the path segments.
# You'll likely register a path pattern, still with the name 'path'.
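# A sketch of the registration flow (the ident, mount point, and handler
# below are purely illustrative):
#
#   def hello(environ, start_response):
#       start_response("200 OK", [("Content-Type", "text/plain")])
#       return ["hello\n"]
#   register_service("http://example.com/akara.hello", "hello", hello)
#   service = get_service("hello")    # look up by mount point
#   assert service.handler is hello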
class Service(object):
"Internal class to store information about a given service resource"
def __init__(self, handler, path, ident, doc, query_template):
self.handler = handler # most important - the function to call
# XXX is it okay for the path to be None? I think so ...
self.path = path # where to find the service
self.ident = ident # URN which identifies this uniquely
self.doc = doc # description to use when listing the service
self.query_template = query_template # OpenSearch template fragment
self._template = None # The OpenSearch Template
self._internal_template = None # The OpenSearch template used internally
@property
def template(self):
if self._template is False:
# No template is possible
return None
if self._template is not None:
# Cached version
return self._template
# Compute the template
if self.query_template is None:
# No template available
self._template = False
return None
template = global_config.server_root + self.query_template
self._template = opensearch.make_template(template)
return self._template
@property
def internal_template(self):
if self._internal_template is False:
# No template is possible
return None
if self._internal_template is not None:
# Cached version
return self._internal_template
# Compute the template
if self.query_template is None:
# No template available
self._internal_template = False
return None
internal_template = global_config.internal_server_root + self.query_template
self._internal_template = opensearch.make_template(internal_template)
return self._internal_template
class Registry(object):
"Internal class to handle resource registration information"
def __init__(self):
self._registered_services = {}
def register_service(self, ident, path, handler, doc=None, query_template=None):
if "/" in path:
raise ValueError("Registered path %r may not contain a '/'" % (path,))
if doc is None:
doc = inspect.getdoc(handler) or ""
if ident in self._registered_services:
logger.warn("Replacing mount point %r (%r)" % (path, ident))
else:
logger.debug("Created new mount point %r (%r)" % (path, ident))
serv = Service(handler, path, ident, doc, query_template)
self._registered_services[path] = serv
def get_service(self, path):
return self._registered_services[path]
def list_services(self, ident=None):
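        # The returned document is shaped roughly like (values illustrative):
        #   <services>
        #     <service ident="...">
        #       <path template="...">mount-point</path>
        #       <description>...</description>
        #     </service>
        #   </services>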
document = tree.entity()
services = document.xml_append(tree.element(None, 'services'))
for path, service in sorted(self._registered_services.iteritems()):
if ident is not None and service.ident != ident:
continue
service_node = services.xml_append(tree.element(None, 'service'))
service_node.xml_attributes['ident'] = service.ident
E = service_node.xml_append(tree.element(None, 'path'))
template = service.template
if template is not None:
E.xml_attributes["template"] = service.template.template
E.xml_append(tree.text(path))
E = service_node.xml_append(tree.element(None, 'description'))
E.xml_append(tree.text(service.doc))
return document
_current_registry = Registry()
def register_service(ident, path, function, doc=None, query_template=None):
_current_registry.register_service(ident, path, function, doc, query_template)
def get_service(mount_point):
return _current_registry.get_service(mount_point)
def list_services(ident=None):
return _current_registry.list_services(ident)
def get_a_service_by_id(ident):
for path, service in _current_registry._registered_services.items():
if service.ident == ident:
return service
return None
# ident -> template
_registered_templates = {}
# Split this up to make it easier to test
def _register_services(uri):
new_templates = {}
    doc = amara.parse(uri)
    for path in doc.xml_select("//service[@ident]/path[@template]"):
ident = path.xml_parent.xml_attributes["ident"]
template = path.xml_attributes["template"]
new_templates[ident] = opensearch.make_template(template)
return new_templates
def register_services(uri):
new_templates = _register_services(uri)
_registered_templates.update(new_templates)
def register_template(ident, template):
if isinstance(template, basestring):
template = opensearch.make_template(template)
_registered_templates[ident] = template
def _get_url(ident, template_attr, kwargs):
# Get the local service first
service = get_a_service_by_id(ident)
if service is not None:
template = getattr(service, template_attr)
else:
# Still not here? Look for the other registered templates.
template = _registered_templates.get(ident, None)
if template is None:
# XXX What's a good default? Just put them as kwargs at the end?
raise TypeError("service %r does not have a query template" % (ident,))
return template.substitute(**kwargs)
def get_service_url(ident, **kwargs):
return _get_url(ident, "template", kwargs)
def get_internal_service_url(ident, **kwargs):
return _get_url(ident, "internal_template", kwargs) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/registry.py | registry.py |
import sys
import os
import signal
import shutil
from akara.thirdparty import argparse
from akara import read_config, run
def get_pid(args):
try:
settings, config = read_config.read_config(args.config_filename)
except read_config.Error, err:
raise SystemExit(str(err))
pid_file = settings["pid_file"]
try:
f = open(pid_file)
except IOError, err:
raise SystemExit("Could not open Akara PID file: %s" % (err,))
pid = f.readline()
if not pid:
raise SystemExit("Empty Akara PID file: %r" % (pid_file,))
try:
return int(pid)
except ValueError:
raise SystemExit("Akara PID file %r does not contain a PID (%r)" %
(pid_file, pid))
def start(args):
run.main(args)
def stop(args):
pid = get_pid(args)
os.kill(pid, signal.SIGTERM)
def restart(args):
pid = get_pid(args)
os.kill(pid, signal.SIGHUP)
def status(args):
config_filename = read_config.DEFAULT_SERVER_CONFIG_FILE
if args.config_filename is not None:
config_filename = args.config_filename
print " == Akara status =="
print "Configuration file:", repr(config_filename)
try:
settings, config = read_config.read_config(config_filename)
except read_config.Error, err:
print "** ERROR **:", str(err)
raise SystemExit(1)
print "Error log file:", repr(settings["error_log"])
print "Access log file:", repr(settings["access_log"])
pid_file = settings["pid_file"]
print "PID file:", repr(pid_file)
try:
line = open(pid_file).readline()
except IOError, err:
# It's fine if the PID file isn't there
if not os.path.exists(pid_file):
print "PID file does not exist"
print "Akara is not running"
else:
print "*** Cannot open PID file:", err
raise SystemExit(1)
else:
try:
pid = int(line)
except ValueError, err:
print "*** Unable to parse the PID from the PID file:", err
raise SystemExit(1)
try:
os.kill(pid, 0)
except OSError:
print "PID is", pid, "but no process found with that PID"
print "Akara is not running"
else:
print "PID is", pid, "and there is a process with that PID"
# XXX try to connect to the server?
print "Akara is running"
def setup_config_file():
_setup_config_file(read_config.DEFAULT_SERVER_CONFIG_FILE)
# This function is called by test code.
# It is not part of the external API.
def _setup_config_file(default_file):
if os.path.exists(default_file):
print "Configuration file already exists at", repr(default_file)
else:
print "Copying reference configuration file to", repr(default_file)
dirname = os.path.dirname(default_file)
if not os.path.exists(dirname):
print " Creating directory", dirname
try:
os.makedirs(dirname)
except OSError, err:
raise SystemExit("Cannot make directory: %s" % err)
# Using 'read_config.__file__' because it was handy
akara_config = os.path.join(os.path.dirname(read_config.__file__),
"akara.conf")
try:
shutil.copy(akara_config, default_file)
except IOError, err:
raise SystemExit("Cannot copy file: %s" % err)
def setup_directory_for(what, dirname):
    if os.path.isdir(dirname):
        # Capitalize the description for display ("error log" -> "Error log")
        if what[0] > "Z":
            what = what[0].upper() + what[1:]
print "%s directory exists: %r" % (what, dirname)
else:
try:
os.makedirs(dirname)
except OSError, err:
raise SystemExit("Cannot make %s directory: %s" % (what, err))
print "Created %s directory: %r" % (what, dirname)
def setup(args):
if not args.config_filename:
setup_config_file()
settings, config = read_config.read_config(args.config_filename)
dirname = os.path.dirname
setup_directory_for("error log", dirname(settings["error_log"]))
setup_directory_for("access log", dirname(settings["access_log"]))
setup_directory_for("PID file", dirname(settings["pid_file"]))
setup_directory_for("extension modules", settings["module_dir"])
print
print "Akara environment set up. To start Akara use:"
print " akara start"
# This function is not multi-process safe. It's meant to be
# called by hand during the development process
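# For example, "error.log" becomes "error.log.2011-06-01T12:30:45" (the
# timestamp here is illustrative), with an "_1", "_2", ... suffix appended
# if that archive name is already taken.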
def error_log_rotate(args):
import datetime
settings, config = read_config.read_config(args.config_filename)
error_log = settings["error_log"]
ext = ""
i = 0
timestamp = datetime.datetime.now().isoformat().split(".")[0]
template = error_log + "." + timestamp
archived_error_log = template
while os.path.exists(archived_error_log):
i += 1
archived_error_log = template + "_" + str(i)
try:
os.rename(error_log, archived_error_log)
except OSError, err:
if not os.path.exists(error_log):
print "No error log found at %r" % error_log
else:
raise
else:
print "Rotated log file from %r to %r" % (error_log, archived_error_log)
######################################################################
# Handle the command-line arguments
parser = argparse.ArgumentParser(prog="akara", add_help=False)
parser.add_argument("-f", "--config-file", metavar="FILE", dest="config_filename",
help="read configuration data from FILE")
# I didn't like how argparse put the --help and --version options first.
# I didn't like how it uses -v as a variation of --version.
# So, do it myself.
parser.add_argument("-h", "--help", action="help", default=argparse.SUPPRESS,
help=argparse._("show this help message and exit"))
parser.version = "akaractl version 2.0"
parser.add_argument("--version", action="version", default=argparse.SUPPRESS,
help=argparse._("show program's version number and exit"))
#### Commands for start, stop, etc.
subparsers = parser.add_subparsers(title="The available server commands are")
parser_start = subparsers.add_parser("start", help="start Akara (use -X for debug mode)")
parser_start.add_argument("-X", dest="debug", action="store_true",
help="start in debug mode")
parser_start.add_argument("-f", dest="skip_pid_check", action="store_true",
help="do not check for an existing PID file")
parser_start.set_defaults(func=start)
parser_stop = subparsers.add_parser("stop", help="stop an Akara server")
parser_stop.set_defaults(func=stop)
parser_restart = subparsers.add_parser("restart", help="restart an Akara server")
parser_restart.set_defaults(func=restart)
parser_status = subparsers.add_parser("status", help="display a status report")
parser_status.set_defaults(func=status)
parser_setup = subparsers.add_parser("setup", help="set up directories and files for Akara")
parser_setup.set_defaults(func=setup)
# There may be an "akara log rotate" in the future, along perhaps with
# "akara log tail", "akara log filename" and other options. There's
# not yet enough call for those and the following doesn't interfere
# with the possibility (excepting non-orthagonality).
parser_setup = subparsers.add_parser("rotate",
help="rotate out the current Akara error log")
parser_setup.set_defaults(func=error_log_rotate)
def main(argv):
args = parser.parse_args(argv[1:])
args.func(args) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/commandline.py | commandline.py |
__all__ = ["Pipeline", "Stage", "register_pipeline"]
import urllib
from cStringIO import StringIO
from akara import logger
from akara import registry
# Helper function to figure out which stage is the first and/or last stage.
# Yields (index, is_first, is_last, stage) tuples:
#   [X]       -> [ (0,1,1,X) ]
#   [X, Y]    -> [ (0,1,0,X), (1,0,1,Y) ]
#   [X, Y, Z] -> [ (0,1,0,X), (1,0,0,Y), (2,0,1,Z) ]
def _flag_position(data):
first_flags = [1] + [0] * (len(data)-1)
last_flags = first_flags[::-1]
return zip(range(len(data)), first_flags, last_flags, data)
def _find_header(search_term, headers):
search_term = search_term.lower()
for (name, value) in headers:
if name.lower() == search_term:
return value
raise AssertionError("Could not find %r in the headers" % search_term)
class Pipeline(object):
def __init__(self, ident, path, stages, doc):
if not len(stages):
raise TypeError("No stages defined for the pipeline")
self.ident = ident
self.path = path
self.stages = stages
self.doc = doc
def __call__(self, environ, start_response):
# The WSGI entry point for the pipeline
logger.debug("Started the %s (%s) pipeline", self.ident, self.path)
# Help capture the response of a WSGI request,
# so I can forward it as input to the next request.
captured_response = [None, None, None]
captured_body_length = None
def capture_start_response(status, headers, exc_info=None):
if exc_info is None:
captured_response[:] = [status, headers, False]
else:
captured_response[:] = [status, headers, True]
# Forward this to the real start_response
return start_response(status, headers, exc_info)
num_stages = len(self.stages)
for stage_index, is_first, is_last, stage in _flag_position(self.stages):
service = registry.get_a_service_by_id(stage.ident)
if service is None:
logger.error("Pipeline %r(%r) could not find a %r service",
self.ident, self.path, stage.ident)
start_response("500 Internal server error", [("Content-Type", "text/plain")])
return ["Broken internal pipeline.\n"]
# Construct a new environ for each stage in the pipeline.
# We have to make a new one since a stage is free to
# do whatever it wants to the environ. (It does not have
# free reign over all the contents of the environ.)
stage_environ = environ.copy()
if not is_first:
# The first stage gets the HTTP request method
# Everything else gets a POST.
stage_environ["REQUEST_METHOD"] = "POST"
assert service.path is not None # Can some services/pipelines not be mounted?
stage_environ["SCRIPT_NAME"] = service.path
#stage_environ["PATH_INFO"] = ... # I think this is best left unchanged. XXX
if is_first:
# Augment the QUERY_STRING string with any pipeline-defined query string
# (You probably shouldn't be doing this. Remove this feature? XXX)
if stage.query_string:
if stage_environ["QUERY_STRING"]:
stage_environ["QUERY_STRING"] += ("&" + stage.query_string)
else:
stage_environ["QUERY_STRING"] = stage.query_string
else:
# The other stages get nothing about the HTTP query string
# but may get a pipeline-defined query string
if stage.query_string:
stage_environ["QUERY_STRING"] = stage.query_string
else:
stage_environ["QUERY_STRING"] = ""
if not is_first:
# Forward information from the previous stage
stage_environ["CONTENT_TYPE"] = _find_header("content-type",
captured_response[1])
stage_environ["CONTENT_LENGTH"] = captured_body_length
stage_environ["wsgi.input"] = captured_body
# Make the previous response headers available to the next stage
stage_environ["akara.pipeline_headers"] = captured_response[1]
logger.debug("Pipeline %r(%r) at stage %r (%d/%d)",
self.ident, self.path, stage.ident, stage_index+1, num_stages)
if is_last:
# End of the pipeline. Let someone else deal with the response
return service.handler(stage_environ, start_response)
else:
# Intermediate stage output. Collect to forward to the next stage
captured_body = StringIO()
result = service.handler(stage_environ, capture_start_response)
# Did start_response get an exc_info term? (It might not
# have been thrown when forwarded to the real start_response.)
if captured_response[2]:
# It didn't raise an exception. Assume the response contains
# the error message. Forward it and stop the pipeline.
logger.debug(
"Pipeline %r(%r) start_response received exc_info from stage %r. Stopping.",
self.ident, self.path, stage.ident)
return result
# Was there some sort of HTTP error?
status = captured_response[0].split(None, 1)[0]
# XXX What counts as an error?
if status not in ("200", "201"):
logger.debug(
"Pipeline %r(%r) start_response received status %r from stage %r. Stopping.",
self.ident, self.path, status, stage.ident)
start_response(captured_response[0], captured_response[1])
# This should contain error information
return result
# Save the response to the cStringIO
try:
# We might be able to get some better performance using
# a wsgi.file_wrapper. If the chunks come from a file-like
# object then we can reach in and get that file-like object
# instead of copying it to a new one
for chunk in result:
captured_body.write(chunk)
finally:
# Part of the WSGI spec
if hasattr(result, "close"):
result.close()
captured_body_length = captured_body.tell()
captured_body.seek(0)
        raise AssertionError("should never get here")
# The dictionary values may be strings for single-valued arguments, or
# lists/tuples for multiple-valued arguments. That is
#    dict(a="1", z="9") -> "a=1&z=9"
#    dict(a=["1", "2"], z="9") -> "a=1&a=2&z=9"
# This function helps flatten the values, producing a tuple-stream
# that urlencode knows how to process.
def _flatten_kwargs_values(kwargs):
result = []
if isinstance(kwargs, dict):
args = kwargs.items()
else:
args = kwargs
for k,v in args:
if isinstance(v, basestring):
result.append( (k,v) )
else:
for item in v:
result.append( (k, item) )
return result
def _build_query_string(query_args, kwargs):
if query_args is None:
if kwargs is None or kwargs == {}:
return ""
# all kwargs MUST be url-encodable
return urllib.urlencode(_flatten_kwargs_values(kwargs))
if kwargs is None or kwargs == {}:
# query_args MUST be url-encodable
return urllib.urlencode(_flatten_kwargs_values(query_args))
raise TypeError("Cannot specify both 'query_args' and keyword arguments")
class Stage(object):
"""Define a stage in the pipeline
'ident' is the service identifier which uniquely identifies the service
To specify additional QUERY_STRING arguments passed to the service use one of:
query_args - a list of (name, value) tuples
**kwargs - the kwargs.items() are used as (name, value) tuples
query_string - the raw query string
If the value is a string then the name, value pair is converted to
an HTTP query parameter. Otherwise the value is treated as a list
and each list item adds a new query parameter
(name, value[0]), (name, value[1]), ...
Here are examples:
# Using query_args (which preserves order)
Stage("http://example.com", [("a", ["1", "2"]), ("b", "9")])
-> QUERY_STRING = "a=1&a=2&b=9"
# Using kwargs (which might not preserve order)
Stage("http://example.com", a=["1", "2"], b="9")
-> QUERY_STRING = "b=9&a=1&a=2"
# Using a raw query string
Stage("http://example.com", query_string="a=2&b=9&a=1")
-> QUERY_STRING = "a=2&b=9&a=1"
The first stage gets the HTTP request QUERY_STRING plus
the query string defined for the stage. The other stages
only get the query string defined for the stage.
"""
def __init__(self, ident, query_args=None, query_string=None, **kwargs):
self.ident = ident
if query_string is not None:
if query_args is not None:
raise TypeError("Cannot specify both 'query_string' and 'query_args'")
if kwargs:
raise TypeError("Cannot specify both 'query_string' and keyword argument")
self.query_string = query_string
else:
self.query_string = _build_query_string(query_args, kwargs)
def _normalize_stage(stage):
if isinstance(stage, basestring):
return Stage(stage)
return stage
def register_pipeline(ident, path=None, stages=None, doc=None):
if not stages:
raise TypeError("a pipeline must have stages")
stages = [_normalize_stage(stage) for stage in stages]
# Should I check that the dependent stages are already registered?
pipeline = Pipeline(ident, path, stages, doc)
registry.register_service(ident, path, pipeline, doc) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/pipeline.py | pipeline.py |
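# A sketch of wiring services into a pipeline (all idents and the path
# below are illustrative; the referenced services must be registered by
# the time the pipeline handles a request):
#
#   register_pipeline("http://example.com/akara.fetch-and-transform",
#       path="fetch.transform",
#       stages=["http://example.com/akara.fetch",
#               Stage("http://example.com/akara.transform", mode="strict")])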
import re
import urlparse
import urllib
__all__ = ["make_template", "apply_template"]
# The OpenSearch format is documented at
# http://www.opensearch.org/Specifications/OpenSearch/1.1/Draft_4
# it references the URI spec at
# http://www.ietf.org/rfc/rfc3986.txt
# Note a proposed extension to OpenSearch at:
# http://www.snellspace.com/wp/?p=369
###############
# I started by looking for existing OpenSearch implementations but
# they didn't work correctly. For example, one tool uses
# urlparse.urlsplit to parse the fields, then does template substitution
# of each field. That works only because the library didn't support
# optional "?" fields. That is, consider:
# >>> urlparse.urlsplit("http://example.com/{spam?}/eggs")
# SplitResult(scheme='http', netloc='example.com',
# path='/{spam', query='}/eggs', fragment='')
# I started implementing a parser from the spec then realized the spec
# grammar isn't correctly defined. For example, it has:
# thost = *( host / tparameter )
# where
# host = IP-literal / IPv4address / reg-name
# which means it allows 127.0.0.1127.0.0.1 as an address. While the
# grammar isn't that bad, it's easier to use an approach more like
# what Python's urllib.urlsplit does.
# I also noticed the grammar doesn't allow http://{userid}.myopenid.com/
# as a template, which seemed important for some cases.
# I can't treat this as a simple template grammar and just search for
# the {...} tokens because of Unicode issues. The encoding is field
# specific. The hostname uses IDNA while most of the rest of the
# encoding uses URL-encoded UTF-8.
# The algorithm breaks the template up into parts. Each part is either
# a string (should be a byte string since URLs are not Unicode) or a
# function corresponding to a template lookup. The function will get a
# dictionary of input parameters and it must return the correctly
# encoded value.
# Template substitution is a merger of either the byte string or the
# result of calling the function with the input parameters.
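# For example (a rough sketch; the actual parts are byte strings and
# closures, and the function names shown are illustrative):
#
#   decompose_template("http://{host}/search?q={term}")
#     -> ["http://", <convert_hostname>, "/search?q=", <convert_field>]
#
# substitute(host=u"example.com", term=u"caf\u00e9") walks that list,
# calling each function with the keyword dictionary, and joins the
# results into "http://example.com/search?q=caf%C3%A9".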
### Syntax definitions from the relevant specs
# tparameter = "{" tqname [ tmodifier ] "}"
# tqname = [ tprefix ":" ] tlname
# tprefix = *pchar
# tlname = *pchar
# pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# pct-encoded = "%" HEXDIG HEXDIG
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# tmodifier = "?"
pchar = r"""([A-Za-z0-9._~!$&''()*+,;=""-]|(%[0-9A-Fa-f]{2}))*"""
scheme = r"[a-zA-Z0-9+.-]+"
tparameter = r"""
(?: {{
((?P<tprefix> {pchar} ) : )?
(?P<tlname> {pchar} )
(?P<tmodifier>\?)?
}} )
""".format(pchar=pchar)
scheme_pat = re.compile(r"""( {scheme} | {tparameter}):""".format(
scheme=scheme, tparameter=tparameter), re.X)
tparameter_pat = re.compile(r"""
{{((?P<tprefix> {pchar} ) : )? (?P<tlname> {pchar} ) (?P<tmodifier>\?)? }}
""".format(pchar=pchar), re.X)
# Match either a tparameter or things which aren't in a template
template_pat = re.compile(r"({tparameter}|[^{{]+)".format(tparameter=tparameter), re.X)
# Test if the re match group contains the '?' tmodifier field
def _is_optional(m):
return m.group("tmodifier") == "?"
##################
# Parse the URI scheme field.
# Looks something like "http:", "ftp:", or "{scheme}:"
# This parses up to the ':' and returns the offset to the ':' and a
# list of template parts.
def _parse_scheme(uri):
m = scheme_pat.match(uri)
if m is None:
i = uri.find(":")
if i >= 0:
msg = "URI scheme must be either text or a single template field: %r"
else:
msg = "Missing or unparsable URI scheme: %r"
raise TypeError(msg % (uri,))
if m.group("tlname") is None:
# Just text. This must be an ASCII byte string.
return m.end(), [m.group(0).encode("ascii")]
if _is_optional(m):
raise TypeError("URI scheme cannot be an optional template variable")
def convert_scheme(params, tlname=m.group("tlname")):
# I could make this a more rigorous test for the legal scheme characters
return params[tlname].encode("ascii") # the scheme can only be ASCII
return m.end(), [convert_scheme, ":"]
# Find the end of the network location field. The start is just after the '//'.
# To make things easier, this must be a string with all template {names} removed!
# That's the "xuri", which uses "X"s to replace the template names.
def _find_netloc(xuri, start):
end = len(xuri)
    # This is the same test as in urlparse.urlsplit
for c in "/?#":
offset = xuri.find(c, start)
if offset >= 0 and offset < end:
end = offset
return end
def _parse_netloc(netloc, xnetloc):
# Check to see if there's a username/password field.
# These happen before the '@', if one exists.
i = xnetloc.find("@")
if i >= 0:
# Username/password fields use the normal utf-8 encoding
# so handle that with the normal template parser.
for part in _parse_template(netloc[:i]):
yield part
yield "@"
hostname = netloc[i+1:]
xhostname = xnetloc[i+1:]
else:
hostname = netloc
xhostname = xnetloc
# There could be a port after the hostname.
# Starts with ":", as in http://localhost:8080/path/
i = xhostname.find(":")
if i >= 0:
port = hostname[i+1:]
hostname = hostname[:i]
else:
# No port specified
port = None
if not hostname:
raise TypeError("Akara requires a hostname in the template")
# This is tricky. I have to join all of the subfields before doing
# the idna encoding. This allows u"{hostname}.Espa\u00F1a.com"
# since the u".Espa\u00F1a.com" does not encode on its own.
# Create a list of subparts, either:
# - strings which are *not* encoded
# - a function to look up the value in the dictionary
subparts = []
for m in template_pat.finditer(hostname):
tlname = m.group("tlname")
if tlname is None:
subparts.append(m.group(0))
else:
if m.group("tmodifier") == "?":
raise TypeError("URI hostname cannot contain an optional template variable")
subparts.append(lambda d, tlname=tlname: d[tlname])
# In the common case this is a string. No need for the extra overhead.
if len(subparts) == 1 and isinstance(subparts[0], basestring):
yield subparts[0].encode("idna")
else:
# Function to convert, join, and encode based the parts
def convert_hostname(params, parts=subparts):
results = []
for part in parts:
if isinstance(part, basestring):
results.append(part)
else:
results.append(part(params))
result = "".join(results)
return result.encode("idna")
yield convert_hostname
# And finally, the port.
if port is None:
return
# If it's just a number, return the number (and the ":" I had removed)
if port.isdigit():
yield ":" + port
return
    # Otherwise it's a template parameter. Make sure it's only a parameter.
m = tparameter_pat.match(port)
if m is None:
raise TypeError("Port must be either a number or a template name")
if m.end() != len(port):
raise TypeError("Port may not contain anything after the template name")
tlname = m.group("tlname")
if _is_optional(m):
extract = lambda params, tlname=tlname: params.get(tlname, "")
else:
extract = lambda params, tlname=tlname: params[tlname]
def convert_port(params, extract=extract, tlname=tlname):
value = extract(params)
if isinstance(value, int):
# Allow people to pass in a port number as an integer
return ":%d" % (value,)
if value == "":
# No port given? Use the default. (Don't include the ':' here.)
return ""
if value.isdigit():
return ":" + value
raise TypeError("Port template parameter %r is not an integer (%r)" %
(tlname, value))
yield convert_port
# Handle the text fields which are escaped via URL-encoded UTF-8
def _parse_template(template):
for m in template_pat.finditer(template):
if m.group("tlname") is None:
# "ascii" to ensure that no Unicode characters are in the template
yield m.group(0).encode("ascii") # You must pre-encode non-ASCII text yourself
else:
if _is_optional(m):
                def convert_field(params, tlname=m.group("tlname")):
                    return urllib.quote_plus(params.get(tlname, "").encode("utf8"))
            else:
                def convert_field(params, tlname=m.group("tlname")):
                    return urllib.quote_plus(params[tlname].encode("utf8"))
            yield convert_field
def decompose_template(uri):
"""Internal function to break down an OpenSearch template into its Template terms"""
# For use in Akara, the scheme and host name are required, and the
# "uri" syntax is defined from RFC 3986 + OpenSearch templates.
# I'll make life easier by working with a string without the {} templates.
def check_and_replace_template_field(m):
if m.group("tprefix") is not None:
raise TypeError("Template prefix not supported in Akara (in %r)" % (m.group(0),))
if m.group("tlname") == "":
raise TypeError("Empty template variable in %r" % (uri,))
return "X" * (m.end() - m.start())
xuri = tparameter_pat.sub(check_and_replace_template_field, uri)
# Make sure that no "{" or "}" characters are present!
if "{" in xuri:
        raise TypeError(
            "Unexpected '{' found in URI at position %d" % (xuri.index("{")+1,))
if "}" in xuri:
raise TypeError(
"Unexpected '}' found in URI at position %d" % (xuri.index("}")+1,))
parts = []
# "http:", "ftp:", "{scheme}:"
start, subparts = _parse_scheme(uri)
parts.extend(subparts)
# Check for the "//" in things like "http://example.com"
if uri[start:start+2] != "//":
raise TypeError("Missing required '//' in URI (scheme and hostname must be given)")
assert isinstance(parts[-1], basestring)
    # This is either something like ["http:"] or something like [<function>, ":"]
# Optimize by merging the last item with the "//"
parts[-1] += "//"
start += 2
# [tuserinfo "@"] thost [ ":" tport ]
# The OpenSearch template makes this harder to find. I have to consider:
# http://example.com?spam
# http://{host?}?spam
# http://example.com/spam
# http://example.com#spam
# ftp://userid:password@ftp
# ftp://{userid?}:{password?}@{userid}.example.com/
# Since I've replaced the templates with "X"s, this becomes a lot easier.
end = _find_netloc(xuri, start)
netloc = uri[start:end]
xnetloc = xuri[start:end]
# The userid portion is encoded different than the hostname
parts.extend(_parse_netloc(netloc, xnetloc))
# And the rest is a simple encoding
parts.extend(_parse_template(uri[end:]))
return parts
class Template(object):
"""A parsed OpenSearch Template object.
Use 'make_template(template_string)' to make a Template instance.
"""
def __init__(self, template, terms):
"""You should not call this constructor directly."""
self.template = template
self.terms = terms
def substitute(self, **kwargs):
"""Use kwargs to fill in the template fields.
        Keywords unknown to the template are ignored.
"""
        #XXX: this used to use kwargs, but that's not a good idea because not
        # all template field names are valid Python identifiers (see the note
        # in apply_template below).
results = []
for term in self.terms:
if isinstance(term, basestring):
results.append(term)
else:
results.append(term(kwargs))
return "".join(results)
def make_template(template):
"""Given an OpenSearch template, return a Template instance for it.
>>> template = make_template('http://localhost/search?q={term}')
>>> template.substitute(term='opensearch syntax')
'http://localhost/search?q=opensearch+syntax'
>>>
"""
terms = decompose_template(template)
return Template(template, terms)
def apply_template(template, **kwargs):
"""Apply the kwargs to the template fields and return the result
>>> apply_template('http://{userid}.example.com/status', userid='anonymous')
'http://anonymous.example.com/status'
>>>
Note: it's very common for URI components to be invalid as Python IDs.
In that case you have to invoke as follows:
apply_template(tpl, **{'a-b': 'cd'})
"""
t = make_template(template)
return t.substitute(**kwargs) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/opensearch.py | opensearch.py |
import os
import sys
import socket
import logging
import signal
import akara
from akara import read_config
from akara import logger, logger_config
from akara.multiprocess_http import AkaraPreforkServer
from akara import global_config
# Need this in order to install "/" as service.list_services. I think
# this code is somewhat clunky. There should be no reason to need the
# import here, but something has to install the top-level search
# handler, and doing it via a simple_service just makes things, well,
# simple. But the registry can't import services because services
# needs a fully-loaded registry to register things.
# Threw my hands up in the sky and broke the reference here.
# Caveat emptor.
from akara import services
def save_pid(pid_file):
"Save the current pid to the given PID filename"
# One line, newline terminated
pid_s = str(os.getpid()) + "\n"
try:
f = open(pid_file, "w")
except Exception, error:
raise Exception("Unable to open PID file: %s" %
(error,))
try:
try:
f.write(pid_s)
except Exception, error:
raise Exception("Unable to write to PID file %r: %s" %
(pid_file, error))
finally:
f.close()
def remove_pid(pid_file):
"Remove the given filename (which should be the PID file)"
try:
os.remove(pid_file)
except Exception, error:
if not os.path.exists(pid_file):
logger.error("Unable to remove PID file %r: %s",
pid_file, error)
else:
logger.info("Removed PID file %r", pid_file)
# There are two ways to run the Akara server, either in debug mode
# (running in the foreground, with the -X option) or in daemon mode
# (running in the background) which is the default. The latter is
# trickier to support.
# In that case the command-line program spawns off a new process,
# which is the master HTTP node ("the flup server"). It manages the
# subprocesses which actually handle the HTTP requests. The flup
# server starts up and either manages to set things up or fails
# because of some problem. The command-line program needs to exit with
# an error code if there was a problem, so there must be some sort of
# communications between the two.
# The solution is simple. Setup a pipe. The child sends either
# "success\n" or "failure\n" as appropriate. The parent (which is the
# command-line program) waits until it gets one of those messages.
class NotifyParent(object):
def __init__(self):
self.r_pipe, self.w_pipe = os.pipe()
def failure(self):
"Called in the child, when it must abort"
os.write(self.w_pipe, "failure\n")
def success(self):
"Called in the child, when it's ready for HTTP requests"
os.write(self.w_pipe, "success\n")
def read_and_close(self):
"Called in the parent, to wait for the child"
status = os.read(self.r_pipe, 1000)
os.close(self.r_pipe)
os.close(self.w_pipe)
return status
# Life is much easier in debug mode. There's no need to communicate
# anything to the non-existent parent.
class NoParent(object):
def failure(self):
pass
def success(self):
pass
def demonize():
notify_parent = NotifyParent()
if os.fork():
# In the command-line parent. Wait for child status.
status = notify_parent.read_and_close()
if status.startswith("success"):
raise SystemExit(0)
else:
raise SystemExit(1)
# In the child, which is the flup server.
try:
# Create a new session with this process as the group leader
try:
setsid = os.setsid
except AttributeError:
os.setpgid(0, 0)
else:
setsid()
except:
notify_parent.failure()
raise
return notify_parent
# Sets up the global_config module contents
def set_global_config(settings):
for name, value in settings.items():
setattr(global_config, name, value)
def main(args):
config_filename = args.config_filename
debug = args.debug
skip_pid_check = args.skip_pid_check
first_time = True
old_server_address = None
sock = None
while 1:
# This is the main loop for the flup server.
# Why is it a loop? A SIGHUP sent to the server
# will shut down flup then reread the configuration
# file, reload the extension modules, and start
# the flup server again.
try:
settings, config = read_config.read_config(config_filename)
except read_config.Error, err:
logger.fatal(str(err))
if first_time:
raise SystemExit("Cannot start Akara. Exiting.")
else:
raise SystemExit("Cannot restart Akara. Exiting.")
akara.raw_config = config
# Establish the global configuration module
set_global_config(settings)
# In debug mode (-X), display all log messages.
# Otherwise, use the configuration level
if debug:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(settings["log_level"])
# Open this now, so any errors can be reported
try:
logger_config.set_logfile(settings["error_log"])
except IOError, err:
# Only give the 'akara setup' text here because it's where
# we get to with an empty/nonexistant configuration file.
logger.fatal("""\
Could not open the Akara error log:
%s
Does that directory exist and is it writeable?
You may want to use 'akara setup' to set up the directory structure.""" % err)
sys.exit(1)
# Configure the access log
try:
logger_config.set_access_logfile(settings["access_log"])
except IOError, err:
logger.fatal("""\
Could not open the Akara access log:
%s
Does that directory exist and is it writeable?""" % err)
sys.exit(1)
# Don't start if the PID file already exists.
pid_file = settings["pid_file"]
if first_time and (not skip_pid_check) and os.path.exists(pid_file):
msg = ("Akara PID file %r already exists. Is another Akara instance running?\n"
"If not, remove the file or use the '-f' option to skip this check")
logger.fatal(msg % (pid_file,))
raise SystemExit(1)
if debug or not first_time:
notify_parent = NoParent()
else:
# Spawn off the actual listener.
# The parent will always raise an exception, and never return.
try:
notify_parent = demonize()
except Exception, err:
# This can come from the parent or the child.
logger.critical("Cannot spawn HTTP server", exc_info=True)
raise SystemExit("Exiting - check the log file for details")
# At this point we are in the child. Set things up as
# far as we can go, then tell the parent that we're ready.
try:
server_address = settings["server_address"]
if server_address != old_server_address:
if sock is not None:
sock.close()
sock = socket.socket()
# XXX Should SO_REUSEADDR be a configuration setting?
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Disable Nagle's algorithm, which causes problems with
# keep-alive. See:
# http://stackoverflow.com/questions/1781766/
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
host, port = settings["server_address"]
if host:
description = "interface %r port %r" % (host, port)
else:
description = "port %r" % (port,)
try:
sock.bind(settings["server_address"])
except socket.error, error:
raise SystemExit("Can not bind to " + description)
logger.info("Listening to " + description)
sock.listen(socket.SOMAXCONN)
old_server_address = server_address
# NOTE: StartServers not currently supported and likely won't be.
# Why? Because the old algorithm would add/cull the server count
# within a few check intervals (each about 1 second), so it
# didn't have much long-term effect.
logger.info("Akara server is running")
server = AkaraPreforkServer(
minSpare = settings["min_spare_servers"],
maxSpare = settings["max_spare_servers"],
maxChildren = settings["max_servers"],
maxRequests = settings["max_requests_per_server"],
settings = settings,
config = config,
)
# Everything is ready to go, except for saving the PID file
if first_time:
save_pid(pid_file)
except:
notify_parent.failure()
logger.critical("Could not set up the Akara HTTP server", exc_info=True)
raise SystemExit("Akara HTTP server exiting - check the log file for details")
else:
notify_parent.success()
# Fully demonize - no more logging to sys.std*
# Close the standard file descriptors.
# Redirect sys.std* to the log file
if first_time and not debug:
logger_config.remove_logging_to_stderr()
logger_config.redirect_stdio()
try:
hupReceived = server.run(sock)
except SystemExit:
            # Propagate the SystemExit through the system. Remember,
# this is also the root of the call tree for the child
# which handles the request. The child exits at some point.
raise
# Strange. Why didn't flup disable this alarm?
signal.alarm(0)
if not hupReceived:
logger.info("Akara server shutting down.")
break
logger.info("Akara server is restarting.")
first_time = False
remove_pid(pid_file) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/run.py | run.py |
import httplib
import warnings
import functools
import cgi
import inspect
from cStringIO import StringIO
from xml.sax.saxutils import escape as xml_escape
from BaseHTTPServer import BaseHTTPRequestHandler
http_responses = BaseHTTPRequestHandler.responses
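# http_responses maps an integer status code to a (reason, message) pair,
# e.g. 404 -> ("Not Found", "Nothing matches the given URI").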
del BaseHTTPRequestHandler
from amara import tree, writers
from akara import logger, registry
__all__ = ("service", "simple_service", "method_dispatcher")
ERROR_DOCUMENT_TEMPLATE = """<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<title>%(reason)s</title>
</head>
<body>
<h1>%(reason)s</h1>
<p>
%(message)s
</p>
<h2>Error %(code)s</h2>
</body>
</html>
"""
class _HTTPError(Exception):
"Internal class."
# I don't think the API is quite right.
# Should code be a number or include the reason in a string, like "200 OK"?
def __init__(self, code, message=None):
assert isinstance(code, int) # Being a bit paranoid about the API
self.code = code
self.reason, self.message = http_responses[code]
if message is not None:
self.message = message
self.text = ERROR_DOCUMENT_TEMPLATE % dict(code=self.code,
reason=xml_escape(self.reason),
message=xml_escape(self.message))
self.headers = [("Content-Type", "text/html")]
def make_wsgi_response(self, environ, start_response):
start_response("%s %s" % (self.code, self.reason), self.headers)
return [self.text]
class _HTTP405(_HTTPError):
def __init__(self, methods):
_HTTPError.__init__(self, 405)
self.headers.append( ("Allow", ", ".join(methods)) )
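# For example, raising _HTTPError(404) during request handling produces a
# complete "404 Not Found" HTML error response; _HTTP405(["GET", "POST"])
# additionally sets the "Allow: GET, POST" header, as required for a 405.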
# Pull out any query arguments and set up input from any POST request
def _get_function_args(environ, allow_repeated_args):
request_method = environ.get("REQUEST_METHOD")
if request_method == "POST":
try:
request_length = int(environ["CONTENT_LENGTH"])
except (KeyError, ValueError):
raise _HTTPError(httplib.LENGTH_REQUIRED)
if request_length < 0:
raise _HTTPError(httplib.BAD_REQUEST)
request_bytes = environ["wsgi.input"].read(request_length)
request_content_type = environ.get("CONTENT_TYPE", None)
args = (request_bytes, request_content_type)
else:
args = ()
# Build up the keyword parameters from the query string
query_string = environ["QUERY_STRING"]
kwargs = {}
if query_string:
qs_dict = cgi.parse_qs(query_string)
if allow_repeated_args:
kwargs = qs_dict
else:
for k, v in qs_dict.iteritems():
if len(v) == 1:
kwargs[k] = v[0]
else:
raise _HTTPError(400,
message="Using the %r query parameter multiple times is not supported" % (k,))
return args, kwargs
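# Illustrative sketch (not part of the original source): for a GET
# request with QUERY_STRING "a=x&a=y&b=w",
#   allow_repeated_args=True  -> kwargs == {"a": ["x", "y"], "b": ["w"]}
#   allow_repeated_args=False -> raises _HTTPError(400) because "a" repeats
# For a POST, args is the tuple (request_bytes, request_content_type).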
######
def new_request(environ):
"prepare the akara.request and akara.response environment for a new request"
from akara import request, response
request.environ = environ
response.code = "200 OK"
response.headers = []
def send_headers(start_response, default_content_type, content_length):
"Send the WSGI headers, using values from akara.request.*"
from akara import response
code = response.code
if isinstance(code, int):
reason = http_responses[code][0]
code = "%d %s" % (code, reason)
has_content_type = False
has_content_length = False
for k, v in response.headers:
k = k.lower()
if k == "content-type":
has_content_type = True
elif k == "content-length":
has_content_length = True
if not has_content_type:
response.headers.append( ("Content-Type", default_content_type) )
if not has_content_length and content_length is not None:
response.headers.append( ("Content-Length", content_length) )
start_response(code, response.headers)
def convert_body(body, content_type, encoding, writer):
if isinstance(body, str):
if content_type is None:
content_type = "text/plain"
return [body], content_type, len(body)
if isinstance(body, tree.entity):
# XXX have Amara tell me the content type (with encoding)
# This is trac #29
if content_type is None:
if "html" in writer.lower():
content_type = "text/html"
else:
content_type = "application/xml"
w = writers.lookup(writer)
body = body.xml_encode(w, encoding)
return [body], content_type, len(body)
if isinstance(body, unicode):
body = body.encode(encoding)
if content_type is None:
content_type = "text/plain; charset=%s" % (encoding,)
return [body], content_type, len(body)
# Probably one of the normal WSGI responses
if content_type is None:
content_type = "text/plain"
return body, content_type, None
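# Illustrative sketch (not part of the original source) of how
# convert_body dispatches on a handler's return type:
#   convert_body("hi", None, "utf-8", "xml")
#       -> (["hi"], "text/plain", 2)
#   convert_body(u"caf\xe9", None, "utf-8", "xml")
#       -> (["caf\xc3\xa9"], "text/plain; charset=utf-8", 5)
# An amara tree.entity is serialized with the named writer and typed as
# "text/html" or "application/xml"; anything else is assumed to be a
# normal WSGI body and is passed through with a length of None.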
# The HTTP spec says a method is 1*CHAR, where CHAR is a US-ASCII
# character, excluding control characters and separators
# (like '(){}' and even ' '). We're a bit more strict than that
# because we haven't seen people use method names like "get".
def _check_is_valid_method(method):
min_c = min(method)
max_c = max(method)
if min_c < 'A' or max_c > 'Z':
raise ValueError("HTTP method %r value is not valid. "
"It must contain only uppercase ASCII letters" % (method,))
def _no_slashes(path):
if path is not None and "/" in path:
# Really these are more like mount points
raise ValueError("service paths may not contain a '/'")
def ignore_start_response(status, response_headers, exc_info=None):
pass
def _make_query_template(func):
argspec = inspect.getargspec(func)
if argspec.varargs is not None or argspec.keywords is not None:
# Can't handle *args or **kwargs in the parameter list
return None
if not argspec.args:
return ""
num_required = len(argspec.args) - len(argspec.defaults or ())
arg_info = [(arg, i >= num_required) for (i, arg) in enumerate(argspec.args)]
# I present these in alphabetical order to reduce template changes
# should the parameter list change.
arg_info.sort()
terms = []
for arg, is_optional in arg_info:
if is_optional:
fmt = "%s={%s?}"
else:
fmt = "%s={%s}"
terms.append( fmt % (arg, arg) )
return "?" + "&".join(terms)
def _handle_notify(environ, f, service_list):
for service_id in service_list:
service = registry.get_a_service_by_id(service_id)
service_environ = environ.copy()
service_environ["PATH_INFO"] = service.path
f.seek(0)
new_request(service_environ)
try:
service.handler(service_environ, ignore_start_response)
        except Exception:
            # XXX Decide whether notification failures should be logged
            # and swallowed; for now, let them propagate.
            raise
FROM_ENVIRON = object()
def _handle_notify_before(environ, body, service_list):
if not service_list:
return
if body is FROM_ENVIRON:
body = environ["wsgi.input"].read()
f = StringIO(body)
environ["wsgi.input"] = f
_handle_notify(environ, f, service_list)
f.seek(0)
def _handle_notify_after(environ, result, service_list):
if not service_list:
return result
f = StringIO()
for block in result:
f.write(block)
    # XXX Also need to set the CONTENT_TYPE (and others?)
environ["CONTENT_LENGTH"] = f.tell()
environ["wsgi.input"] = f
_handle_notify(environ, f, service_list)
f.seek(0)
return f
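# Illustrative sketch (not part of the original source): with
#   @simple_service("POST", SVC_ID, notify_after=[LOGGER_SVC_ID])
# (both identifiers hypothetical) the response body is buffered into a
# StringIO, installed as wsgi.input with CONTENT_LENGTH updated, and
# replayed through the registered service before the buffer is handed
# back to the real client.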
###### public decorators
## Guide to help in understanding
# @service(*args) -> returns a service_wrapper
#
# @service(*args)
# def func(): pass -> returns a wrapper() which calls func
def service(service_id, path=None,
content_type=None,encoding="utf-8", writer="xml",
pipelines = None,
query_template = None,
wsgi_wrapper=None,
notify_before = None,
notify_after = None):
_no_slashes(path)
def service_wrapper(func):
@functools.wraps(func)
def wrapper(environ, start_response):
_handle_notify_before(environ, FROM_ENVIRON, notify_before)
# 'service' passes the WSGI request straight through
# to the handler so there's almost no point in
# setting up the environment. However, I can conceive
# of tools which might access 'environ' directly, and
# I want to be consistent with the simple* interfaces.
new_request(environ)
result = func(environ, start_response)
# You need to make sure you sent the correct content-type!
result, ctype, length = convert_body(result, None, encoding, writer)
result = _handle_notify_after(environ, result, notify_after)
return result
pth = path
if pth is None:
pth = func.__name__
# If an outer WSGI wrapper was specified, place it around the service wrapper being created
if wsgi_wrapper:
wrapper = wsgi_wrapper(wrapper)
        # Exposed for inspection only; changing these would lose sync with the values closed over
wrapper.content_type = content_type
wrapper.encoding = encoding
wrapper.writer = writer
registry.register_service(service_id, pth, wrapper, query_template=query_template)
return wrapper
return service_wrapper
## Guide to help in understanding
# @simple_service(*args) -> returns a service_wrapper
#
# @simple_service(*args)
# def func(): pass -> returns a wrapper() which calls func
def simple_service(method, service_id, path=None,
content_type=None, encoding="utf-8", writer="xml",
allow_repeated_args=False,
query_template=None,
wsgi_wrapper=None,
notify_before=None, notify_after=None):
"""Add the function as an Akara resource
These affect how the resource is registered in Akara
method - the supported HTTP method (either "GET" or "POST")
service_id - a string which identifies this service; should be a URL
path - the local URL path to the resource (must not at present
contain a '/') If None, use the function's name as the path.
    query_template - An Akara URL service template (based on OpenSearch; see akara.opensearch)
        Can be used to help consumers compose resources within this service. The same
        template is used for all HTTP methods
These control how to turn the return value into an HTTP response
content_type - the response content-type. If not specified, and if
"Content-Type" is not listed in akara.response.headers then infer
the content-type based on what the decorated function returns.
(See akara.services.convert_body for details)
encoding - Used to convert a returned Unicode string or an Amara tree
to the bytes used in the HTTP response
writer - Used to serialize the Amara tree for the HTTP response.
This must be a name which can be used as an Amara.writer.lookup.
This affects how to convert the QUERY_STRING into function call parameters
allow_repeated_args - The query string may have multiple items with the
same name, as in "?a=x&a=y&a=z&b=w". If True, this is converted into
a function call parameter like "f(a=['x','y','z'], b=['w'])". If
False then this is treated as an error. Suppose the query string
contains no repeated arguments, as in "?a=x&b=w". If
allow_repeated_args is True then the function is called as
as "f(a=['x'], b=['w'])" and if False, like "f(a='x', b='w')".
A simple_service decorated function can get request information from
    akara.request and use akara.response to set the HTTP response code
and the HTTP response headers.
Here is an example of use:
@simple_service("GET", "http://example.com/get_date")
def date(format="%Y-%m-%d %H:%M:%S"):
'''get the current date'''
import datetime
return datetime.datetime.now().strftime(format)
which can be called with URLs like:
http://localhost:8880/date
http://localhost:8880/date?format=%25m-%25d-%25Y
Integration with other WSGI components:
The @simple_service decorator creates and returns a low-level handler
function that conforms to the WSGI calling conventions. However,
it is not safe to directly use the resulting handler with arbitrary
third-party WSGI components (e.g., to wrap the Akara handler with
    a WSGI middleware component). This is because Akara handlers return
values other than sequences of byte-strings. For example, they might
return XML trees, Unicode, or other data types that would not be
correctly interpreted by other WSGI components.
To integrate other WSGI components with Akara, use the wsgi_wrapper
argument to @simple_service. For example:
def wrapper(app):
        # Create a WSGI wrapper around the WSGI application 'app'
...
return wrapped_app
@simple_service("GET", "http://example.com/get_date", wsgi_wrapper=wrapper)
def date(format):
...
When specified, Akara will do the following:
- Arrange to have the wsgi_wrapper placed at the outermost layer
of Akara's processing. That is, control will pass into
the WSGI wrapper before any Akara-specific processing related
to the @simple_service handler takes place.
- Ensure that all output returned back to the WSGI wrapper
strictly conforms to the WSGI standard (is a sequence of bytes)
    The wrapper function given with wsgi_wrapper should accept a function
    as input and return a WSGI application as output. This application
should be a callable that accepts (environ, start_response).
See implementation notes in the code below.
"""
_no_slashes(path)
_check_is_valid_method(method)
if method not in ("GET", "POST"):
raise ValueError(
"simple_service only supports GET and POST methods, not %s" % (method,))
def service_wrapper(func):
@functools.wraps(func)
def wrapper(environ, start_response):
try:
if environ.get("REQUEST_METHOD") != method:
if method == "GET":
raise _HTTP405(["GET"])
else:
raise _HTTP405(["POST"])
args, kwargs = _get_function_args(environ, allow_repeated_args)
except _HTTPError, err:
return err.make_wsgi_response(environ, start_response)
if args:
body = args[0]
else:
body = ""
_handle_notify_before(environ, body, notify_before)
new_request(environ)
result = func(*args, **kwargs)
result, ctype, clength = convert_body(result, content_type, encoding, writer)
send_headers(start_response, ctype, clength)
result = _handle_notify_after(environ, result, notify_after)
return result
pth = path
if pth is None:
pth = func.__name__
# Construct the default query template, if needed and possible.
qt = query_template
if qt is None and method == "GET" and not allow_repeated_args:
qt = _make_query_template(func)
if qt is not None:
qt = pth + qt
        # If a wsgi_wrapper was given, wrap the service wrapper with it
if wsgi_wrapper:
wrapper = wsgi_wrapper(wrapper)
        # Exposed for inspection only; changing these would lose sync with the values closed over
wrapper.content_type = content_type
wrapper.encoding = encoding
wrapper.writer = writer
registry.register_service(service_id, pth, wrapper, query_template=qt)
return wrapper
return service_wrapper
# XXX idea for the majority of services which deal with XML
# @xml_service("http://example.com/cool_xml", "cool")
# def cool(xml_tree, param1):
# ...
# return xml_tree
#def xml_service()
## Use for services which dispatch based on HTTP method type (GET, POST, ...)
# Nomenclature: the service is identified by its service id.
# All handlers for a given service id implement a given protocol.
# Use a method_dispatcher when a service does different things
# based on the HTTP method (GET, POST, ...) and you want a
# different Python function to handle each method.
class service_method_dispatcher(object):
"""WSGI dispatcher based on request HTTP method
This is an internal class. You should not need to use it.
"""
def __init__(self, path, wsgi_wrapper=None):
self.path = path
self.method_table = {}
self.wsgi_wrapper = wsgi_wrapper
def add_handler(self, method, handler):
if method in self.method_table:
logger.warn("Replacing %r method handler for %r" %
(method, self.path))
else:
logger.info("Created %r method handler for %r" %
(method, self.path))
# If an outer WSGI wrapper was specified, wrap it around the handler method
if self.wsgi_wrapper:
handler = self.wsgi_wrapper(handler)
self.method_table[method] = handler
def __call__(self, environ, start_response):
method = environ.get("REQUEST_METHOD")
handler = self.method_table.get(method, None)
if handler is not None:
return handler(environ, start_response)
err = _HTTP405(sorted(self.method_table.keys()))
return err.make_wsgi_response(environ, start_response)
def head_method(self, environ, start_response):
handler = self.method_table.get("GET",None)
if handler is not None:
handler(environ, start_response)
return ['']
err = _HTTP405(sorted(self.method_table.keys()))
return err.make_wsgi_response(environ, start_response)
## Guide to help in understanding
# @method_dispatcher(*args) -> returns a method_dispatcher_wrapper
#
# @method_dispatcher(*args)
# def func(): pass -> returns a service_dispatcher_decorator
# service_dispatcher_decorator.method(*args) -> returns
# a service_dispatch_decorator_method_wrapper
#
# service_dispatcher_decorator.method(*args)
# def method_func(): pass --> returns a method_wrapper which calls method_func
#
# service_dispatcher_decorator.simple_method(*args)
# def method_func(): pass --> returns a method_wrapper which calls method_func
# This is the top-level decorator
def method_dispatcher(service_id, path=None, wsgi_wrapper=None, query_template=None):
"""Add an Akara resource which dispatches to other functions based on the HTTP method
Used for resources which handle, say, both GET and POST requests.
service_id - a string which identifies this service; should be a URL
path - the local URL path to the resource (must not at present
contain a '/') If None, use the function's name as the path.
wsgi_wrapper - An outer WSGI component to be wrapped around the methods
    query_template - An Akara URL service template (based on OpenSearch; see akara.opensearch)
        Can be used to help consumers compose resources within this service. The same
        template is used for all HTTP methods
Example of use:
@method_dispatcher("http://example.com/example_service")
def something():
'''docstring used for the service'''
@something.simple_method(method="GET", content_type="text/plain",
allow_repeated_args=True)
def something_get(names=[]):
return "Hi " + ", ".join(names) + "!\n"
@something.method("POST")
def something_post(environ, start_response):
start_response("200 OK", [("Content-Type", "image/gif")])
return image_bytes
If you have curl installed then you could access the GET option as:
        curl "http://localhost:8880/something?name=Andrew&name=Sara+Marie"
and access the POST option as:
curl --data "" http://localhost:8880/something
"""
_no_slashes(path)
def method_dispatcher_wrapper(func):
# Have to handle a missing docstring here as otherwise
# the registry will try to get it from the dispatcher.
doc = inspect.getdoc(func) or ""
pth = path
if pth is None:
pth = func.__name__
dispatcher = service_method_dispatcher(pth, wsgi_wrapper)
registry.register_service(service_id, pth, dispatcher, doc)
return service_dispatcher_decorator(dispatcher)
return method_dispatcher_wrapper
class service_dispatcher_decorator(object):
"""Helper class used by method_dispatcher to add new handlers to the given resource
You should not need to create this directly. Instead, use 'method_dispatcher'
"""
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def method(self, method, encoding="utf-8", writer="xml"):
"""Register a function as a resource handler for a given HTTP method
method - the relevant HTTP method
encoding - Used to convert a returned Unicode string or an Amara tree
to the bytes used in the HTTP response
writer - Used to serialize the Amara tree for the HTTP response.
This must be a name which can be used as an Amara.writer.lookup.
The decorated function must take the normal WSGI parameters
(environ, start_response) and it must call start_response with
all the needed headers, including Content-Type. The function
        may return an Amara tree or a Unicode string, in which case it
        is serialized and converted to bytes based on the 'writer' and
        'encoding' options.
"""
_check_is_valid_method(method)
def service_dispatch_decorator_method_wrapper(func):
@functools.wraps(func)
def method_wrapper(environ, start_response):
# 'method' passes the WSGI request straight through
# to the handler so there's almost no point in
# setting up the environment. However, I can conceive
# of tools which might access 'environ' directly, and
# I want to be consistent with the simple* interfaces.
new_request(environ)
result = func(environ, start_response)
# You need to make sure you sent the correct content-type!
result, ctype, clength = convert_body(result, None, encoding, writer)
return result
            # Exposed for inspection only; changing these would lose sync with the values closed over
method_wrapper.content_type = None
method_wrapper.encoding = encoding
method_wrapper.writer = writer
self.dispatcher.add_handler(method, method_wrapper)
return method_wrapper
return service_dispatch_decorator_method_wrapper
def simple_method(self, method, content_type=None,
encoding="utf-8", writer="xml", allow_repeated_args=False):
_check_is_valid_method(method)
if method not in ("GET", "POST"):
raise ValueError(
"simple_method only supports GET and POST methods, not %s" %
(method,))
def service_dispatch_decorator_simple_method_wrapper(func):
@functools.wraps(func)
def simple_method_wrapper(environ, start_response):
try:
args, kwargs = _get_function_args(environ, allow_repeated_args)
except _HTTPError, err:
return err.make_wsgi_response(environ, start_response)
new_request(environ)
result = func(*args, **kwargs)
result, ctype, clength = convert_body(result, content_type, encoding, writer)
send_headers(start_response, ctype, clength)
return result
            # Exposed for inspection only; changing these would lose sync with the values closed over
simple_method_wrapper.content_type = content_type
simple_method_wrapper.encoding = encoding
simple_method_wrapper.writer = writer
self.dispatcher.add_handler(method, simple_method_wrapper)
return simple_method_wrapper
return service_dispatch_decorator_simple_method_wrapper
# XXX Idea
#def xml_method(self, method="POST", content_type="text/xml"):
# ...
# Install some built-in services
@simple_service("GET", "http://purl.org/xml3k/akara/services/registry", "",
allow_repeated_args=False)
def list_services(service=None):
    return registry.list_services(ident=service) # XXX 'ident' or 'service' ?
import os
import shutil
from distutils.core import setup as _setup
from distutils.core import Command
from distutils import log
# Has not been tested against setuptools or Distribute and probably
# won't work with them.
# Let the Akara development team know if that's a problem.
from distutils.command.install import install as old_install
from distutils.dist import Distribution as _Distribution
import akara.read_config
MISSING_AKARA_EXTENSIONS = (
"If you call the Akara setup adapter for distutils.core.setup then\n"
"you need to include the 'akara_extensions' parameter, which is a\n"
"list of one or more filenames ending with '.py'")
EXTENSION_OPTIONS = [
("akara-config=", None,
("Location of an existing akara.conf use to get the 'modules' directory "
"if --akara-modules-dir is not specified")),
("akara-modules-dir=", None,
("Directory in which to place the new Akara extension and configuration files. "
"If not specified, get it from akara.conf")),
]
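# Illustrative sketch (not part of the original source) of a setup.py
# using this adapter; "akara_demo.py" and "akara_demo.conf" are
# hypothetical file names:
#
#   from akara.dist import setup
#   setup(name="akara-demo",
#         version="1.0",
#         akara_extensions=["akara_demo.py"],
#         akara_extension_confs=["akara_demo.conf"])
#
# Installing with
#   python setup.py install --akara-modules-dir=/path/to/modules
# copies the extension and configuration files into that directory.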
def setup(**kwargs):
if "akara_extensions" not in kwargs:
raise SystemExit(MISSING_AKARA_EXTENSIONS)
for filename in kwargs["akara_extensions"]:
if not filename.endswith(".py"):
raise SystemExit(
"Akara extensions must end with '.py' not %r" %
(filename,))
for filename in kwargs.get("akara_extension_confs", []):
if not filename.endswith(".conf"):
raise SystemExit(
"Akara extension configuration files must end with '.conf' not %r" %
(filename,))
new_kwargs = kwargs.copy()
# Create our new installation code.
# Figure out which command class to wrap.
cmdclass = new_kwargs.get('cmdclass', {})
if 'install' in cmdclass:
install = cmdclass['install']
else:
install = old_install
# A hook to add our own extensions
class my_install(install):
sub_commands = install.sub_commands + [
('install_akara_extensions', None)
]
user_options = install.user_options + EXTENSION_OPTIONS
def initialize_options(self):
install.initialize_options(self)
self.akara_config = None
self.akara_modules_dir = None
# Our installation extension
class install_akara_extensions(Command):
description = "Command to install akara extensions"
user_options = EXTENSION_OPTIONS
def initialize_options(self):
# I so don't know what I'm doing here, but it seems to work.
args = self.distribution.command_options["install"]
self.akara_modules_dir = self.akara_config = None
for key, value in args.items():
if key == "akara_modules_dir":
self.akara_modules_dir = value[1]
elif key == "akara_config":
self.akara_config = value[1]
def finalize_options(self):
if self.akara_modules_dir is None:
settings, config = akara.read_config.read_config(self.akara_config)
self.akara_modules_dir = settings["module_dir"]
def run (self):
dist = self.distribution
for (description, filenames) in (
("extension", dist.akara_extensions),
("configuration", dist.akara_extension_confs)):
for filename in filenames:
log.info("Installing Akara %s %r in %r" %
(description, filename, self.akara_modules_dir))
if not self.dry_run:
if not os.path.isdir(self.akara_modules_dir):
os.makedirs(self.akara_modules_dir) # Make sure the directory exists
shutil.copy(filename, self.akara_modules_dir)
new_cmdclass = {}
new_cmdclass.update(cmdclass)
new_cmdclass['install'] = my_install
new_cmdclass['install_akara_extensions'] = install_akara_extensions
new_kwargs['cmdclass'] = new_cmdclass
    # Handle overridden distclass
if 'distclass' in new_kwargs:
Distribution = new_kwargs['distclass']
else:
Distribution = _Distribution
class MyDistribution(Distribution):
def __init__(self, attrs=None):
for opt in ("akara_extensions",
"akara_extension_confs"):
assert not hasattr(self, opt)
setattr(self, opt, attrs.get(opt, []))
Distribution.__init__(self, attrs)
new_kwargs['distclass'] = MyDistribution
    return _setup(**new_kwargs)
import os
import logging
import inspect
class Error(Exception):
pass
DEFAULT_SERVER_CONFIG_FILE = os.path.expanduser("~/.config/akara.conf")
class AkaraDefault:
Listen = 8880
ConfigRoot = "~/.local/lib/akara"
#"ServerRoot": None
#"InternalServerRoot": None
PidFile = "logs/akara.pid"
MinSpareServers = 5
MaxSpareServers = 10
MaxServers = 150
MaxRequestsPerServer = 10000
ModuleDir = 'modules'
ModuleCache = 'caches'
ErrorLog = 'logs/error.log'
AccessLog = 'logs/access.log'
LogLevel = 'INFO'
_valid_log_levels = {
"CRITICAL": logging.CRITICAL,
"ERROR": logging.ERROR,
"WARN": logging.WARN,
"WARNING": logging.WARNING,
"INFO": logging.INFO,
"DEBUG": logging.DEBUG,
}
log = logging.getLogger("akara.server")
def _add_akara_defaults(akara_namespace):
# in-place modify the namespace
for name, value in inspect.getmembers(AkaraDefault):
if name.startswith("_"):
continue
if not hasattr(akara_namespace, name):
setattr(akara_namespace, name, value)
def read_config(config_file=None):
"Read an Akara configuration file and return the parsed settings"
if config_file is None:
config_file = DEFAULT_SERVER_CONFIG_FILE
try:
config = open(config_file).read()
except IOError, err:
raise Error("""\
Could not open Akara configuration file:
%s
To set up the default configuration file and directories use "akara setup"\
""" % (err,))
# XX better error reporting
try:
code = compile(config, config_file, "exec")
except SyntaxError, err:
raise Error("""\
Could not parse Akara configuration file:
%s
because: %s""" % (config_file, err))
namespaces = dict(__builtins__ = None, __name__ = "akara.conf")
exec code in namespaces
errmsg = None
if "Akara" not in namespaces:
raise Error("Configuration file missing required 'Akara' definition")
_add_akara_defaults(namespaces["Akara"])
settings = _extract_settings(namespaces["Akara"])
return settings, namespaces
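# Illustrative sketch (not part of the original source): akara.conf is
# plain Python defining an 'Akara' class; anything not set falls back
# to AkaraDefault. For example:
#
#   class Akara:
#       Listen = "localhost:8880"
#       ConfigRoot = "~/.local/lib/akara"
#       LogLevel = "DEBUG"
#
# read_config() execs the file and returns (settings, namespaces);
# here settings["server_address"] would be ("localhost", 8880).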
def _extract_settings(config):
"""Get parsed setting information from a config object
This does sanity checking on the input (like that port numbers
must be positive integers) and converts things into the
appropriate data type (like integers).
"""
# Some helper functions to get typed fields with good error reporting
def get(key):
value = getattr(config, key, None)
if value is None:
raise Error("Required 'Akara' configuration %r is missing" % (key,))
return value
def getstring(key):
value = get(key)
if not isinstance(value, basestring):
raise Error("'Akara' configuration %r must be a string, not %r" %
(key, value))
return value
def getint(key):
value = get(key)
try:
return int(value)
except ValueError:
raise Error("'Akara' configuration %r must be an integer, not %r" %
(key, value))
def getpositive(key):
        value = getint(key)
if value <= 0:
raise Error(
"'Akara' configuration %r must be a positive integer, not %r" %
(key, value))
return value
def getnonnegative(key):
value = getint(key)
        if value < 0:
raise Error(
"'Akara' configuration %r must be a non-negative integer, not %r" %
(key, value))
return value
settings = {}
# The value for 'Listen' can be:
# <port> as in 8080
# -or-
# <host>:<port> as in "localhost:8081"
addr = get('Listen')
if isinstance(addr, int):
host, port = ("", addr)
else:
if ':' in addr:
host, port_s = addr.rsplit(':', 1)
else:
host, port_s = '', addr
try:
port = int(port_s)
if port <= 0:
raise ValueError
except ValueError:
raise Error("Listen port must be a positive integer, not %r" % port_s)
settings["server_address"] = (host, port)
# Used to contract the full OpenSearch template to a given service.
# If not present, use the Listen host and port.
# (And if the host isn't present, use 'localhost'. It's not a good
# default but I'm not going to do a FQDN lookup here since that has
# side effects. Basically, if you need the name right, then set it.)
try:
server_root = getstring('ServerRoot')
except Error:
if port == 80:
fmt = "http://%(host)s/"
else:
fmt = "http://%(host)s:%(port)s/"
server_root = fmt % dict(host = (host or "localhost"), port = port)
    # Used only when an Akara service wants to call another Akara service.
# Needed for the (rare) cases when the listen server has a different
# local name than the published server.
try:
internal_server_root = getstring('InternalServerRoot')
except Error:
internal_server_root = server_root
settings["server_root"] = server_root
settings["internal_server_root"] = internal_server_root
config_root = getstring('ConfigRoot')
config_root = os.path.expanduser(config_root)
settings["config_root"] = os.path.abspath(config_root)
pid_file = getstring('PidFile')
settings["pid_file"] = os.path.join(config_root, pid_file)
error_log = getstring('ErrorLog')
settings["error_log"] = os.path.join(config_root, error_log)
access_log = getstring('AccessLog')
settings["access_log"] = os.path.join(config_root, access_log)
module_dir = getstring("ModuleDir")
settings["module_dir"] = os.path.join(config_root, module_dir)
module_cache = getstring("ModuleCache")
settings["module_cache"] = os.path.join(config_root, module_cache)
log_level_orig = getstring('LogLevel')
log_level_s = log_level_orig.upper()
if log_level_s in _valid_log_levels:
log_level = _valid_log_levels[log_level_s]
else:
raise Error(
"global setting 'LogLevel' is %r but must be one of: %s" %
(log_level_s, ", ".join(map(repr, _valid_log_levels))))
settings["log_level"] = log_level
settings["max_servers"] = getpositive("MaxServers")
settings["min_spare_servers"] = getnonnegative("MinSpareServers")
settings["max_spare_servers"] = getnonnegative("MaxSpareServers")
if settings["max_spare_servers"] < settings["min_spare_servers"]:
raise Error("MaxSpareServers (%r) must be greater than MinSpareServers (%r)" %
(settings["max_spare_servers"], settings["min_spare_servers"]))
settings["max_requests_per_server"] = getpositive("MaxRequestsPerServer")
    return settings
import sys
# Initializes logging and makes the logger public
from akara.logger_config import _logger as logger
# The contents of the "akara.conf" module, as a global namespace dict
raw_config = None
# Access to the per-module configuration. Used like:
# akara.module_config("akara.demo.xslt") -- full path name
# akara.module_config("xslt") -- in most cases, the last term works fine
#    akara.module_config(__name__) -- an easy way for an extension module to use its own name
# akara.module_config() -- get the __name__ automatically
def module_config(path = None):
if path is None:
# Let people call this without specifying a name
path = sys._getframe(1).f_globals["__name__"]
# The path can either be a full path, like "akara.demo.xslt"
# or a short name like "xslt". Look for the full name first.
# We use classes for the config information, which means
# it isn't so simple, as the class doesn't have a full name.
# Here's how to specify the full name, to match the path exactly:
#
# class xslt:
# akara_name = "akara.demo.xslt"
#
for name, obj in raw_config.items():
if name[:1] == "_":
continue
if hasattr(obj, "akara_name") and obj.akara_name == path:
return ModuleConfig(path, obj)
# There's also a shorthand notation, which is useful for
# the (expected to be) common case where there's no conflict;
# use the last term of the path as the name of the class.
class_name = path.rsplit(".", 1)[-1]
try:
klass = raw_config[class_name]
except KeyError:
# An extension should not require a configuration.
        # Testing for a configuration section adds 'if' (or try/except) checks, which complicates code.
# The purpose of returning a NoModuleConfig is to let people write code like:
# arg = module_config().get("order", "spam and eggs")
# and have code work as expected.
return NoModuleConfig(path)
else:
if hasattr(klass, "akara_name"):
path = klass.akara_name
return ModuleConfig(path, klass)
# Wrap the configuration object so the class attributes can be
# accessed as dictionaries via [] and get. Why? I think it simplifies
# parameter processing to use config.get("name", default) rather than
# getattr(config, "name", default) and because I think this will grow
# terms like "get_string()" to help with type validation.
class ModuleConfig(object):
def __init__(self, path, config_class):
self.path = path
self.config_class = config_class
def __getitem__(self, name):
try:
return getattr(self.config_class, name)
except AttributeError:
raise KeyError(name)
def get(self, name, default=None):
return getattr(self.config_class, name, default)
def require(self, name, what=None):
try:
return getattr(self.config_class, name)
except AttributeError:
pass
msg = (
"Akara configuration section %(path)r is missing the required parameter %(name)r"
% dict(path=self.path, name=name) )
if what is not None:
msg += ": " + what
raise AttributeError(msg)
def warn(self, name, default, what=None):
try:
return getattr(self.config_class, name)
except AttributeError:
pass
msg = "Akara configuration section %(path)r should have the parameter %(name)r"
if what is not None:
msg += " (%(what)s)"
msg += ". Using %(default)r instead."
msg = msg % dict(path=self.path, name=name, default=default, what=what)
logger.warn(msg)
return default
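# Illustrative sketch (not part of the original source): given this
# hypothetical section in akara.conf:
#
#   class xslt:
#       akara_name = "akara.demo.xslt"
#       transform_dir = "/path/to/transforms"
#
# an extension module could write:
#   config = module_config("akara.demo.xslt")   # or simply "xslt"
#   tdir = config.require("transform_dir", "directory holding the XSLT files")
#   limit = config.get("limit", 10)             # optional, with a default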
# Used when there is no configuration section for the module.
class NoModuleConfig(object):
def __init__(self, path):
self.path = path
def __getitem__(self, name):
raise KeyError(name)
def get(self, name, default=None):
return default
def __nonzero__(self):
# Can be used to test if there was a configuration section, as in
# if not module_config("Akara"): print "Something is wrong!"
return False
def require(self, name, what=None):
msg = ("Akara configuration section %(path)r is missing. It must have the "
"parameter %(name)r") % dict(path=self.path, name=name)
if what is not None:
msg += ": " + what
raise AttributeError(msg)
def warn(self, name, default, what=None):
msg = ("Akara configuration section %(path)r is missing. "
"It should have the parameter %(name)r")
if what is not None:
msg += " (%(what)s)"
msg += ". Using %(default)r instead."
msg = msg % dict(path=self.path, name=name, default=default, what=what)
logger.warn(msg)
return default
from version import version_info
__version__ = '.'.join(version_info)
import datetime
import os
import string
import sys
import time
import traceback
import urllib
from cStringIO import StringIO
import logging
from wsgiref.util import shift_path_info
from wsgiref.simple_server import WSGIRequestHandler
from akara import logger
from akara import registry
from akara.thirdparty import preforkserver, httpserver
access_logger = logging.getLogger("akara.access")
# AkaraPreforkServer creates and manages the subprocesses which are
# listening for HTTP requests. When a new connection request comes in
# it instantiates jobClass(sock, addr, *jobArgs) to process the
# connection.
# There is a minor complication here because the master node (which
# manages the subprocesses which are listening for HTTP requests) is
# the one which byte-compiles the extension modules during server
# start-up, as a sanity check that they make sense. The master process
# does not exec the extension modules. That is done in the child.
# Why wait? For one, it makes it hard for the extension modules to
# affect the master node. Core dumps from C extensions can't crash an
# Akara instance, slow memory leaks won't be a problem since flup
# automatically respawns a node after a number of requests, and so on.
# It's even possible to upgrade the extension modules and the modules
# it depends on then send a SIGHUP to the master to restart
# everything.
# The difficulty is that PreforkServer has no defined hook I can grab
# as a signal to exec the extension module bytecode. The jobClass is
# init'ed once for each request. I tried using a global variable to do
# the exec only when the first request comes in, but that slows down
# the first request (once for each spawned
# subprocess).
# Instead, by inspection I found that self._child() is an internal
# method that I can use to sneak in my exec before letting flup's
# child mainloop run.
class AkaraPreforkServer(preforkserver.PreforkServer):
def __init__(self, settings, config,
minSpare=1, maxSpare=5, maxChildren=50,
maxRequests=0):
preforkserver.PreforkServer.__init__(self,
minSpare=minSpare, maxSpare=maxSpare,
maxChildren=maxChildren, maxRequests=maxRequests,
jobClass=AkaraJob,
jobArgs=(settings, config))
self.config = config
def _child(self, sock, parent):
_init_modules(self.config)
preforkserver.PreforkServer._child(self, sock, parent)
# Once the flup PreforkServer has a request, it starts up an AkaraJob.
# I'll let paste's WSGIHandler do the work of converting the HTTP
# request to a WSGI request. I actually use my own AkaraWSGIHandler
# because I want to change a few settings.
# AkaraWSGIHandler's third parameter is an AkaraWSGIDispatcher
# instance; paste calls its wsgi_application() method for each request.
class AkaraJob(object):
def __init__(self, sock, addr, settings, config):
self._sock = sock
self._addr = addr
self.settings = settings # parsed settings as a dict
self.config = config # a ConfigParser
def run(self):
self._sock.setblocking(1)
logger.debug("Start request from address %r, local socket %r" %
(self._addr, self._sock.getsockname()))
handler = AkaraWSGIDispatcher(self.settings, self.config)
self.handler = AkaraWSGIHandler(self._sock, self._addr, handler)
logger.debug("End request from address %r, local socket %r" %
(self._addr, self._sock.getsockname()))
self._sock.close()
# Override a few of the default settings
class AkaraWSGIHandler(httpserver.WSGIHandler):
sys_version = None # Disable including the Python version number
server_version = "Akara/2.0" # Declare that we are an Akara server
protocol_version = "HTTP/1.1" # Support (for the most part) HTTP/1.1 semantics
# Suppress access log reporting from BaseHTTPServer.py
def log_request(self, code='-', size='-'):
pass
# This is the top-level WSGI dispatcher between paste.httpserver
# and Akara proper. It only understands how to get the first part of
# the path (called the "mount_point") and get the associated handler
# from the registry.
ERROR_DOCUMENT_TEMPLATE = """<?xml version="1.0" encoding="ISO-8859-1"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" lang="en" xml:lang="en">
<head>
<title>%(reason)s</title>
</head>
<body>
<h1>%(reason)s</h1>
<p>
%(message)s
</p>
<h2>Error %(code)s</h2>
</body>
</html>
"""
def _send_error(start_response, code, exc_info=None):
reason, message = WSGIRequestHandler.responses[code]
start_response("%d %s" % (code, reason), [("Content-Type", "text/html")],
exc_info=exc_info)
return ERROR_DOCUMENT_TEMPLATE % dict(code = code,
reason = reason,
message = message)
# Output will look like Apache's "combined log format".
# Here's an example based on the Apache documentation (it should be on a single line)
# 127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0"
# 200 2326 "http://www.example.com/start.html" "Mozilla/4.08 [en] (Win98; I ;Nav)"
# This definition comes from paste.translogger. For certainty's sake:
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
ACCESS_LOG_MESSAGE = (
'%(REMOTE_ADDR)s - %(REMOTE_USER)s [%(start_time)s] '
'"%(REQUEST_METHOD)s %(REQUEST_URI)s %(HTTP_VERSION)s" '
'%(status)s %(bytes)s "%(HTTP_REFERER)s" "%(HTTP_USER_AGENT)s"')
# This proved a lot more difficult than I thought it would be.
# I looked at the translogger solution, but I don't think it works
# across the change of timezones and I didn't want to use the '%b'
# time formatter because it is locale dependent.
def timetuple_to_datetime(t):
return datetime.datetime(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
_months = "XXX Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split()
def _get_time():
    now = time.localtime()
    utc_time = time.gmtime()
    tz_delta = timetuple_to_datetime(now) - timetuple_to_datetime(utc_time)
    # days*86400 + seconds keeps the sign right for timezones west of UTC
    tz_seconds = tz_delta.days * 86400 + tz_delta.seconds
    # Round to the nearest minute
    tz_minutes = (tz_seconds + 30)//60
    tz_hour, tz_minute = divmod(tz_minutes, 60)
    # I've got the timezone component. The rest is easy.
    # Apache's format is day/month/year, as in "10/Oct/2000:13:55:36 -0700".
    return "%02d/%s/%d:%02d:%02d:%02d %+03d%02d" % (
        now.tm_mday, _months[now.tm_mon], now.tm_year,
        now.tm_hour, now.tm_min, now.tm_sec,
        tz_hour, tz_minute)
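# Illustrative sketch (not part of the original source): with the
# day-first ordering above, a request handled at 13:55:36 local time
# on 10 Oct 2000 in a UTC-7 timezone gives
#   _get_time() -> "10/Oct/2000:13:55:36 -0700"
# matching the Apache example shown earlier.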
# Filter out empty fields, control characters, and space characters
_illegal = ("".join(chr(i) for i in range(33)) + # control characters up to space (=ASCII 32)
"".join(chr(i) for i in range(127, 256)) ) # high ASCII
_clean_table = string.maketrans(_illegal, "?" * len(_illegal))
def _clean(s):
# Check for empty fields
if not s:
return "-"
# Filter control characters. These can be used for
# escape character attacks against the terminal.
# Plus, spaces can mess up access log parsers.
return s.translate(_clean_table)
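# Illustrative sketch (not part of the original source):
#   _clean("")                   -> "-"
#   _clean("Mozilla/4.08 [en]")  -> "Mozilla/4.08?[en]"   (space replaced)
#   _clean("evil\x1b[2Jvalue")   -> "evil?[2Jvalue"       (ESC replaced)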
class AkaraWSGIDispatcher(object):
def __init__(self, settings, config):
self.server_address = settings["server_address"]
def wsgi_application(self, environ, start_response):
# There's some sort of problem if the application
# hasn't read any data. This can occur, for example,
# when sending a POST to a GET service, returning
# a 405 message.
wsgi_input = environ["wsgi.input"]
try:
return self._wsgi_application(environ, start_response)
finally:
# change the "if 1" to "if 0" and run
# test_server.test_405_error_message_mega
# You should get socket.error "Connection reset by peer" errors.
if 1 and isinstance(wsgi_input, httpserver.LimitedLengthFile):
# Consume something if nothing was consumed *and* work
# around a bug where paste.httpserver allows negative lengths
if (wsgi_input._consumed == 0 and wsgi_input.length > 0):
# This seems to work even if there's 10K of input.
wsgi_input.read(1)
def _wsgi_application(self, environ, start_response):
# Get information used for access logging
request_uri = urllib.quote(environ.get("SCRIPT_NAME", "") +
environ.get("PATH_INFO", ""))
if environ.get("QUERY_STRING"):
request_uri += "?" + environ["QUERY_STRING"]
# Convert all HEAD requests to GET requests before sending
# them through the stack. Process the result to conform to
# requirements for HEAD then strip out the body. This
# follows the Apache/mod_wsgi approach. See
# http://blog.dscpl.com.au/2009/10/wsgi-issues-with-http-head-requests.html
is_head_request = environ.get("REQUEST_METHOD") == "HEAD"
if is_head_request:
environ["REQUEST_METHOD"] = "GET"
access_data = dict(start_time = _get_time(),
request_uri = request_uri,
# Will get the following two from start_response_
status=None, content_length="-")
# Set up some middleware so I can capture the status and header
# information used for access logging.
def start_response_(status, headers, exc_info=None):
access_data["status"] = status.split(" ", 1)[0]
access_data["content_length"] = "-"
content_length = None
headers = list(headers)
for i, (k, v) in enumerate(headers):
s = k.lower()
if s == "content-length":
access_data["content_length"] = v
elif s == "allow":
# Append a HEAD if a GET is allowed.
# Can't simply test: if "GET" in s
# since the Allow might be for "GET2".
# Parse and test the fields.
terms = [term.strip() for term in v.split(",")]
if "GET" in terms:
terms.append("HEAD")
headers[i] = (k, ", ".join(terms))
# Forward things to the real start_response
return start_response(status, headers, exc_info)
# Get the handler for this mount point
if "/" not in environ.get("PATH_INFO", "/"):
# This happens with very ill-formed queries
# Like when you use httplib directly and forget the leading '/'.
return _send_error(start_response, 400)
mount_point = shift_path_info(environ)
# Call the handler, deal with any errors, do access logging
try:
try:
service = registry.get_service(mount_point)
except KeyError:
# Not found. Report something semi-nice to the user
return _send_error(start_response_, 404)
try:
result = service.handler(environ, start_response_)
if is_head_request:
# successful HEAD requests MUST return an empty message-body
return []
return result
except Exception, err:
exc_info = sys.exc_info()
try:
f = StringIO()
traceback.print_exc(file=f)
logger.error("Uncaught exception from %r (%r)\n%s" %
(mount_point, service.ident, f.getvalue()))
return _send_error(start_response_, 500, exc_info=exc_info)
finally:
del exc_info
finally:
self.save_to_access_log(environ, access_data)
def save_to_access_log(self, environ, access_data):
fields = dict(REMOTE_ADDR = _clean(environ.get("REMOTE_ADDR")),
REMOTE_USER = _clean(environ.get("REMOTE_USER")),
start_time = access_data["start_time"],
REQUEST_METHOD = _clean(environ.get("REQUEST_METHOD")),
REQUEST_URI = _clean(access_data["request_uri"]),
HTTP_VERSION = environ.get("SERVER_PROTOCOL"),
status = access_data["status"],
bytes = access_data["content_length"],
HTTP_REFERER = _clean(environ.get("HTTP_REFERER")),
HTTP_USER_AGENT = _clean(environ.get("HTTP_USER_AGENT")),
)
access_logger.debug(ACCESS_LOG_MESSAGE % fields)
###### Support extension modules
# This used to be in its own module but that module was so small and
# almost pointless so I moved it into here. It doesn't feel quite
# right to be part of multiprocess_http, but it's close enough.
# The master HTTP process uses this module to import the modules and
# convert them into byte code with the correct globals(). It does not
# exec the byte code. That's the job for the spawned-off HTTP listener
# classes.
def _init_modules(config):
try:
modules = config["MODULES"]
except KeyError:
logger.error("Akara config file is missing the 'MODULES' variable.\n"
"No extensions will be installed.")
return
for module_name in modules:
# import the module
try:
__import__(module_name)
except:
logger.error(
"Unable to initialize module %r - skipping rest of module" % (module_name,),
            exc_info = True)
import sys
import logging
from cStringIO import StringIO
__all__ = ("logger", "set_logfile", "remove_logging_to_stderr")
# Create the logger here but mark it as private.
# The other modules can access this as "akara.logger"
_logger = logging.getLogger('akara')
# Make the log messages look like:
# Jul 21 01:39:03 akara[11754]: [error] Traceback (most recent call last):
_default_formatter = logging.Formatter(
"%(asctime)s %(name)s[%(process)s]: [%(levelname)s] %(message)s",
"%b %d %H:%M:%S")
# Make special log levels for stdout and stderr.
# Makes the logging messages easier to read.
STDOUT, STDERR = 22, 21
logging.addLevelName(STDERR, "stderr")
logging.addLevelName(STDOUT, "stdout")
# The current output stream for the Akara server.
# Gets initialized in a bit.
_current_handler = None
def set_logfile(f):
"""Direct (or redirect) error logging to a file handle"""
global _current_handler
new_handler = logging.FileHandler(f)
new_handler.setFormatter(_default_formatter)
_logger.addHandler(new_handler)
if _current_handler is not None:
_logger.removeHandler(_current_handler)
_current_handler = new_handler
## Part of initialization, to log to stderr
# Set the default logger to stderr
def _init_stderr_handler():
new_handler = logging.StreamHandler(sys.stderr)
new_handler.setFormatter(_default_formatter)
_logger.addHandler(new_handler)
return new_handler
# Save this so I can remove it for later, if requested
_stderr_handler = _init_stderr_handler()
# Then forget about it. It's still registered in the error handler.
_current_handler = None
# At this point there is logging to stderr, and it cannot be clobbered
# by set_logfile.
# Later on it is possible to remove the stderr handler
def remove_logging_to_stderr():
"Disable logging to stderr. This cannot be re-enabled."
global _stderr_handler
if _stderr_handler is not None:
_logger.removeHandler(_stderr_handler)
_stderr_handler = None
# This is a simple redirector.
# It fails if none of your prints end with a "\n".
# Don't do that. ;)
class WriteToLogger(object):
def __init__(self, loglevel):
self.loglevel = loglevel
self.chunks = []
def write(self, s):
if s.endswith("\n"):
text = "".join(self.chunks) + s[:-1]
_logger.log(self.loglevel, text)
else:
self.chunks.append(s)
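# Illustrative sketch (not part of the original source): once
# redirect_stdio() below has run, a plain
#   print "hello from a service"
# in an extension module becomes a log record at the custom STDOUT
# level (22), formatted like any other Akara log line; partial writes
# are buffered until a "\n" arrives.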
def redirect_stdio():
sys.stdin = StringIO("")
sys.stdout = WriteToLogger(STDOUT)
sys.stderr = WriteToLogger(STDERR)
######## Access logger
# Use the logging mechanism to deal with access logging
# I think this is a bit more cute than I would like.
# Log at the DEBUG level to akara.access.
# That always writes to the _access_logger because of the .setLevel(DEBUG)
# The log event trickles up to the 'akara' logger.
# That only displays in debug mode (most likely with -X)
# Downside: it displays with the standard Akara log prefix
_access_logger = logging.getLogger("akara.access")
_access_logger.setLevel(logging.DEBUG)
_access_log_formatter = logging.Formatter("%(message)s")
_access_handler = None
def set_access_logfile(f):
global _access_handler
new_access_handler = logging.FileHandler(f)
new_access_handler.setFormatter(_access_log_formatter)
_access_logger.addHandler(new_access_handler)
if _access_handler is not None:
_access_logger.removeHandler(_access_handler)
    _access_handler = new_access_handler
import re
import sys
import time
# Used for locking the cache of XSLT processors
import thread
# Used to check whether or not an XSLT stylesheet has been modified
from stat import ST_MTIME
from os import stat
import amara
from amara.lib import iri
from amara.lib import inputsource
from amara.xpath.util import parameterize
from amara.xslt.processor import processor
from amara import sax
# NOTE: the transform manager below also references names that are not
# imported here (Processor, InputSource, Uri, iterwrapper,
# get_request_url, setup_xslt_params, BaseFilter); they appear to come
# from legacy 4Suite and wsgixml-era helpers defined elsewhere.
USER_AGENT_REGEXEN = [
'.*MSIE 5.5.*',
'.*MSIE 6.0.*',
'.*MSIE 7.0.*',
'.*Gecko/2005.*',
'.*Gecko/2006.*',
'.*Opera/9.*',
'.*AppleWebKit/31.*',
'.*AppleWebKit/4.*',
]
USER_AGENT_REGEXEN = [ re.compile(regex) for regex in USER_AGENT_REGEXEN ]
WSGI_NS = u'http://www.wsgi.org/'
MTYPE_PAT = re.compile('.*/.*xml.*')
#We use processor pooling, but we eventually want to get it to the point where we can just
#cache compiled stylesheets
#FIXME: does not yet handle extensions
DUMMY_SOURCE_DOC_URI = "http://purl.org/xml3k/akara/transform/source-doc"
class processor_info:
def __init__(self, lock, last_modified, instance):
#Even if you're using multiple processes rather than threads use locking, just to be safe
self.lock = lock
self.last_modified = last_modified
        self.instance = instance
class processor_pool:
"""
A hash table (LRU trimmed) that caches from XSLT transform tuples to prepared
processor instances
"""
def __init__(self):
#Even if you're using multiple processes rather than threads use locking, just to be safe
self._main_lock = thread.allocate_lock()
self._granular_locks = {}
self._processors = {}
def get_processor(self, transform_hash, ext_functions=None, ext_elements=None):
try:
# First make sure we avoid race conditions...
self._main_lock.acquire()
if transform_hash not in self._granular_locks:
self._granular_locks[transform_hash] = thread.allocate_lock()
finally:
self._main_lock.release()
proc = None
try:
self._granular_locks[transform_hash].acquire()
# If we do not have that stylesheet yet in the pool, let's add it
if not self._processors.has_key(transform_hash):
                proc_inst = processor()
                self._processors[transform_hash] = processor_info(
                    thread.allocate_lock(), stat(transform_hash)[ST_MTIME], proc_inst
                )
                # NOTE: the register* calls use the legacy 4Suite naming,
                # kept as in the original code
                for (ns, local), func in (ext_functions or {}).items():
                    proc_inst.registerExtensionFunction(ns, local, func)
                for (ns, local), elem in (ext_elements or {}).items():
                    proc_inst.registerExtensionElement(ns, local, elem)
self._processors[transform_hash].instance.append_transform(iri.os_path_to_uri(transform_hash))
# If the stylesheet has been modified, reload it
elif stat(transform_hash)[ST_MTIME] != self._processors[transform_hash].last_modified:
self._processors[transform_hash].instance.reset()
self._processors[transform_hash].last_modified = stat(transform_hash)[ST_MTIME]
self._processors[transform_hash].instance.append_transform(iri.os_path_to_uri(transform_hash))
# now we can lock the processor...
            self._processors[transform_hash].lock.acquire()
proc = self._processors[transform_hash].instance
finally:
self._granular_locks[transform_hash].release()
# ... and return it
return proc
def release_processor(self, transform_hash):
try:
            if self._processors[transform_hash].lock.locked():
                self._processors[transform_hash].lock.release()
except: pass
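# Illustrative sketch (not part of the original source) of the
# intended acquire/run/release pattern; the transform path is
# hypothetical and processor.run() is assumed to accept an amara
# inputsource:
#
#   pool = processor_pool()
#   proc = pool.get_processor("/path/to/transform.xslt")
#   try:
#       result = proc.run(inputsource(document_bytes))
#   finally:
#       pool.release_processor("/path/to/transform.xslt")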
class find_xslt_pis(sax.ContentHandler):
def __init__(self, parser):
parser.setContentHandler(self)
self.parser = parser
return
def startDocument(self):
self.ecount = 0
self.xslt_pi = None
def startElementNS(self, name, qname, attribs):
self.ecount += 1
if self.ecount == 2:
#We're now within the doc proper, so we're done
self.parser.setProperty(sax.PROPERTY_YIELD_RESULT, self.xslt_pi)
return
def processingInstruction(self, target, data):
if target == u'xml-stylesheet':
data = data.split()
pseudo_attrs = {}
for d in data:
seg = d.split('=')
if len(seg) == 2:
pseudo_attrs[seg[0]] = seg[1][1:-1]
# PI must have both href, type pseudo-attributes;
# type pseudo-attr must match valid XSLT types;
# media pseudo-attr must match preferred media
# (which can be None)
if (pseudo_attrs.has_key('href')
and pseudo_attrs.has_key('type')
and pseudo_attrs['type'] in Processor.XSLT_IMT):
self.xslt_pi = pseudo_attrs['href']
self.parser.setProperty(sax.PROPERTY_YIELD_RESULT, self.xslt_pi)
return
ACTIVE_FLAG = 'http://purl.org/xml3k/akara/transform/active'
SERVER_SIDE_FLAG = 'http://purl.org/xml3k/akara/transform/force-server-side'
CACHEABLE_FLAG = 'http://purl.org/xml3k/akara/transform/cacheable'
class xslt_transform_manager(object):
"""
Middleware for XSLT transform processing.
Also optionally checks for XSLT transform capability in the client and
performs the transform on the server side if one is required, and the
client can't do it
"""
def __init__(self, app, use_wsgi_env=True, stock_xslt_params=None,
ext_modules=None):
"""
use_wsgi_env - Optional bool determining whether to make the
WSGI environment available to the XSLT as top level parameter
overrides (e.g. wsgi:SCRIPT_NAME and wsgi:wsgi.url_scheme).
Only passes on values it can handle (UTF-8 strings, Unicode,
            numbers, booleans, lists of "nodes"). Defaults to True.
        stock_xslt_params - optional dict of dicts to also pass along as XSLT
            params. The outer dict is of the form: {<namespace>: <inner-dict>}
And the inner dicts are of the form {pname: pvalue}. The keys
(pname) may be given as unicode objects if they have no namespace,
or as (uri, localname) tuples if they do. The values are
(UTF-8 strings, Unicode, numbers, boolean, lists of "nodes").
This is usually used for passing configuration info into XSLT
ext_modules - Optional list of modules with XPath and XSLT extensions
"""
#Set-up phase
self.wrapped_app = app
self.use_wsgi_env = use_wsgi_env
self.stock_xslt_params = stock_xslt_params or {}
self.ext_modules = ext_modules or []
self.processor_cache = {}
self.path_cache = {}
return
def __call__(self, environ, start_response):
#Guess whether the client supports XML+XSLT?
#See: http://copia.ogbuji.net/blog/2006-08-26/LazyWeb_Ho
client_ua = environ.get('HTTP_USER_AGENT', '')
path = environ['PATH_INFO']
        send_browser_xslt = any(ua_pat.match(client_ua) is not None
                                for ua_pat in USER_AGENT_REGEXEN)
#We'll hack a bit for dealing with Python's imperfect nested scopes.
response_params = []
def start_response_wrapper(status, response_headers, exc_info=None):
#Assume response does not use XSLT; do not activate middleware
environ[ACTIVE_FLAG] = False
#Check for response content type to see whether it is XML
for name, value in response_headers:
#content-type value is a media type, defined as
#media-type = type "/" subtype *( ";" parameter )
media_type = value.split(';')[0]
if ( name.lower() == 'content-type'
and MTYPE_PAT.match(media_type)):
#.endswith('/xml')
# or media_type.find('/xml+') != -1 )):
environ[ACTIVE_FLAG] = True
response_params.extend([status, response_headers, exc_info])
#Replace any write() callable with a dummy that gives an error
#The idea is to refuse support for apps that use write()
def dummy_write(data):
raise RuntimeError('applyxslt does not support the deprecated write() callable in WSGI apps')
return dummy_write
#Get the iterator from the application that will yield response
#body fragments
iterable = self.wrapped_app(environ, start_response_wrapper)
(status, response_headers, exc_info) = response_params
force_server_side = environ.get(SERVER_SIDE_FLAG, False)
send_browser_xslt = send_browser_xslt and not force_server_side
#import pprint; pprint.pprint(environ)
#This function processes each chunk of output (simple string) from
#the app, returning The modified chunk to be passed on to the server
def next_response_block(response_iter):
if send_browser_xslt or not environ[ACTIVE_FLAG]:
#The client can handle XSLT, or it's not an XML source doc,
#so nothing for this middleware to do
start_response(status, response_headers, exc_info)
                for block in response_iter:
yield block
elif path in self.path_cache:
print >> sys.stderr, 'Using cached result for path', path
imt, content = self.path_cache[path]
response_headers.append(('content-type', imt))
start_response(status, response_headers, exc_info)
yield content
else:
yield produce_final_output(''.join(response_iter))
return
#After the app has finished sending its response body fragments
#if transform is required, it's necessary to send one more chunk,
#with the fully transformed result
def produce_final_output(response, response_headers=response_headers):
log = sys.stderr
if not send_browser_xslt and environ[ACTIVE_FLAG]:
use_pi = False
if force_server_side and force_server_side != True:
#True is a special flag meaning "don't delegate to the browser but still check for XSLT PIs"
xslt = force_server_side
else:
#Check for a Stylesheet PI
parser = sax.reader()
parser.setFeature(sax.FEATURE_GENERATOR, True)
handler = find_xslt_pis(parser)
pi_iter = parser.parse(inputsource(response))
try:
#Note: only grabs the first PI. Consider whether we should handle multiple
xslt = pi_iter.next()
except StopIteration:
xslt = None
use_pi = True
if xslt:
xslt = xslt.encode('utf-8')
result = StringIO()
#self.xslt_sources = environ.get(
# 'wsgixml.applyxslt.xslt_sources', {})
source = InputSource.DefaultFactory.fromString(
response, uri=get_request_url(environ))
params = {}
for ns in self.stock_xslt_params:
params.update(setup_xslt_params(ns, self.stock_xslt_params[ns]))
start = time.time()
if xslt in self.processor_cache:
processor = self.processor_cache[xslt]
#Any transform would have already been loaded
use_pi = False
print >> log, 'Using cached processor instance for transform', xslt
else:
print >> log, 'Creating new processor instance for transform', xslt
processor = Processor.Processor()
if self.ext_modules:
processor.registerExtensionModules(self.ext_modules)
if self.use_wsgi_env:
params.update(setup_xslt_params(WSGI_NS, environ))
#srcAsUri = OsPathToUri()
#if False:
                        if 'paste.recursive.include' in environ:
                            #paste's recursive facilities are available,
                            #so we can get the XSLT with a middleware call
                            #rather than a full Web invocation
#print environ['paste.recursive.include']
xslt_resp = environ['paste.recursive.include'](xslt)
#FIXME: this should be relative to the XSLT, not XML
#print xslt_resp, xslt_resp.body
isrc = InputSource.DefaultFactory.fromString(
xslt_resp.body, get_request_url(environ))
processor.appendStylesheet(isrc)
else:
#We have to make a full Web call to get the XSLT.
#4Suite will do that for us in processing the PI
if not use_pi:
uri = Uri.Absolutize(xslt, get_request_url(environ))
isrc = InputSource.DefaultFactory.fromUri(uri)
processor.appendStylesheet(isrc)
self.processor_cache[xslt] = processor
processor.run(source, outputStream=result,
ignorePis=not use_pi, topLevelParams=params)
#Strip content-length if present (needs to be
#recalculated by server)
#Also strip content-type, which will be replaced below
response_headers = [ (name, value)
for name, value in response_headers
if ( name.lower()
not in ['content-length', 'content-type'])
]
#Put in the updated content type
imt = processor.outputParams.mediaType
content = result.getvalue()
if environ.get(CACHEABLE_FLAG):
self.path_cache[path] = imt, content
response_headers.append(('content-type', imt))
start_response(status, response_headers, exc_info)
end = time.time()
print >> log, '%s: elapsed time: %0.3f\n'%(xslt, end-start)
#environ['wsgi.errors'].write('%s: elapsed time: %0.3f\n'%(xslt, end-start))
return content
#If it reaches this point, no XSLT was applied.
return
return iterwrapper(iterable, next_response_block)
class PicketFilter(BaseFilter):
"""
A filter that applies XSLT templates to XML content
For any published method with this filter attached, return an
instance of the Picket class if you want an XSLT transform invoked.
The string output of the transform becomes the Web response body
"""
def __init__(self, default_stylesheet=None, default_content_type=None, extension_dir=None):
self.processorPool = ProcessorPool()
self.default_stylesheet = default_stylesheet
self.default_content_type = default_content_type
self.extensionDir = extension_dir #cherrypy.config.get("picket.extensionDir")
self.ext_functions, self.ext_elements = _getExtensions(self.extensionDir)
def before_finalize(self):
picket = cherrypy.response.body
if not isinstance(picket, Picket): return
stylesheet = self.default_stylesheet
if picket.stylesheet:
stylesheet = picket.stylesheet
if stylesheet is None:
# If a stylesheet was not set, then raise an error
            raise ValueError("Missing XSLT stylesheet")
extDir = cherrypy.config.get("picket.extensionDir")
if extDir != self.extensionDir:
            self.extensionDir = extDir
self.ext_functions, self.ext_elements = _getExtensions(self.extensionDir)
try:
processor = self.processorPool.get_processor(
stylesheet, self.ext_functions, self.ext_elements)
cherrypy.response.body = processor.run(
DefaultFactory.fromString(picket.document,
picket.uri),
topLevelParams=picket.parameters)
if self.default_content_type:
cherrypy.response.headers['Content-Type'] = self.default_content_type
if picket.content_type:
cherrypy.response.headers['Content-Type'] = picket.content_type
finally:
self.processorPool.release_processor(stylesheet)
def _getExtensions(extensionDirectory):
import glob
import os
retval = ({}, {})
    try:
        # extensionDirectory may be None when not configured; isdir() then
        # raises TypeError and we return the empty mappings
        os.path.isdir(extensionDirectory)
    except TypeError:
        return retval
for extensionPath in glob.glob(extensionDirectory + "/*.py"):
try:
ns = {}
execfile(extensionPath, ns, ns)
retval[0].update(ns.get("ExtFunctions", {}))
retval[1].update(ns.get("ExtElements", {}))
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Ignore extension files that fail to load; they simply
            # contribute no functions or elements
            pass
return retval
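# Illustrative sketch of an extension file that _getExtensions() above would
# pick up from the extension directory (the namespace and function below are
# hypothetical; a real file just needs to define ExtFunctions and/or
# ExtElements mappings):
#
#     def _upper(context, text):
#         return unicode(text).upper()
#
#     ExtFunctions = { (u'http://example.com/ns/ext', u'upper'): _upper }
#     ExtElements = {}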
import os
from wsgixml.applyxslt import xsltize
import pkg_resources
from cStringIO import StringIO
from Ft.Lib import Uri, UriException
from Ft.Xml import InputSource, CreateInputSource
from Ft.Xml.InputSource import InputSourceFactory
from Ft.Xml.Xslt.Processor import Processor
from Ft.Xml.Xslt.StylesheetReader import StylesheetReader
from Ft.Xml import Domlette, Parse
from pprint import pprint
class LocalTemplateResolver(Uri.FtUriResolver):
def normalize(self, uri_ref, base_uri):
return Uri.Absolutize(uri_ref, base_uri)
def _orig_resolve(self, uri, baseUri=None):
"""
This function takes a URI or a URI reference plus a base URI, produces
a normalized URI using the normalize function if a base URI was given,
then attempts to obtain access to an entity representing the resource
identified by the resulting URI, returning the entity as a stream (a
Python file-like object).
Raises a UriException if the URI scheme is unsupported or if a stream
could not be obtained for any reason.
"""
if baseUri is not None:
uri = self.normalize(uri, baseUri)
scheme = Uri.GetScheme(uri)
else:
scheme = Uri.GetScheme(uri)
# since we didn't use normalize(), we need to verify here
if scheme not in Uri.DEFAULT_URI_SCHEMES:
if scheme is None:
raise ValueError('When the URI to resolve is a relative '
'reference, it must be accompanied by a base URI.')
else:
raise UriException(UriException.UNSUPPORTED_SCHEME,
scheme=scheme,
resolver=self.__class__.__name__)
# Bypass urllib for opening local files. This means we don't get all
# the extra metadata that urllib adds to the stream (Last-modified,
# Content-Length, a poorly guessed Content-Type, and the URI), but
# we also avoid its potentially time-consuming socket.gethostbyname()
# calls, which aren't even warranted and are part of urllib's dubious
# interpretation of RFC 1738.
if scheme == 'file':
path = Uri.UriToOsPath(uri, attemptAbsolute=False)
try:
stream = file(path, 'rb')
except IOError, e:
raise UriException(UriException.RESOURCE_ERROR,
loc='%s (%s)' % (uri, path),
uri=uri, msg=str(e))
else:
# urllib2.urlopen, wrapped by us, will suffice for http, ftp,
# data and gopher
try:
stream = Uri.UrlOpen(uri)
except IOError, e:
raise UriException(UriException.RESOURCE_ERROR,
uri=uri, loc=uri, msg=str(e))
return stream
def resolve(self, uri, base_uri=None):
here = os.path.abspath('.')
if uri.startswith('local:'):
uri = uri[6:]
resource = os.path.join(self.templates, uri)
if os.path.exists(resource):
return file(resource, 'rb')
raise UriException(UriException.RESOURCE_ERROR,
uri=uri, loc=uri,
msg="The file did not exist in '%s'" % self.templates)
elif uri.startswith('pkg:'):
# format: package#path/to/file.xslt
usage = 'usage: package_name#path/to/file'
uri = uri[4:]
package, sep, path = uri.partition('#')
            if not package or not path:
raise UriException(
UriException.RESOURCE_ERROR,
uri=uri, loc=uri,
msg="Invalid pkg_resources uri. \n %s" % usage
)
if pkg_resources.resource_exists(package, path):
return pkg_resources.resource_stream(package, path)
raise UriException(
UriException.RESOURCE_ERROR,
uri=uri, loc=uri,
msg="'%s' was not found in the python package '%s'" % (path, package)
)
else:
return self._orig_resolve(uri, base_uri)
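# Minimal usage sketch for the resolver above (file names are hypothetical):
# 'local:' URIs are read from the configured template directory, 'pkg:' URIs
# from an installed Python package's resources.
def _example_resolver_usage():
    resolver = LocalTemplateResolver()
    resolver.templates = 'templates'
    # A packaged template would be resolved as:
    #   resolver.resolve('pkg:mypackage#templates/page.xslt')
    stream = resolver.resolve('local:page.xslt')   # -> templates/page.xslt
    try:
        return stream.read()
    finally:
        stream.close()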
XParams = 'xsltemplate.params'
XTemplate = 'xsltemplate.template'
XSource = 'xsltemplate.source'
class TemplateMiddleware(object):
def __init__(self, app_conf, app, **kw):
self.ns = unicode(app_conf.get('xsltemplate_namespace',
'http://ionrock.org/ns/xsltemplate'))
        # Always define self.content; __call__ falls back to it when the
        # wrapped app produces no body
        self.content = app_conf.get('use_index_xml')
self.template_key = XTemplate
self.params_key = XParams
self.source_key = XSource
self.tdir = app_conf.get('template_directory', 'templates')
self.resolver = LocalTemplateResolver()
self.resolver.templates = self.tdir
self.xslt_factory = InputSourceFactory(resolver=self.resolver)
self.rs = '%s.xslt'
self.app = app
if kw.get('extensions'):
self.extensions = kw['extensions']
else:
self.extensions = None
def start_response(self, status, headers, exc_info=None):
self.status = status
self.headers = headers
self.exc_info = exc_info
def __call__(self, environ, start_response):
source = ''.join(self.app(environ, self.start_response))
if not source and self.content:
source = self.content
if environ.get(self.template_key):
xslt = environ[self.template_key]
params = environ.get(self.params_key, {})
source = self.do_render(source, xslt, params)
        # Drop content-length; it must be recalculated for the transformed
        # body (and deleting from the list while iterating skips entries)
        self.headers = [ (k, v) for k, v in self.headers
                         if k.lower() != 'content-length' ]
start_response(self.status, self.headers, self.exc_info)
return [source]
def get_processor(self):
proc = Processor()
if self.extensions:
for ext in self.extensions:
proc.registerExtensionFunction(*(ext))
return proc
def get(self, fn):
if fn.startswith('pkg://'):
package, sep, path = fn[6:].partition('#')
if pkg_resources.resource_exists(package, path):
return self.xslt_factory.fromString(pkg_resources.resource_string(package, path))
path = Uri.OsPathToUri(os.path.join(self.tdir, fn))
try:
xslt = self.xslt_factory.fromUri(path)
except UriException, e:
xslt = self.xslt_factory.fromString(
fn, Uri.OsPathToUri(os.path.abspath('.'))
)
return xslt
def run(self, xml, xslt, params):
proc = self.get_processor()
xml = CreateInputSource(xml)
xslt = self.get(xslt)
proc.appendStylesheet(xslt)
out = proc.run(xml, topLevelParams=params)
del proc
return out
def do_render(self, xml, xslt, params):
params['check_params'] = "Yup they are working!"
nodes = {}
for k, v in params.items():
if isinstance(v, list):
nodes[k] = v
params = self.setup_xslt_params(params)
for k, v in nodes.items():
params[(self.ns, k)] = v
return self.run(xml, xslt, params=params)
def setup_xslt_params(self, params):
xsltparams = dict([ ((self.ns, k), params[k])
for k in params
if xsltize(params[k]) is not None ])
return xsltparams
class IndexXMLMiddleware(object):
def __init__(self, app_conf, app):
self.app_conf = app_conf
self.content = '<?xml version="1.0"?><page />'
if self.app_conf.get('index_xml'):
if os.path.exists(self.app_conf['index_xml']):
self.content = open(self.app_conf['index_xml'],'rb').read()
self.app = app
def start_response(self, status, headers, exc_info=None):
self.status = status
self.headers = headers
self.exc_info = exc_info
def __call__(self, environ, start_response, exc_info=None):
c = self.app(environ, self.start_response)
start_response(self.status, self.headers, self.exc_info)
if c:
return c
return [self.content]
def set_params(environ, params):
values = environ.get(XParams, {})
values.update(params)
environ[XParams] = values
def set_template(environ, template):
environ[XTemplate] = template
def node_set(xml):
return Parse(xml)
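# Sketch of a WSGI app cooperating with TemplateMiddleware through the
# helpers above (the template name and parameter value are hypothetical):
def _example_templated_app(environ, start_response):
    set_template(environ, 'page.xslt')           # stylesheet to apply
    set_params(environ, {u'title': u'Hello'})    # becomes a top-level param
    start_response('200 OK', [('Content-Type', 'text/xml')])
    return ['<?xml version="1.0"?><page/>']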
class TemplateConstants(object):
def __init__(self, constants_dict, app, use_environ=True):
self.constants = constants_dict
self.use_environ = use_environ
self.app = app
def __call__(self, environ, start_response):
if self.use_environ:
# strip out xsltemplates params
set_params(environ,
dict([ (k, v) for k, v in environ.items()
if not k.startswith(XParams) ]))
set_params(environ, self.constants)
        return self.app(environ, start_response)
# | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/transform/middleware.py | middleware.py
import re, sys, time
from cStringIO import StringIO
from itertools import *
from xml import sax
from util import geturl
from Ft.Xml import Sax, InputSource, CreateInputSource
from Ft.Xml.Xslt import Processor
from Ft.Lib import Uri
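# The code below calls get_request_url(), setup_xslt_params() and
# iterwrapper(), but only geturl is imported above. The definitions here are
# minimal fallback sketches -- assumptions about the semantics of the
# wsgixml.applyxslt helpers of the same names -- so the module is
# self-contained.
def get_request_url(environ):
    # Assumed equivalent to util.geturl: reconstruct the request URL from
    # the WSGI environ
    return geturl(environ)

def setup_xslt_params(ns, params):
    # Build {(namespace, name): value} top-level XSLT parameters, keeping
    # only simple values a processor can accept; keys that are already
    # (uri, localname) tuples pass through unchanged
    result = {}
    for k, v in params.items():
        if not isinstance(v, (basestring, int, float, bool)):
            continue
        key = k if isinstance(k, tuple) else (ns, unicode(k))
        result[key] = v
    return result

def iterwrapper(iterable, responder):
    # Feed the wrapped app's body iterator through the responder generator
    # (next_response_block below), which yields the final response blocks
    return responder(iter(iterable))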
USER_AGENT_REGEXEN = [
'.*MSIE 5.5.*',
'.*MSIE 6.0.*',
'.*MSIE 7.0.*',
'.*Gecko/2005.*',
'.*Gecko/2006.*',
'.*Gecko/2007.*',
'.*Gecko/2008.*',
'.*Opera/9.*',
'.*AppleWebKit/31.*',
'.*AppleWebKit/4.*',
]
USER_AGENT_REGEXEN = [ re.compile(regex) for regex in USER_AGENT_REGEXEN ]
WSGI_NS = u'http://www.wsgi.org/'
MTYPE_PAT = re.compile('.*/.*xml.*')
class find_xslt_pis(sax.ContentHandler):
def __init__(self, parser):
parser.setContentHandler(self)
self.parser = parser
return
def startDocument(self):
self.ecount = 0
self.xslt_pi = None
def startElementNS(self, name, qname, attribs):
self.ecount += 1
if self.ecount == 2:
#We're now within the doc proper, so we're done
self.parser.setProperty(Sax.PROPERTY_YIELD_RESULT, self.xslt_pi)
return
def processingInstruction(self, target, data):
if target == u'xml-stylesheet':
data = data.split()
pseudo_attrs = {}
for d in data:
seg = d.split('=')
if len(seg) == 2:
pseudo_attrs[seg[0]] = seg[1][1:-1]
# PI must have both href, type pseudo-attributes;
# type pseudo-attr must match valid XSLT types;
# media pseudo-attr must match preferred media
# (which can be None)
if (pseudo_attrs.has_key('href')
and pseudo_attrs.has_key('type')
and pseudo_attrs['type'] in Processor.XSLT_IMT):
self.xslt_pi = pseudo_attrs['href']
self.parser.setProperty(Sax.PROPERTY_YIELD_RESULT, self.xslt_pi)
return
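# For reference, the handler above recognizes stylesheet PIs shaped like the
# following (illustrative) example, yielding the href value:
#
#   <?xml-stylesheet type="text/xsl" href="/transforms/page.xslt"?>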
ACTIVE_KEY = 'amara.transform.active'
FORCE_SERVER_SIDE_KEY = 'akara.transform.force-server-side'
CACHEABLE_KEY = 'wsgixml.applyxslt.cacheable'
class applyxslt(object):
"""
Middleware to conditionally apply XSLT on the server side.
It checks for XSLT transform capability in the client and performs any
specified transform on the server side if needed. You can force it to
run on the server side too by setting
environ['akara.transform.force-server-side'] to True.
Transforms are specified either through a stylesheet PI or as
environ['akara.transform.xslt'] which can be a single XSLT input source
or a list of same.
You can also specify an environ['akara.transform.xsltparams'] environ entry
with top-level parameters. Remember that you can use
amara.xpath.util.parameterize() to prep these
"""
def __init__(self, app, check_pis=True, use_wsgi_env=True, stock_xslt_params=None,
ext_modules=None):
"""
use_wsgi_env - Optional bool determining whether to make the
WSGI environment available to the XSLT as top level parameter
overrides (e.g. wsgi:SCRIPT_NAME and wsgi:wsgi.url_scheme).
Only passes on values it can handle (UTF-8 strings, Unicode,
            numbers, boolean, lists of "nodes"). Defaults to True
        stock_xslt_params - optional dict of dicts to also pass along as XSLT
            params. The outer dict is of the form: {<namespace>: <inner-dict>}
And the inner dicts are of the form {pname: pvalue}. The keys
(pname) may be given as unicode objects if they have no namespace,
or as (uri, localname) tuples if they do. The values are
(UTF-8 strings, Unicode, numbers, boolean, lists of "nodes").
This is usually used for passing configuration info into XSLT
ext_modules - Optional list of modules with XPath and XSLT extensions
"""
#Set-up phase
self.wrapped_app = app
self.use_wsgi_env = use_wsgi_env
self.stock_xslt_params = stock_xslt_params or {}
self.ext_modules = ext_modules or []
self.processor_cache = {}
self.path_cache = {}
return
def __call__(self, environ, start_response):
#Guess whether the client supports XML+XSLT?
#See: http://copia.ogbuji.net/blog/2006-08-26/LazyWeb_Ho
client_ua = environ.get('HTTP_USER_AGENT', '')
path = environ['PATH_INFO']
send_browser_xslt = any( ua_pat.match(client_ua)
for ua_pat in USER_AGENT_REGEXEN )
#We'll hack a bit for dealing with Python's imperfect nested scopes.
response_params = []
def start_response_wrapper(status, response_headers, exc_info=None):
#Assume response does not use XSLT; do not activate middleware
environ[ACTIVE_KEY] = False
#Check for response content type to see whether it is XML
for name, value in response_headers:
#content-type value is a media type, defined as
#media-type = type "/" subtype *( ";" parameter )
media_type = value.split(';')[0]
if ( name.lower() == 'content-type'
and MTYPE_PAT.match(media_type)):
#.endswith('/xml')
# or media_type.find('/xml+') != -1 )):
environ[ACTIVE_KEY] = True
response_params.extend([status, response_headers, exc_info])
#Replace any write() callable with a dummy that gives an error
#The idea is to refuse support for apps that use write()
def dummy_write(data):
raise RuntimeError('applyxslt does not support the deprecated write() callable in WSGI apps')
return dummy_write
#Get the iterator from the application that will yield response
#body fragments
iterable = self.wrapped_app(environ, start_response_wrapper)
(status, response_headers, exc_info) = response_params
force_server_side = environ.get(FORCE_SERVER_SIDE_KEY, False)
send_browser_xslt = send_browser_xslt and not force_server_side
#import pprint; pprint.pprint(environ)
        #This function processes each chunk of output (a simple string) from
        #the app, returning the modified chunk to be passed on to the server
def next_response_block(response_iter):
            if send_browser_xslt or not environ[ACTIVE_KEY]:
#The client can handle XSLT, or it's not an XML source doc,
#so nothing for this middleware to do
start_response(status, response_headers, exc_info)
                #response_iter yields string chunks; iterate it directly
                for block in response_iter:
                    yield block
            elif path in self.path_cache:
                print >> sys.stderr, 'Using cached result for path', path
                imt, content = self.path_cache[path]
                #Replace the original content-type (and drop any stale
                #content-length) before sending the cached rendition
                headers = [ (name, value)
                            for name, value in response_headers
                            if name.lower() not in ('content-length',
                                                    'content-type') ]
                headers.append(('content-type', imt))
                start_response(status, headers, exc_info)
                yield content
else:
yield produce_final_output(''.join(response_iter))
return
#After the app has finished sending its response body fragments
#if transform is required, it's necessary to send one more chunk,
#with the fully transformed result
def produce_final_output(response, response_headers=response_headers):
if not send_browser_xslt and environ[ACTIVE_KEY]:
use_pi = False
if force_server_side and force_server_side != True:
#True is a special flag meaning "don't delegate to the browser but still check for XSLT PIs"
xslt = force_server_side
else:
#Check for a Stylesheet PI
parser = Sax.CreateParser()
parser.setFeature(Sax.FEATURE_GENERATOR, True)
handler = find_xslt_pis(parser)
pi_iter = parser.parse(CreateInputSource(response))
try:
#Note: only grabs the first PI. Consider whether we should handle multiple
xslt = pi_iter.next()
except StopIteration:
xslt = None
use_pi = True
if xslt:
xslt = xslt.encode('utf-8')
result = StringIO()
#self.xslt_sources = environ.get(
# 'wsgixml.applyxslt.xslt_sources', {})
source = InputSource.DefaultFactory.fromString(
response, uri=get_request_url(environ))
params = {}
for ns in self.stock_xslt_params:
params.update(setup_xslt_params(ns, self.stock_xslt_params[ns]))
start = time.time()
if xslt in self.processor_cache:
processor = self.processor_cache[xslt]
#Any transform would have already been loaded
use_pi = False
print >> sys.stderr, 'Using cached processor instance for transform', xslt
#environ['wsgi.errors'].write('Using cached processor instance for transform %s\n'%xslt)
else:
print >> sys.stderr, 'Creating new processor instance for transform', xslt
#environ['wsgi.errors'].write('Creating new processor instance for transform %s\n'%xslt)
processor = Processor.Processor()
if self.ext_modules:
processor.registerExtensionModules(self.ext_modules)
if self.use_wsgi_env:
params.update(setup_xslt_params(WSGI_NS, environ))
#srcAsUri = OsPathToUri()
#if False:
                        if 'paste.recursive.include' in environ:
                            #paste's recursive facilities are available,
                            #so we can get the XSLT with a middleware call
                            #rather than a full Web invocation
#print environ['paste.recursive.include']
xslt_resp = environ['paste.recursive.include'](xslt)
#FIXME: this should be relative to the XSLT, not XML
#print xslt_resp, xslt_resp.body
isrc = InputSource.DefaultFactory.fromString(
xslt_resp.body, get_request_url(environ))
processor.appendStylesheet(isrc)
else:
#We have to make a full Web call to get the XSLT.
#4Suite will do that for us in processing the PI
if not use_pi:
uri = Uri.Absolutize(xslt, get_request_url(environ))
isrc = InputSource.DefaultFactory.fromUri(uri)
processor.appendStylesheet(isrc)
self.processor_cache[xslt] = processor
processor.run(source, outputStream=result,
ignorePis=not use_pi, topLevelParams=params)
#Strip content-length if present (needs to be
#recalculated by server)
#Also strip content-type, which will be replaced below
response_headers = [ (name, value)
for name, value in response_headers
if ( name.lower()
not in ['content-length', 'content-type'])
]
#Put in the updated content type
imt = processor.outputParams.mediaType
content = result.getvalue()
if environ.get(CACHEABLE_KEY):
self.path_cache[path] = imt, content
response_headers.append(('content-type', imt))
start_response(status, response_headers, exc_info)
end = time.time()
print >> sys.stderr, '%s: elapsed time: %0.3f\n'%(xslt, end-start)
#environ['wsgi.errors'].write('%s: elapsed time: %0.3f\n'%(xslt, end-start))
return content
#If it reaches this point, no XSLT was applied.
return
return iterwrapper(iterable, next_response_block)
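# Minimal wiring sketch (the app below is hypothetical): wrap any
# XML-producing WSGI app; setting the force-server-side key to True keeps
# the transform on the server while still honoring stylesheet PIs.
def _example_xml_app(environ, start_response):
    environ[FORCE_SERVER_SIDE_KEY] = True
    start_response('200 OK', [('Content-Type', 'application/xml')])
    return ['<?xml version="1.0"?>'
            '<?xml-stylesheet type="text/xsl" href="page.xslt"?>'
            '<doc/>']

transformed_example_app = applyxslt(_example_xml_app)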
#if __name__ == '__main__':
# | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/transform/xslt.py | xslt.py
# @@: add in protection against HTTP/1.0 clients who claim to
# be 1.1 but do not send a Content-Length
# @@: add support for chunked encoding, this is not a 1.1 server
# till this is completed.
import atexit
import traceback
import socket, sys, threading, urlparse, Queue, urllib
from cStringIO import StringIO  # used in kill_hung_threads' error path
import posixpath
import time
import thread
import os
from itertools import count
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
#from paste.util import converters # Removed for Akara
import logging
try:
from paste.util import killthread
except ImportError:
# Not available, probably no ctypes
killthread = None
__all__ = ['WSGIHandlerMixin', 'WSGIServer', 'WSGIHandler', 'serve']
__version__ = "0.5"
# Copied from paste.util.converters for use in Akara.
# Reduce dependencies on external modules
def asbool(obj):
if isinstance(obj, (str, unicode)):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError(
"String is not true/false: %r" % obj)
return bool(obj)
class ContinueHook(object):
"""
When a client request includes a 'Expect: 100-continue' header, then
it is the responsibility of the server to send 100 Continue when it
is ready for the content body. This allows authentication, access
    levels, and other exceptions to be detected *before* bandwidth is
spent on the request body.
This is a rfile wrapper that implements this functionality by
sending 100 Continue to the client immediately after the user
requests the content via a read() operation on the rfile stream.
After this response is sent, it becomes a pass-through object.
"""
def __init__(self, rfile, write):
self._ContinueFile_rfile = rfile
self._ContinueFile_write = write
for attr in ('close', 'closed', 'fileno', 'flush',
'mode', 'bufsize', 'softspace'):
if hasattr(rfile, attr):
setattr(self, attr, getattr(rfile, attr))
for attr in ('read', 'readline', 'readlines'):
if hasattr(rfile, attr):
setattr(self, attr, getattr(self, '_ContinueFile_' + attr))
def _ContinueFile_send(self):
self._ContinueFile_write("HTTP/1.1 100 Continue\r\n\r\n")
rfile = self._ContinueFile_rfile
for attr in ('read', 'readline', 'readlines'):
if hasattr(rfile, attr):
setattr(self, attr, getattr(rfile, attr))
def _ContinueFile_read(self, size=-1):
self._ContinueFile_send()
return self._ContinueFile_rfile.read(size)
def _ContinueFile_readline(self, size=-1):
self._ContinueFile_send()
return self._ContinueFile_rfile.readline(size)
def _ContinueFile_readlines(self, sizehint=0):
self._ContinueFile_send()
return self._ContinueFile_rfile.readlines(sizehint)
class WSGIHandlerMixin:
"""
WSGI mix-in for HTTPRequestHandler
This class is a mix-in to provide WSGI functionality to any
HTTPRequestHandler derivative (as provided in Python's BaseHTTPServer).
This assumes a ``wsgi_application`` handler on ``self.server``.
"""
lookup_addresses = True
def log_request(self, *args, **kwargs):
""" disable success request logging
Logging transactions should not be part of a WSGI server,
if you want logging; look at paste.translogger
"""
pass
def log_message(self, *args, **kwargs):
""" disable error message logging
Logging transactions should not be part of a WSGI server,
if you want logging; look at paste.translogger
"""
pass
def version_string(self):
""" behavior that BaseHTTPServer should have had """
if not self.sys_version:
return self.server_version
else:
return self.server_version + ' ' + self.sys_version
def wsgi_write_chunk(self, chunk):
"""
Write a chunk of the output stream; send headers if they
have not already been sent.
"""
if not self.wsgi_headers_sent and not self.wsgi_curr_headers:
raise RuntimeError(
"Content returned before start_response called")
if not self.wsgi_headers_sent:
self.wsgi_headers_sent = True
(status, headers) = self.wsgi_curr_headers
code, message = status.split(" ", 1)
self.send_response(int(code), message)
#
# HTTP/1.1 compliance; either send Content-Length or
# signal that the connection is being closed.
#
send_close = True
for (k, v) in headers:
lk = k.lower()
if 'content-length' == lk:
send_close = False
if 'connection' == lk:
if 'close' == v.lower():
self.close_connection = 1
send_close = False
self.send_header(k, v)
if send_close:
self.close_connection = 1
self.send_header('Connection', 'close')
self.end_headers()
self.wfile.write(chunk)
def wsgi_start_response(self, status, response_headers, exc_info=None):
if exc_info:
try:
if self.wsgi_headers_sent:
raise exc_info[0], exc_info[1], exc_info[2]
else:
# In this case, we're going to assume that the
# higher-level code is currently handling the
                    # issue and returning a reasonable response.
# self.log_error(repr(exc_info))
pass
finally:
exc_info = None
elif self.wsgi_curr_headers:
assert 0, "Attempt to set headers a second time w/o an exc_info"
self.wsgi_curr_headers = (status, response_headers)
return self.wsgi_write_chunk
def wsgi_setup(self, environ=None):
"""
Setup the member variables used by this WSGI mixin, including
the ``environ`` and status member variables.
After the basic environment is created; the optional ``environ``
argument can be used to override any settings.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(self.path)
path = urllib.unquote(path)
endslash = path.endswith('/')
path = posixpath.normpath(path)
if endslash and path != '/':
# Put the slash back...
path += '/'
(server_name, server_port) = self.server.server_address
rfile = self.rfile
if 'HTTP/1.1' == self.protocol_version and \
'100-continue' == self.headers.get('Expect','').lower():
rfile = ContinueHook(rfile, self.wfile.write)
else:
# We can put in the protection to keep from over-reading the
# file
try:
content_length = int(self.headers.get('Content-Length', '0'))
except ValueError:
content_length = 0
if not hasattr(self.connection, 'get_context'):
                # @@: LimitedLengthFile is currently broken in connection
                # with SSL (sporadic errors that are difficult to trace, but
                # ones that go away when you don't use LimitedLengthFile)
rfile = LimitedLengthFile(rfile, content_length)
remote_address = self.client_address[0]
self.wsgi_environ = {
'wsgi.version': (1,0)
,'wsgi.url_scheme': 'http'
,'wsgi.input': rfile
,'wsgi.errors': sys.stderr
,'wsgi.multithread': True
,'wsgi.multiprocess': False
,'wsgi.run_once': False
# CGI variables required by PEP-333
,'REQUEST_METHOD': self.command
,'SCRIPT_NAME': '' # application is root of server
,'PATH_INFO': path
,'QUERY_STRING': query
,'CONTENT_TYPE': self.headers.get('Content-Type', '')
,'CONTENT_LENGTH': self.headers.get('Content-Length', '0')
,'SERVER_NAME': server_name
,'SERVER_PORT': str(server_port)
,'SERVER_PROTOCOL': self.request_version
# CGI not required by PEP-333
,'REMOTE_ADDR': remote_address
}
if scheme:
self.wsgi_environ['paste.httpserver.proxy.scheme'] = scheme
if netloc:
self.wsgi_environ['paste.httpserver.proxy.host'] = netloc
if self.lookup_addresses:
            # @@: make lookup_addresses actually work; at this point
            # address_string() has been overridden further down in this
            # file and hence is a noop
if remote_address.startswith("192.168.") \
or remote_address.startswith("10.") \
or remote_address.startswith("172.16."):
pass
else:
address_string = None # self.address_string()
if address_string:
self.wsgi_environ['REMOTE_HOST'] = address_string
if hasattr(self.server, 'thread_pool'):
# Now that we know what the request was for, we should
# tell the thread pool what its worker is working on
self.server.thread_pool.worker_tracker[thread.get_ident()][1] = self.wsgi_environ
self.wsgi_environ['paste.httpserver.thread_pool'] = self.server.thread_pool
for k, v in self.headers.items():
key = 'HTTP_' + k.replace("-","_").upper()
if key in ('HTTP_CONTENT_TYPE','HTTP_CONTENT_LENGTH'):
continue
self.wsgi_environ[key] = ','.join(self.headers.getheaders(k))
if hasattr(self.connection,'get_context'):
self.wsgi_environ['wsgi.url_scheme'] = 'https'
# @@: extract other SSL parameters from pyOpenSSL at...
# http://www.modssl.org/docs/2.8/ssl_reference.html#ToC25
if environ:
assert isinstance(environ, dict)
self.wsgi_environ.update(environ)
if 'on' == environ.get('HTTPS'):
self.wsgi_environ['wsgi.url_scheme'] = 'https'
self.wsgi_curr_headers = None
self.wsgi_headers_sent = False
def wsgi_connection_drop(self, exce, environ=None):
"""
Override this if you're interested in socket exceptions, such
as when the user clicks 'Cancel' during a file download.
"""
pass
def wsgi_execute(self, environ=None):
"""
Invoke the server's ``wsgi_application``.
"""
self.wsgi_setup(environ)
try:
result = self.server.wsgi_application(self.wsgi_environ,
self.wsgi_start_response)
try:
for chunk in result:
self.wsgi_write_chunk(chunk)
if not self.wsgi_headers_sent:
self.wsgi_write_chunk('')
finally:
if hasattr(result,'close'):
result.close()
result = None
except socket.error, exce:
self.wsgi_connection_drop(exce, environ)
return
except:
if not self.wsgi_headers_sent:
error_msg = "Internal Server Error\n"
self.wsgi_curr_headers = (
'500 Internal Server Error',
[('Content-type', 'text/plain'),
('Content-length', str(len(error_msg)))])
self.wsgi_write_chunk("Internal Server Error\n")
raise
class _HTTPServer(HTTPServer):
def server_bind(self):
if self.RequestHandlerClass.protocol_version == "HTTP/1.1":
# Disable Nagle's Algorithm, which causes performance
# problems with Keep-Alive. Sometimes the server has sent
# a response to the client but the TCP stack buffers the
# response in hopes of reducing the number of packets to
# send. After about 200ms it gives up and sends the rest
# of the packet, but 0.2s is a long time to wait when
# there are many small, fast requests on the same
# connection.
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
HTTPServer.server_bind(self)
#
# SSL Functionality
#
# This implementation was motivated by Sebastien Martini's SSL example
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
#
try:
from OpenSSL import SSL, tsafe
SocketErrors = (socket.error, SSL.ZeroReturnError, SSL.SysCallError)
except ImportError:
# Do not require pyOpenSSL to be installed, but disable SSL
# functionality in that case.
SSL = None
SocketErrors = (socket.error,)
class SecureHTTPServer(_HTTPServer):
def __init__(self, server_address, RequestHandlerClass,
ssl_context=None, request_queue_size=None):
assert not ssl_context, "pyOpenSSL not installed"
HTTPServer.__init__(self, server_address, RequestHandlerClass)
if request_queue_size:
self.socket.listen(request_queue_size)
else:
class _ConnFixer(object):
""" wraps a socket connection so it implements makefile """
def __init__(self, conn):
self.__conn = conn
def makefile(self, mode, bufsize):
return socket._fileobject(self.__conn, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self.__conn, attrib)
class SecureHTTPServer(_HTTPServer):
"""
Provides SSL server functionality on top of the BaseHTTPServer
by overriding _private_ members of Python's standard
distribution. The interface for this instance only changes by
        adding an optional ssl_context attribute to the constructor:
cntx = SSL.Context(SSL.SSLv23_METHOD)
cntx.use_privatekey_file("host.pem")
cntx.use_certificate_file("host.pem")
"""
def __init__(self, server_address, RequestHandlerClass,
ssl_context=None, request_queue_size=None):
# This overrides the implementation of __init__ in python's
# SocketServer.TCPServer (which BaseHTTPServer.HTTPServer
# does not override, thankfully).
HTTPServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family,
self.socket_type)
self.ssl_context = ssl_context
if ssl_context:
class TSafeConnection(tsafe.Connection):
def settimeout(self, *args):
self._lock.acquire()
try:
return self._ssl_conn.settimeout(*args)
finally:
self._lock.release()
def gettimeout(self):
self._lock.acquire()
try:
return self._ssl_conn.gettimeout()
finally:
self._lock.release()
self.socket = TSafeConnection(ssl_context, self.socket)
self.server_bind()
if request_queue_size:
self.socket.listen(request_queue_size)
self.server_activate()
def get_request(self):
            # The default SSL request object does not seem to have a
            # ``makefile(mode, bufsize)`` method as expected by
            # SocketServer.StreamRequestHandler.
(conn, info) = self.socket.accept()
if self.ssl_context:
conn = _ConnFixer(conn)
return (conn, info)
def _auto_ssl_context():
import OpenSSL, time, random
pkey = OpenSSL.crypto.PKey()
pkey.generate_key(OpenSSL.crypto.TYPE_RSA, 768)
cert = OpenSSL.crypto.X509()
cert.set_serial_number(random.randint(0, sys.maxint))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
cert.get_subject().CN = '*'
cert.get_subject().O = 'Dummy Certificate'
cert.get_issuer().CN = 'Untrusted Authority'
cert.get_issuer().O = 'Self-Signed'
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx
class WSGIHandler(WSGIHandlerMixin, BaseHTTPRequestHandler):
"""
A WSGI handler that overrides POST, GET and HEAD to delegate
requests to the server's ``wsgi_application``.
"""
server_version = 'PasteWSGIServer/' + __version__
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
self.wsgi_execute()
def handle(self):
# don't bother logging disconnects while handling a request
try:
BaseHTTPRequestHandler.handle(self)
except SocketErrors, exce:
self.wsgi_connection_drop(exce)
def address_string(self):
"""Return the client address formatted for logging.
This is overridden so that no hostname lookup is done.
"""
return ''
class LimitedLengthFile(object):
def __init__(self, file, length):
self.file = file
self.length = length
self._consumed = 0
if hasattr(self.file, 'seek'):
self.seek = self._seek
def __repr__(self):
base_repr = repr(self.file)
return base_repr[:-1] + ' length=%s>' % self.length
def read(self, length=None):
left = self.length - self._consumed
if length is None:
length = left
else:
length = min(length, left)
        # next two lines are necessary only if read(0) blocks
if not left:
return ''
data = self.file.read(length)
self._consumed += len(data)
return data
def readline(self, *args):
max_read = self.length - self._consumed
if len(args):
max_read = min(args[0], max_read)
data = self.file.readline(max_read)
self._consumed += len(data)
return data
def readlines(self, hint=None):
data = self.file.readlines(hint)
for chunk in data:
self._consumed += len(chunk)
return data
def __iter__(self):
return self
def next(self):
if self.length - self._consumed <= 0:
raise StopIteration
return self.readline()
## Optional methods ##
def _seek(self, place):
self.file.seek(place)
self._consumed = place
def tell(self):
if hasattr(self.file, 'tell'):
return self.file.tell()
else:
return self._consumed
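# Quick sketch of the wrapper above (uses the StringIO imported at the top):
# reads are capped at the declared Content-Length so a keep-alive connection
# is never over-read.
def _example_limited_read():
    body = LimitedLengthFile(StringIO('hello, world'), 5)
    first = body.read()    # 'hello'
    rest = body.read()     # '' -- bytes past the declared length stay unread
    return first, rest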
class ThreadPool(object):
"""
Generic thread pool with a queue of callables to consume.
Keeps a notion of the status of its worker threads:
idle: worker thread with nothing to do
busy: worker thread doing its job
hung: worker thread that's been doing a job for too long
dying: a hung thread that has been killed, but hasn't died quite
yet.
zombie: what was a worker thread that we've tried to kill but
isn't dead yet.
At any time you can call track_threads, to get a dictionary with
these keys and lists of thread_ids that fall in that status. All
    keys will be present, even if they point to empty lists.
hung threads are threads that have been busy more than
hung_thread_limit seconds. Hung threads are killed when they live
longer than kill_thread_limit seconds. A thread is then
considered dying for dying_limit seconds, if it is still alive
after that it is considered a zombie.
When there are no idle workers and a request comes in, another
    worker *may* be spawned. If there are fewer than spawn_if_under
threads in the busy state, another thread will be spawned. So if
the limit is 5, and there are 4 hung threads and 6 busy threads,
no thread will be spawned.
When there are more than max_zombie_threads_before_die zombie
threads, a SystemExit exception will be raised, stopping the
server. Use 0 or None to never raise this exception. Zombie
    threads *should* get cleaned up, but killing threads is not
necessarily reliable. This is turned off by default, since it is
only a good idea if you've deployed the server with some process
watching from above (something similar to daemontools or zdaemon).
Each worker thread only processes ``max_requests`` tasks before it
dies and replaces itself with a new worker thread.
"""
SHUTDOWN = object()
def __init__(
self, nworkers, name="ThreadPool", daemon=False,
max_requests=100, # threads are killed after this many requests
hung_thread_limit=30, # when a thread is marked "hung"
kill_thread_limit=1800, # when you kill that hung thread
dying_limit=300, # seconds that a kill should take to go into effect (longer than this and the thread is a "zombie")
spawn_if_under=5, # spawn if there's too many hung threads
max_zombie_threads_before_die=0, # when to give up on the process
hung_check_period=100, # every 100 requests check for hung workers
logger=None, # Place to log messages to
error_email=None, # Person(s) to notify if serious problem occurs
):
"""
Create thread pool with `nworkers` worker threads.
"""
self.nworkers = nworkers
self.max_requests = max_requests
self.name = name
self.queue = Queue.Queue()
self.workers = []
self.daemon = daemon
if logger is None:
logger = logging.getLogger('paste.httpserver.ThreadPool')
if isinstance(logger, basestring):
logger = logging.getLogger(logger)
self.logger = logger
self.error_email = error_email
self._worker_count = count()
assert (not kill_thread_limit
or kill_thread_limit >= hung_thread_limit), (
"kill_thread_limit (%s) should be higher than hung_thread_limit (%s)"
% (kill_thread_limit, hung_thread_limit))
if not killthread:
kill_thread_limit = 0
self.logger.info(
"Cannot use kill_thread_limit as ctypes/killthread is not available")
self.kill_thread_limit = kill_thread_limit
self.dying_limit = dying_limit
self.hung_thread_limit = hung_thread_limit
assert spawn_if_under <= nworkers, (
"spawn_if_under (%s) should be less than nworkers (%s)"
% (spawn_if_under, nworkers))
self.spawn_if_under = spawn_if_under
self.max_zombie_threads_before_die = max_zombie_threads_before_die
self.hung_check_period = hung_check_period
self.requests_since_last_hung_check = 0
# Used to keep track of what worker is doing what:
self.worker_tracker = {}
# Used to keep track of the workers not doing anything:
self.idle_workers = []
# Used to keep track of threads that have been killed, but maybe aren't dead yet:
self.dying_threads = {}
# This is used to track when we last had to add idle workers;
# we shouldn't cull extra workers until some time has passed
# (hung_thread_limit) since workers were added:
self._last_added_new_idle_workers = 0
if not daemon:
atexit.register(self.shutdown)
for i in range(self.nworkers):
self.add_worker_thread(message='Initial worker pool')
def add_task(self, task):
"""
Add a task to the queue
"""
self.logger.debug('Added task (%i tasks queued)', self.queue.qsize())
if self.hung_check_period:
self.requests_since_last_hung_check += 1
if self.requests_since_last_hung_check > self.hung_check_period:
self.requests_since_last_hung_check = 0
self.kill_hung_threads()
if not self.idle_workers and self.spawn_if_under:
# spawn_if_under can come into effect...
busy = 0
now = time.time()
self.logger.debug('No idle workers for task; checking if we need to make more workers')
for worker in self.workers:
if not hasattr(worker, 'thread_id'):
# Not initialized
continue
time_started, info = self.worker_tracker.get(worker.thread_id,
(None, None))
if time_started is not None:
if now - time_started < self.hung_thread_limit:
busy += 1
if busy < self.spawn_if_under:
self.logger.info(
'No idle tasks, and only %s busy tasks; adding %s more '
'workers', busy, self.spawn_if_under-busy)
self._last_added_new_idle_workers = time.time()
for i in range(self.spawn_if_under - busy):
self.add_worker_thread(message='Response to lack of idle workers')
else:
self.logger.debug(
'No extra workers needed (%s busy workers)',
busy)
if (len(self.workers) > self.nworkers
and len(self.idle_workers) > 3
and time.time()-self._last_added_new_idle_workers > self.hung_thread_limit):
            # We've spawned workers in the past, but they aren't needed
# anymore; kill off some
self.logger.info(
'Culling %s extra workers (%s idle workers present)',
len(self.workers)-self.nworkers, len(self.idle_workers))
self.logger.debug(
'Idle workers: %s', self.idle_workers)
for i in range(len(self.workers) - self.nworkers):
self.queue.put(self.SHUTDOWN)
self.queue.put(task)
def track_threads(self):
"""
Return a dict summarizing the threads in the pool (as
described in the ThreadPool docstring).
"""
result = dict(idle=[], busy=[], hung=[], dying=[], zombie=[])
now = time.time()
for worker in self.workers:
if not hasattr(worker, 'thread_id'):
# The worker hasn't fully started up, we should just
# ignore it
continue
time_started, info = self.worker_tracker.get(worker.thread_id,
(None, None))
if time_started is not None:
if now - time_started > self.hung_thread_limit:
result['hung'].append(worker)
else:
result['busy'].append(worker)
else:
result['idle'].append(worker)
for thread_id, (time_killed, worker) in self.dying_threads.items():
if not self.thread_exists(thread_id):
# Cull dying threads that are actually dead and gone
self.logger.info('Killed thread %s no longer around',
thread_id)
try:
del self.dying_threads[thread_id]
except KeyError:
pass
continue
if now - time_killed > self.dying_limit:
result['zombie'].append(worker)
else:
result['dying'].append(worker)
return result
def kill_worker(self, thread_id):
"""
Removes the worker with the given thread_id from the pool, and
replaces it with a new worker thread.
This should only be done for mis-behaving workers.
"""
if killthread is None:
raise RuntimeError(
"Cannot kill worker; killthread/ctypes not available")
thread_obj = threading._active.get(thread_id)
killthread.async_raise(thread_id, SystemExit)
try:
del self.worker_tracker[thread_id]
except KeyError:
pass
self.logger.info('Killing thread %s', thread_id)
if thread_obj in self.workers:
self.workers.remove(thread_obj)
self.dying_threads[thread_id] = (time.time(), thread_obj)
self.add_worker_thread(message='Replacement for killed thread %s' % thread_id)
def thread_exists(self, thread_id):
"""
Returns true if a thread with this id is still running
"""
return thread_id in threading._active
def add_worker_thread(self, *args, **kwargs):
index = self._worker_count.next()
worker = threading.Thread(target=self.worker_thread_callback,
args=args, kwargs=kwargs,
name=("worker %d" % index))
worker.setDaemon(self.daemon)
worker.start()
def kill_hung_threads(self):
"""
Tries to kill any hung threads
"""
if not self.kill_thread_limit:
# No killing should occur
return
now = time.time()
max_time = 0
total_time = 0
idle_workers = 0
starting_workers = 0
working_workers = 0
killed_workers = 0
for worker in self.workers:
if not hasattr(worker, 'thread_id'):
# Not setup yet
starting_workers += 1
continue
time_started, info = self.worker_tracker.get(worker.thread_id,
(None, None))
if time_started is None:
# Must be idle
idle_workers += 1
continue
working_workers += 1
max_time = max(max_time, now-time_started)
total_time += now-time_started
if now - time_started > self.kill_thread_limit:
self.logger.warning(
'Thread %s hung (working on task for %i seconds)',
worker.thread_id, now - time_started)
try:
import pprint
info_desc = pprint.pformat(info)
except:
out = StringIO()
traceback.print_exc(file=out)
info_desc = 'Error:\n%s' % out.getvalue()
self.notify_problem(
"Killing worker thread (id=%(thread_id)s) because it has been \n"
"working on task for %(time)s seconds (limit is %(limit)s)\n"
"Info on task:\n"
"%(info)s"
% dict(thread_id=worker.thread_id,
time=now - time_started,
limit=self.kill_thread_limit,
info=info_desc))
self.kill_worker(worker.thread_id)
killed_workers += 1
if working_workers:
ave_time = float(total_time) / working_workers
ave_time = '%.2fsec' % ave_time
else:
ave_time = 'N/A'
self.logger.info(
"kill_hung_threads status: %s threads (%s working, %s idle, %s starting) "
"ave time %s, max time %.2fsec, killed %s workers"
% (idle_workers + starting_workers + working_workers,
working_workers, idle_workers, starting_workers,
ave_time, max_time, killed_workers))
self.check_max_zombies()
def check_max_zombies(self):
"""
Check if we've reached max_zombie_threads_before_die; if so
then kill the entire process.
"""
if not self.max_zombie_threads_before_die:
return
found = []
now = time.time()
for thread_id, (time_killed, worker) in self.dying_threads.items():
if not self.thread_exists(thread_id):
# Cull dying threads that are actually dead and gone
try:
del self.dying_threads[thread_id]
except KeyError:
pass
continue
if now - time_killed > self.dying_limit:
found.append(thread_id)
if found:
self.logger.info('Found %s zombie threads', found)
if len(found) > self.max_zombie_threads_before_die:
self.logger.fatal(
'Exiting process because %s zombie threads is more than %s limit',
len(found), self.max_zombie_threads_before_die)
self.notify_problem(
"Exiting process because %(found)s zombie threads "
"(more than limit of %(limit)s)\n"
"Bad threads (ids):\n"
" %(ids)s\n"
% dict(found=len(found),
limit=self.max_zombie_threads_before_die,
ids="\n ".join(map(str, found))),
subject="Process restart (too many zombie threads)")
self.shutdown(10)
print 'Shutting down', threading.currentThread()
raise ServerExit(3)
def worker_thread_callback(self, message=None):
"""
Worker thread should call this method to get and process queued
callables.
"""
thread_obj = threading.currentThread()
thread_id = thread_obj.thread_id = thread.get_ident()
self.workers.append(thread_obj)
self.idle_workers.append(thread_id)
requests_processed = 0
add_replacement_worker = False
self.logger.debug('Started new worker %s: %s', thread_id, message)
try:
while True:
if self.max_requests and self.max_requests < requests_processed:
# Replace this thread then die
self.logger.debug('Thread %s processed %i requests (limit %s); stopping thread'
% (thread_id, requests_processed, self.max_requests))
add_replacement_worker = True
break
runnable = self.queue.get()
if runnable is ThreadPool.SHUTDOWN:
self.logger.debug('Worker %s asked to SHUTDOWN', thread_id)
break
try:
self.idle_workers.remove(thread_id)
except ValueError:
pass
self.worker_tracker[thread_id] = [time.time(), None]
requests_processed += 1
try:
try:
runnable()
except:
# We are later going to call sys.exc_clear(),
# removing all remnants of any exception, so
# we should log it now. But ideally no
# exception should reach this level
print >> sys.stderr, (
'Unexpected exception in worker %r' % runnable)
traceback.print_exc()
if thread_id in self.dying_threads:
# That last exception was intended to kill me
break
finally:
try:
del self.worker_tracker[thread_id]
except KeyError:
pass
sys.exc_clear()
self.idle_workers.append(thread_id)
finally:
try:
del self.worker_tracker[thread_id]
except KeyError:
pass
try:
self.idle_workers.remove(thread_id)
except ValueError:
pass
try:
self.workers.remove(thread_obj)
except ValueError:
pass
try:
del self.dying_threads[thread_id]
except KeyError:
pass
if add_replacement_worker:
self.add_worker_thread(message='Voluntary replacement for thread %s' % thread_id)
def shutdown(self, force_quit_timeout=0):
"""
Shutdown the queue (after finishing any pending requests).
"""
self.logger.info('Shutting down threadpool')
# Add a shutdown request for every worker
for i in range(len(self.workers)):
self.queue.put(ThreadPool.SHUTDOWN)
# Wait for each thread to terminate
hung_workers = []
for worker in self.workers:
worker.join(0.5)
if worker.isAlive():
hung_workers.append(worker)
zombies = []
for thread_id in self.dying_threads:
if self.thread_exists(thread_id):
zombies.append(thread_id)
if hung_workers or zombies:
self.logger.info("%s workers didn't stop properly, and %s zombies",
len(hung_workers), len(zombies))
if hung_workers:
for worker in hung_workers:
self.kill_worker(worker.thread_id)
self.logger.info('Workers killed forcefully')
if force_quit_timeout:
hung = []
timed_out = False
need_force_quit = bool(zombies)
            for worker in self.workers:
                if not timed_out and worker.isAlive():
timed_out = True
worker.join(force_quit_timeout)
if worker.isAlive():
print "Worker %s won't die" % worker
need_force_quit = True
if need_force_quit:
import atexit
# Remove the threading atexit callback
for callback in list(atexit._exithandlers):
func = getattr(callback[0], 'im_func', None)
if not func:
continue
globs = getattr(func, 'func_globals', {})
mod = globs.get('__name__')
if mod == 'threading':
atexit._exithandlers.remove(callback)
atexit._run_exitfuncs()
print 'Forcefully exiting process'
os._exit(3)
else:
self.logger.info('All workers eventually killed')
else:
self.logger.info('All workers stopped')
def notify_problem(self, msg, subject=None, spawn_thread=True):
"""
Called when there's a substantial problem. msg contains the
body of the notification, subject the summary.
If spawn_thread is true, then the email will be send in
another thread (so this doesn't block).
"""
if not self.error_email:
return
if spawn_thread:
t = threading.Thread(
target=self.notify_problem,
args=(msg, subject, False))
t.start()
return
from_address = 'errors@localhost'
if not subject:
subject = msg.strip().splitlines()[0]
subject = subject[:50]
subject = '[http threadpool] %s' % subject
headers = [
"To: %s" % self.error_email,
"From: %s" % from_address,
"Subject: %s" % subject,
]
try:
system = ' '.join(os.uname())
except:
system = '(unknown)'
body = (
"An error has occurred in the paste.httpserver.ThreadPool\n"
"Error:\n"
" %(msg)s\n"
"Occurred at: %(time)s\n"
"PID: %(pid)s\n"
"System: %(system)s\n"
"Server .py file: %(file)s\n"
% dict(msg=msg,
time=time.strftime("%c"),
pid=os.getpid(),
system=system,
file=os.path.abspath(__file__),
))
message = '\n'.join(headers) + "\n\n" + body
import smtplib
server = smtplib.SMTP('localhost')
error_emails = [
e.strip() for e in self.error_email.split(",")
if e.strip()]
server.sendmail(from_address, error_emails, message)
server.quit()
print 'email sent to', error_emails, message
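# Usage sketch for the pool above: it consumes plain callables from its
# queue, and workers replace themselves after max_requests tasks (all
# values below are illustrative).
def _example_threadpool_usage():
    pool = ThreadPool(4, name='demo pool', daemon=True, max_requests=50)
    for i in range(10):
        pool.add_task(lambda n=i: sys.stdout.write('task %d done\n' % n))
    pool.shutdown()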
class ThreadPoolMixIn(object):
"""
Mix-in class to process requests from a thread pool
"""
def __init__(self, nworkers, daemon=False, **threadpool_options):
# Create and start the workers
self.running = True
assert nworkers > 0, "ThreadPoolMixIn servers must have at least one worker"
self.thread_pool = ThreadPool(
nworkers,
"ThreadPoolMixIn HTTP server on %s:%d"
% (self.server_name, self.server_port),
daemon,
**threadpool_options)
def process_request(self, request, client_address):
"""
        Queue the request to be processed by one of the thread pool threads
"""
        # This sets the socket to blocking mode (and no timeout) since it
        # may take the thread pool a little while to get back to it. (This
        # is the default, but since we set a timeout on the parent socket
        # so that we can trap interrupts, we need to restore it here.)
request.setblocking(1)
# Queue processing of the request
self.thread_pool.add_task(
lambda: self.process_request_in_thread(request, client_address))
def handle_error(self, request, client_address):
exc_class, exc, tb = sys.exc_info()
if exc_class is ServerExit:
# This is actually a request to stop the server
raise
return super(ThreadPoolMixIn, self).handle_error(request, client_address)
def process_request_in_thread(self, request, client_address):
"""
The worker thread should call back here to do the rest of the
        request processing. Error handling normally done in 'handle_request'
must be done here.
"""
try:
self.finish_request(request, client_address)
self.close_request(request)
except:
self.handle_error(request, client_address)
self.close_request(request)
exc = sys.exc_info()[1]
if isinstance(exc, (MemoryError, KeyboardInterrupt)):
raise
def serve_forever(self):
"""
Overrides `serve_forever` to shut the threadpool down cleanly.
"""
try:
while self.running:
try:
self.handle_request()
except socket.timeout:
                    # Timeout is expected; gives interrupts a chance to
                    # propagate, just keep handling
pass
finally:
self.thread_pool.shutdown()
def server_activate(self):
"""
Overrides server_activate to set timeout on our listener socket.
"""
# We set the timeout here so that we can trap interrupts on windows
self.socket.settimeout(1)
def server_close(self):
"""
Finish pending requests and shutdown the server.
"""
self.running = False
self.socket.close()
self.thread_pool.shutdown(60)
class WSGIServerBase(SecureHTTPServer):
def __init__(self, wsgi_application, server_address,
RequestHandlerClass=None, ssl_context=None,
request_queue_size=None):
SecureHTTPServer.__init__(self, server_address,
RequestHandlerClass, ssl_context,
request_queue_size=request_queue_size)
self.wsgi_application = wsgi_application
self.wsgi_socket_timeout = None
def get_request(self):
# If there is a socket_timeout, set it on the accepted
(conn,info) = SecureHTTPServer.get_request(self)
if self.wsgi_socket_timeout:
conn.settimeout(self.wsgi_socket_timeout)
return (conn, info)
class WSGIServer(ThreadingMixIn, WSGIServerBase):
daemon_threads = False
class WSGIThreadPoolServer(ThreadPoolMixIn, WSGIServerBase):
def __init__(self, wsgi_application, server_address,
RequestHandlerClass=None, ssl_context=None,
nworkers=10, daemon_threads=False,
threadpool_options=None, request_queue_size=None):
WSGIServerBase.__init__(self, wsgi_application, server_address,
RequestHandlerClass, ssl_context,
request_queue_size=request_queue_size)
if threadpool_options is None:
threadpool_options = {}
ThreadPoolMixIn.__init__(self, nworkers, daemon_threads,
**threadpool_options)
class ServerExit(SystemExit):
"""
Raised to tell the server to really exit (SystemExit is normally
caught)
"""
def serve(application, host=None, port=None, handler=None, ssl_pem=None,
ssl_context=None, server_version=None, protocol_version=None,
start_loop=True, daemon_threads=None, socket_timeout=None,
use_threadpool=None, threadpool_workers=10,
threadpool_options=None, request_queue_size=5):
"""
Serves your ``application`` over HTTP(S) via WSGI interface
``host``
        This is the IP address to bind to (or a hostname if your
nameserver is properly configured). This defaults to
127.0.0.1, which is not a public interface.
``port``
The port to run on, defaults to 8080 for HTTP, or 4443 for
HTTPS. This can be a string or an integer value.
``handler``
This is the HTTP request handler to use, it defaults to
``WSGIHandler`` in this module.
``ssl_pem``
This is an optional SSL certificate file (via OpenSSL). You can
supply ``*`` and a development-only certificate will be
created for you, or you can generate a self-signed test PEM
certificate file as follows::
$ openssl genrsa 1024 > host.key
$ chmod 400 host.key
$ openssl req -new -x509 -nodes -sha1 -days 365 \\
-key host.key > host.cert
$ cat host.cert host.key > host.pem
$ chmod 400 host.pem
``ssl_context``
This is an optional SSL context object for the server. An SSL
context will be automatically constructed for you if you supply
``ssl_pem``. Supply this to use a context of your own
construction.
``server_version``
The version of the server as reported in HTTP response line. This
defaults to something like "PasteWSGIServer/0.5". Many servers
hide their code-base identity with a name like 'Amnesiac/1.0'.
``protocol_version``
This sets the protocol used by the server, by default
``HTTP/1.0``. There is some support for ``HTTP/1.1``, which
defaults to nicer keep-alive connections. This server supports
``100 Continue``, but does not yet support HTTP/1.1 Chunked
Encoding. Hence, if you use HTTP/1.1, you're somewhat in error
since chunked coding is a mandatory requirement of a HTTP/1.1
server. If you specify HTTP/1.1, every response *must* have a
``Content-Length`` and you must be careful not to read past the
end of the socket.
``start_loop``
This specifies if the server loop (aka ``server.serve_forever()``)
should be called; it defaults to ``True``.
``daemon_threads``
This flag specifies whether, when your webserver terminates, all
in-progress client connections should be dropped. It defaults
to ``False``. You might want to set this to ``True`` if you
are using ``HTTP/1.1`` and don't set a ``socket_timeout``.
``socket_timeout``
This specifies the maximum amount of time that a connection to a
given client will be kept open. At this time, it is a rude
disconnect, but at a later time it might follow the RFC a bit
more closely.
``use_threadpool``
Serve requests from a pool of worker threads (``threadpool_workers``)
rather than creating a new thread for each request. This can
substantially reduce latency since there is a high cost associated
with thread creation.
``threadpool_workers``
Number of worker threads to create when ``use_threadpool`` is true. This
can be a string or an integer value.
``threadpool_options``
A dictionary of options to be used when instantiating the
threadpool. See paste.httpserver.ThreadPool for specific
options (``threadpool_workers`` is a specific option that can
also go here).
``request_queue_size``
The 'backlog' argument to socket.listen(); specifies the
maximum number of queued connections.
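Example (an illustrative sketch; the trivial ``app`` below is a
stand-in for your own WSGI application)::
    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello, world!\n']
    serve(app, host='127.0.0.1', port='8080')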
"""
is_ssl = False
if ssl_pem or ssl_context:
assert SSL, "pyOpenSSL is not installed"
is_ssl = True
port = int(port or 4443)
if not ssl_context:
if ssl_pem == '*':
ssl_context = _auto_ssl_context()
else:
ssl_context = SSL.Context(SSL.SSLv23_METHOD)
ssl_context.use_privatekey_file(ssl_pem)
ssl_context.use_certificate_chain_file(ssl_pem)
host = host or '127.0.0.1'
if port is None:
if ':' in host:
host, port = host.split(':', 1)
else:
port = 8080
server_address = (host, int(port))
if not handler:
handler = WSGIHandler
if server_version:
handler.server_version = server_version
handler.sys_version = None
if protocol_version:
assert protocol_version in ('HTTP/0.9', 'HTTP/1.0', 'HTTP/1.1')
handler.protocol_version = protocol_version
if use_threadpool is None:
use_threadpool = True
if asbool(use_threadpool):
server = WSGIThreadPoolServer(application, server_address, handler,
ssl_context, int(threadpool_workers),
daemon_threads,
threadpool_options=threadpool_options,
request_queue_size=request_queue_size)
else:
server = WSGIServer(application, server_address, handler, ssl_context,
request_queue_size=request_queue_size)
if daemon_threads:
server.daemon_threads = daemon_threads
if socket_timeout:
server.wsgi_socket_timeout = int(socket_timeout)
if asbool(start_loop):
protocol = is_ssl and 'https' or 'http'
host, port = server.server_address
if host == '0.0.0.0':
print 'serving on 0.0.0.0:%s view at %s://127.0.0.1:%s' % \
(port, protocol, port)
else:
print "serving on %s://%s:%s" % (protocol, host, port)
try:
server.serve_forever()
except KeyboardInterrupt:
# allow CTRL+C to shutdown
pass
return server
# For paste.deploy server instantiation (egg:Paste#http)
# Note: this gets a separate function because it has to expect string
# arguments (though that's not much of an issue in practice)
def server_runner(wsgi_app, global_conf, **kwargs):
from paste.deploy.converters import asbool
for name in ['port', 'socket_timeout', 'threadpool_workers',
'threadpool_hung_thread_limit',
'threadpool_kill_thread_limit',
'threadpool_dying_limit', 'threadpool_spawn_if_under',
'threadpool_max_zombie_threads_before_die',
'threadpool_hung_check_period',
'threadpool_max_requests', 'request_queue_size']:
if name in kwargs:
kwargs[name] = int(kwargs[name])
for name in ['use_threadpool', 'daemon_threads']:
if name in kwargs:
kwargs[name] = asbool(kwargs[name])
threadpool_options = {}
for name, value in kwargs.items():
if name.startswith('threadpool_') and name != 'threadpool_workers':
threadpool_options[name[len('threadpool_'):]] = value
del kwargs[name]
if ('error_email' not in threadpool_options
and 'error_email' in global_conf):
threadpool_options['error_email'] = global_conf['error_email']
kwargs['threadpool_options'] = threadpool_options
serve(wsgi_app, **kwargs)
server_runner.__doc__ = (serve.__doc__ or '') + """
You can also set these threadpool options:
``threadpool_max_requests``:
The maximum number of requests a worker thread will process
before dying (and replacing itself with a new worker thread).
Default 100.
``threadpool_hung_thread_limit``:
The number of seconds a thread can work on a task before it is
considered hung (stuck). Default 30 seconds.
``threadpool_kill_thread_limit``:
The number of seconds a thread can work before you should kill it
(assuming it will never finish). Default 600 seconds (10 minutes).
``threadpool_dying_limit``:
The length of time after killing a thread that it should actually
disappear. If it lives longer than this, it is considered a
"zombie". Note that even in easy situations killing a thread can
be very slow. Default 300 seconds (5 minutes).
``threadpool_spawn_if_under``:
If there are no idle threads and a request comes in, and there are
less than this number of *busy* threads, then add workers to the
pool. Busy threads are threads that have taken less than
``threadpool_hung_thread_limit`` seconds so far. So if you get
*lots* of requests but they complete in a reasonable amount of time,
the requests will simply queue up (adding more threads probably
wouldn't speed them up). But if you have lots of hung threads and
one more request comes in, this will add workers to handle it.
Default 5.
``threadpool_max_zombie_threads_before_die``:
If there are more zombies than this, just kill the process. This is
only good if you have a monitor that will automatically restart
the server. This can clean up the mess. Default 0 (disabled).
``threadpool_hung_check_period``:
Every X requests, check for hung threads that need to be killed,
or for zombie threads that should cause a restart. Default 100
requests.
``threadpool_logger``:
Logging messages will go the logger named here.
``threadpool_error_email`` (or global ``error_email`` setting):
When threads are killed or the process restarted, this email
address will be contacted (using an SMTP server on localhost).
"""
if __name__ == '__main__':
from paste.wsgilib import dump_environ
#serve(dump_environ, ssl_pem="test.pem")
serve(dump_environ, server_version="Wombles/1.0",
protocol_version="HTTP/1.1", port="8888") | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/thirdparty/httpserver.py | httpserver.py |
__author__ = 'Allan Saddi <[email protected]>'
__version__ = '$Revision$'
import sys
import os
import socket
import select
import errno
import signal
import random
import time
try:
import fcntl
except ImportError:
def setCloseOnExec(sock):
pass
else:
def setCloseOnExec(sock):
fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
# If running Python < 2.4, require eunuchs module for socket.socketpair().
# See <http://www.inoi.fi/open/trac/eunuchs>.
if not hasattr(socket, 'socketpair'):
try:
import eunuchs.socketpair
except ImportError:
# TODO: Other alternatives? Perhaps using os.pipe()?
raise ImportError, 'Requires eunuchs module for Python < 2.4'
def socketpair():
s1, s2 = eunuchs.socketpair.socketpair()
p, c = (socket.fromfd(s1, socket.AF_UNIX, socket.SOCK_STREAM),
socket.fromfd(s2, socket.AF_UNIX, socket.SOCK_STREAM))
os.close(s1)
os.close(s2)
return p, c
socket.socketpair = socketpair
class PreforkServer(object):
"""
A preforked server model conceptually similar to Apache httpd(2). At
any given time, ensures there are at least minSpare children ready to
process new requests (up to a maximum of maxChildren children total).
If the number of idle children is ever above maxSpare, the extra
children are killed.
If maxRequests is positive, each child will only handle that many
requests in its lifetime before exiting.
jobClass should be a class whose constructor takes at least two
arguments: the client socket and client address. jobArgs, which
must be a list or tuple, is any additional (static) arguments you
wish to pass to the constructor.
jobClass should have a run() method (taking no arguments) that does
the actual work. When run() returns, the request is considered
complete and the child process moves to idle state.
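A minimal sketch of the jobClass protocol (``sock`` below is assumed
to be a listening socket; see the __main__ block at the end of this
module for a fuller, runnable example)::
    class EchoJob(object):
        def __init__(self, sock, addr):
            self._sock = sock
            self._addr = addr
        def run(self):
            self._sock.send('hello\n')
            self._sock.close()
    PreforkServer(jobClass=EchoJob).run(sock)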
"""
def __init__(self, minSpare=1, maxSpare=5, maxChildren=50,
maxRequests=0, jobClass=None, jobArgs=()):
self._minSpare = minSpare
self._maxSpare = maxSpare
self._maxChildren = max(maxSpare, maxChildren)
self._maxRequests = maxRequests
self._jobClass = jobClass
self._jobArgs = jobArgs
# Internal state of children. Maps pids to dictionaries with two
# members: 'file' and 'avail'. 'file' is the socket to that
# individual child and 'avail' is whether or not the child is
# free to process requests.
self._children = {}
self._children_to_purge = []
self._last_purge = 0
if minSpare < 1:
raise ValueError("minSpare must be at least 1!")
if maxSpare < minSpare:
raise ValueError("maxSpare must be greater than, or equal to, minSpare!")
def run(self, sock):
"""
The main loop. Pass a socket that is ready to accept() client
connections. Return value will be True or False indicating whether
or not the loop was exited due to SIGHUP.
"""
# Set up signal handlers.
self._keepGoing = True
self._hupReceived = False
self._installSignalHandlers()
# Don't want operations on main socket to block.
sock.setblocking(0)
# Set close-on-exec
setCloseOnExec(sock)
# Main loop.
while self._keepGoing:
# Maintain minimum number of children. Note that we are checking
# the absolute number of children, not the number of "available"
# children. We explicitly test against _maxSpare to maintain
# an *optimistic* absolute minimum. The number of children will
# always be in the range [_maxSpare, _maxChildren].
while len(self._children) < self._maxSpare:
if not self._spawnChild(sock): break
# Wait on any socket activity from live children.
r = [x['file'] for x in self._children.values()
if x['file'] is not None]
if len(r) == len(self._children) and not self._children_to_purge:
timeout = None
else:
# There are dead children that need to be reaped, ensure
# that they are by timing out, if necessary. Or there are some
# children that need to die.
timeout = 2
w = []
if (time.time() > self._last_purge + 10):
w = [x for x in self._children_to_purge if x.fileno() != -1]
try:
r, w, e = select.select(r, w, [], timeout)
except select.error, e:
if e[0] != errno.EINTR:
raise
# Scan child sockets and tend to those that need attention.
for child in r:
# Receive status byte.
try:
state = child.recv(1)
except socket.error, e:
if e[0] in (errno.EAGAIN, errno.EINTR):
# Guess it really didn't need attention?
continue
raise
# Try to match it with a child. (Do we need a reverse map?)
for pid,d in self._children.items():
if child is d['file']:
if state:
# Set availability status accordingly.
self._children[pid]['avail'] = state != '\x00'
else:
# Didn't receive anything. Child is most likely
# dead.
d = self._children[pid]
d['file'].close()
d['file'] = None
d['avail'] = False
for child in w:
# purging child
child.send('bye, bye')
del self._children_to_purge[self._children_to_purge.index(child)]
self._last_purge = time.time()
# Try to match it with a child. (Do we need a reverse map?)
for pid,d in self._children.items():
if child is d['file']:
d['file'].close()
d['file'] = None
d['avail'] = False
break
# Reap children.
self._reapChildren()
# See who and how many children are available.
availList = filter(lambda x: x[1]['avail'], self._children.items())
avail = len(availList)
if avail < self._minSpare:
# Need to spawn more children.
while avail < self._minSpare and \
len(self._children) < self._maxChildren:
if not self._spawnChild(sock): break
avail += 1
elif avail > self._maxSpare:
# Too many spares, kill off the extras.
pids = [x[0] for x in availList]
pids.sort()
pids = pids[self._maxSpare:]
for pid in pids:
d = self._children[pid]
d['file'].close()
d['file'] = None
d['avail'] = False
# Clean up all child processes.
self._cleanupChildren()
# Restore signal handlers.
self._restoreSignalHandlers()
# Return bool based on whether or not SIGHUP was received.
return self._hupReceived
def _cleanupChildren(self):
"""
Closes all child sockets (letting those that are available know
that it's time to exit). Sends SIGINT to those that are currently
processing (and hopes that they finish ASAP).
Any children remaining after 10 seconds are SIGKILLed.
"""
# Let all children know it's time to go.
for pid,d in self._children.items():
if d['file'] is not None:
d['file'].close()
d['file'] = None
if not d['avail']:
# Child is unavailable. SIGINT it.
try:
os.kill(pid, signal.SIGINT)
except OSError, e:
if e[0] != errno.ESRCH:
raise
def alrmHandler(signum, frame):
pass
# Set up alarm to wake us up after 10 seconds.
oldSIGALRM = signal.getsignal(signal.SIGALRM)
signal.signal(signal.SIGALRM, alrmHandler)
signal.alarm(10)
# Wait for all children to die.
while len(self._children):
try:
pid, status = os.wait()
except OSError, e:
if e[0] in (errno.ECHILD, errno.EINTR):
break
if self._children.has_key(pid):
del self._children[pid]
signal.signal(signal.SIGALRM, oldSIGALRM)
# Forcefully kill any remaining children.
for pid in self._children.keys():
try:
os.kill(pid, signal.SIGKILL)
except OSError, e:
if e[0] != errno.ESRCH:
raise
def _reapChildren(self):
"""Cleans up self._children whenever children die."""
while True:
try:
pid, status = os.waitpid(-1, os.WNOHANG)
except OSError, e:
if e[0] == errno.ECHILD:
break
raise
if pid <= 0:
break
if self._children.has_key(pid): # Sanity check.
if self._children[pid]['file'] is not None:
self._children[pid]['file'].close()
self._children[pid]['file'] = None
del self._children[pid]
def _spawnChild(self, sock):
"""
Spawn a single child. Returns True if successful, False otherwise.
"""
# This socket pair is used for very simple communication between
# the parent and its children.
parent, child = socket.socketpair()
parent.setblocking(0)
setCloseOnExec(parent)
child.setblocking(0)
setCloseOnExec(child)
try:
pid = os.fork()
except OSError, e:
if e[0] in (errno.EAGAIN, errno.ENOMEM):
return False # Can't fork anymore.
raise
if not pid:
# Child
child.close()
# Put child into its own process group.
pid = os.getpid()
os.setpgid(pid, pid)
# Restore signal handlers.
self._restoreSignalHandlers()
# Close copies of child sockets.
for f in [x['file'] for x in self._children.values()
if x['file'] is not None]:
f.close()
self._children = {}
try:
# Enter main loop.
self._child(sock, parent)
except KeyboardInterrupt:
pass
sys.exit(0)
else:
# Parent
parent.close()
d = self._children[pid] = {}
d['file'] = child
d['avail'] = True
return True
def _isClientAllowed(self, addr):
"""Override to provide access control."""
return True
def _notifyParent(self, parent, msg):
"""Send message to parent, ignoring EPIPE and retrying on EAGAIN"""
while True:
try:
parent.send(msg)
return True
except socket.error, e:
if e[0] == errno.EPIPE:
return False # Parent is gone
if e[0] == errno.EAGAIN:
# Wait for socket change before sending again
select.select([], [parent], [])
else:
raise
def _child(self, sock, parent):
"""Main loop for children."""
requestCount = 0
# Re-seed random module
preseed = ''
# urandom only exists in Python >= 2.4
if hasattr(os, 'urandom'):
try:
preseed = os.urandom(16)
except NotImplementedError:
pass
# This is of questionable value: random.seed will just hash the string.
random.seed('%s%s%s' % (preseed, os.getpid(), time.time()))
del preseed
while True:
# Wait for any activity on the main socket or parent socket.
r, w, e = select.select([sock, parent], [], [])
for f in r:
# If there's any activity on the parent socket, it
# means the parent wants us to die or has died itself.
# Either way, exit.
if f is parent:
return
# Otherwise, there's activity on the main socket...
try:
clientSock, addr = sock.accept()
except socket.error, e:
if e[0] == errno.EAGAIN:
# Or maybe not.
continue
raise
setCloseOnExec(clientSock)
# Check if this client is allowed.
if not self._isClientAllowed(addr):
clientSock.close()
continue
# Notify parent we're no longer available.
self._notifyParent(parent, '\x00')
# Do the job.
self._jobClass(clientSock, addr, *self._jobArgs).run()
# If we've serviced the maximum number of requests, exit.
if self._maxRequests > 0:
requestCount += 1
if requestCount >= self._maxRequests:
break
# Tell parent we're free again.
if not self._notifyParent(parent, '\xff'):
return # Parent is gone.
# Signal handlers
def _hupHandler(self, signum, frame):
self._keepGoing = False
self._hupReceived = True
def _intHandler(self, signum, frame):
self._keepGoing = False
def _chldHandler(self, signum, frame):
# Do nothing (breaks us out of select and allows us to reap children).
pass
def _usr1Handler(self, signum, frame):
self._children_to_purge = [x['file'] for x in self._children.values()
if x['file'] is not None]
def _installSignalHandlers(self):
supportedSignals = [signal.SIGINT, signal.SIGTERM]
if hasattr(signal, 'SIGHUP'):
supportedSignals.append(signal.SIGHUP)
if hasattr(signal, 'SIGUSR1'):
supportedSignals.append(signal.SIGUSR1)
self._oldSIGs = [(x,signal.getsignal(x)) for x in supportedSignals]
for sig in supportedSignals:
if hasattr(signal, 'SIGHUP') and sig == signal.SIGHUP:
signal.signal(sig, self._hupHandler)
elif hasattr(signal, 'SIGUSR1') and sig == signal.SIGUSR1:
signal.signal(sig, self._usr1Handler)
else:
signal.signal(sig, self._intHandler)
def _restoreSignalHandlers(self):
"""Restores previous signal handlers."""
for signum,handler in self._oldSIGs:
signal.signal(signum, handler)
if __name__ == '__main__':
class TestJob(object):
def __init__(self, sock, addr):
self._sock = sock
self._addr = addr
def run(self):
print "Client connection opened from %s:%d" % self._addr
self._sock.send('Hello World!\n')
self._sock.setblocking(1)
self._sock.recv(1)
self._sock.close()
print "Client connection closed from %s:%d" % self._addr
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', 8080))
sock.listen(socket.SOMAXCONN)
PreforkServer(maxChildren=10, jobClass=TestJob).run(sock) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/thirdparty/preforkserver.py | preforkserver.py |
# Copyright © 2006-2009 Steven J. Bethard <[email protected]>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command-line parsing library
This module is an optparse-inspired command-line parsing library that:
- handles both optional and positional arguments
- produces highly informative usage messages
- supports parsers that dispatch to sub-parsers
The following is a simple usage example that sums integers from the
command-line and writes the result to a file::
parser = argparse.ArgumentParser(
description='sum the integers at the command line')
parser.add_argument(
'integers', metavar='int', nargs='+', type=int,
help='an integer to be summed')
parser.add_argument(
'--log', default=sys.stdout, type=argparse.FileType('w'),
help='the file where the sum should be written')
args = parser.parse_args()
args.log.write('%s' % sum(args.integers))
args.log.close()
The module contains the following public classes:
- ArgumentParser -- The main entry point for command-line parsing. As the
example above shows, the add_argument() method is used to populate
the parser with actions for optional and positional arguments. Then
the parse_args() method is invoked to convert the args at the
command-line into an object with attributes.
- ArgumentError -- The exception raised by ArgumentParser objects when
there are errors with the parser's actions. Errors raised while
parsing the command-line are caught by ArgumentParser and emitted
as command-line messages.
- FileType -- A factory for defining types of files to be created. As the
example above shows, instances of FileType are typically passed as
the type= argument of add_argument() calls.
- Action -- The base class for parser actions. Typically actions are
selected by passing strings like 'store_true' or 'append_const' to
the action= argument of add_argument(). However, for greater
customization of ArgumentParser actions, subclasses of Action may
be defined and passed as the action= argument.
- HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
ArgumentDefaultsHelpFormatter -- Formatter classes which
may be passed as the formatter_class= argument to the
ArgumentParser constructor. HelpFormatter is the default,
RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
not to change the formatting for help text, and
ArgumentDefaultsHelpFormatter adds information about argument defaults
to the help.
All other classes in this module are considered implementation details.
(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
considered public as object names -- the API of the formatter objects is
still considered an implementation detail.)
"""
__version__ = '1.0.1'
__all__ = [
'ArgumentParser',
'ArgumentError',
'Namespace',
'Action',
'FileType',
'HelpFormatter',
'RawDescriptionHelpFormatter',
'RawTextHelpFormatter',
'ArgumentDefaultsHelpFormatter',
]
import copy as _copy
import os as _os
import re as _re
import sys as _sys
import textwrap as _textwrap
from gettext import gettext as _
try:
_set = set
except NameError:
from sets import Set as _set
try:
_basestring = basestring
except NameError:
_basestring = str
try:
_sorted = sorted
except NameError:
def _sorted(iterable, reverse=False):
result = list(iterable)
result.sort()
if reverse:
result.reverse()
return result
# silence Python 2.6 buggy warnings about Exception.message
if _sys.version_info[:2] == (2, 6):
import warnings
warnings.filterwarnings(
action='ignore',
message='BaseException.message has been deprecated as of Python 2.6',
category=DeprecationWarning,
module='argparse')
SUPPRESS = '==SUPPRESS=='
OPTIONAL = '?'
ZERO_OR_MORE = '*'
ONE_OR_MORE = '+'
PARSER = '==PARSER=='
# =============================
# Utility functions and classes
# =============================
class _AttributeHolder(object):
"""Abstract base class that provides __repr__.
The __repr__ method returns a string in the format::
ClassName(attr=name, attr=name, ...)
The attributes are determined either by a class-level attribute,
'_kwarg_names', or by inspecting the instance __dict__.
"""
def __repr__(self):
type_name = type(self).__name__
arg_strings = []
for arg in self._get_args():
arg_strings.append(repr(arg))
for name, value in self._get_kwargs():
arg_strings.append('%s=%r' % (name, value))
return '%s(%s)' % (type_name, ', '.join(arg_strings))
def _get_kwargs(self):
return _sorted(self.__dict__.items())
def _get_args(self):
return []
def _ensure_value(namespace, name, value):
if getattr(namespace, name, None) is None:
setattr(namespace, name, value)
return getattr(namespace, name)
# ===============
# Formatting Help
# ===============
class HelpFormatter(object):
"""Formatter for generating usage messages and argument help strings.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def __init__(self,
prog,
indent_increment=2,
max_help_position=24,
width=None):
# default setting for width
if width is None:
try:
width = int(_os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self._prog = prog
self._indent_increment = indent_increment
self._max_help_position = max_help_position
self._width = width
self._current_indent = 0
self._level = 0
self._action_max_length = 0
self._root_section = self._Section(self, None)
self._current_section = self._root_section
self._whitespace_matcher = _re.compile(r'\s+')
self._long_break_matcher = _re.compile(r'\n\n\n+')
# ===============================
# Section and indentation methods
# ===============================
def _indent(self):
self._current_indent += self._indent_increment
self._level += 1
def _dedent(self):
self._current_indent -= self._indent_increment
assert self._current_indent >= 0, 'Indent decreased below 0.'
self._level -= 1
class _Section(object):
def __init__(self, formatter, parent, heading=None):
self.formatter = formatter
self.parent = parent
self.heading = heading
self.items = []
def format_help(self):
# format the indented section
if self.parent is not None:
self.formatter._indent()
join = self.formatter._join_parts
for func, args in self.items:
func(*args)
item_help = join([func(*args) for func, args in self.items])
if self.parent is not None:
self.formatter._dedent()
# return nothing if the section was empty
if not item_help:
return ''
# add the heading if the section was non-empty
if self.heading is not SUPPRESS and self.heading is not None:
current_indent = self.formatter._current_indent
heading = '%*s%s:\n' % (current_indent, '', self.heading)
else:
heading = ''
# join the section-initial newline, the heading and the help
return join(['\n', heading, item_help, '\n'])
def _add_item(self, func, args):
self._current_section.items.append((func, args))
# ========================
# Message building methods
# ========================
def start_section(self, heading):
self._indent()
section = self._Section(self, self._current_section, heading)
self._add_item(section.format_help, [])
self._current_section = section
def end_section(self):
self._current_section = self._current_section.parent
self._dedent()
def add_text(self, text):
if text is not SUPPRESS and text is not None:
self._add_item(self._format_text, [text])
def add_usage(self, usage, actions, groups, prefix=None):
if usage is not SUPPRESS:
args = usage, actions, groups, prefix
self._add_item(self._format_usage, args)
def add_argument(self, action):
if action.help is not SUPPRESS:
# find all invocations
get_invocation = self._format_action_invocation
invocations = [get_invocation(action)]
for subaction in self._iter_indented_subactions(action):
invocations.append(get_invocation(subaction))
# update the maximum item length
invocation_length = max([len(s) for s in invocations])
action_length = invocation_length + self._current_indent
self._action_max_length = max(self._action_max_length,
action_length)
# add the item to the list
self._add_item(self._format_action, [action])
def add_arguments(self, actions):
for action in actions:
self.add_argument(action)
# =======================
# Help-formatting methods
# =======================
def format_help(self):
help = self._root_section.format_help()
if help:
help = self._long_break_matcher.sub('\n\n', help)
help = help.strip('\n') + '\n'
return help
def _join_parts(self, part_strings):
return ''.join([part
for part in part_strings
if part and part is not SUPPRESS])
def _format_usage(self, usage, actions, groups, prefix):
if prefix is None:
prefix = _('usage: ')
# if usage is specified, use that
if usage is not None:
usage = usage % dict(prog=self._prog)
# if no optionals or positionals are available, usage is just prog
elif usage is None and not actions:
usage = '%(prog)s' % dict(prog=self._prog)
# if optionals and positionals are available, calculate usage
elif usage is None:
prog = '%(prog)s' % dict(prog=self._prog)
# split optionals from positionals
optionals = []
positionals = []
for action in actions:
if action.option_strings:
optionals.append(action)
else:
positionals.append(action)
# build full usage string
format = self._format_actions_usage
action_usage = format(optionals + positionals, groups)
usage = ' '.join([s for s in [prog, action_usage] if s])
# wrap the usage parts if it's too long
text_width = self._width - self._current_indent
if len(prefix) + len(usage) > text_width:
# break usage into wrappable parts
part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'
opt_usage = format(optionals, groups)
pos_usage = format(positionals, groups)
opt_parts = _re.findall(part_regexp, opt_usage)
pos_parts = _re.findall(part_regexp, pos_usage)
assert ' '.join(opt_parts) == opt_usage
assert ' '.join(pos_parts) == pos_usage
# helper for wrapping lines
def get_lines(parts, indent, prefix=None):
lines = []
line = []
if prefix is not None:
line_len = len(prefix) - 1
else:
line_len = len(indent) - 1
for part in parts:
if line_len + 1 + len(part) > text_width:
lines.append(indent + ' '.join(line))
line = []
line_len = len(indent) - 1
line.append(part)
line_len += len(part) + 1
if line:
lines.append(indent + ' '.join(line))
if prefix is not None:
lines[0] = lines[0][len(indent):]
return lines
# if prog is short, follow it with optionals or positionals
if len(prefix) + len(prog) <= 0.75 * text_width:
indent = ' ' * (len(prefix) + len(prog) + 1)
if opt_parts:
lines = get_lines([prog] + opt_parts, indent, prefix)
lines.extend(get_lines(pos_parts, indent))
elif pos_parts:
lines = get_lines([prog] + pos_parts, indent, prefix)
else:
lines = [prog]
# if prog is long, put it on its own line
else:
indent = ' ' * len(prefix)
parts = opt_parts + pos_parts
lines = get_lines(parts, indent)
if len(lines) > 1:
lines = []
lines.extend(get_lines(opt_parts, indent))
lines.extend(get_lines(pos_parts, indent))
lines = [prog] + lines
# join lines into usage
usage = '\n'.join(lines)
# prefix with 'usage:'
return '%s%s\n\n' % (prefix, usage)
def _format_actions_usage(self, actions, groups):
# find group indices and identify actions in groups
group_actions = _set()
inserts = {}
for group in groups:
try:
start = actions.index(group._group_actions[0])
except ValueError:
continue
else:
end = start + len(group._group_actions)
if actions[start:end] == group._group_actions:
for action in group._group_actions:
group_actions.add(action)
if not group.required:
inserts[start] = '['
inserts[end] = ']'
else:
inserts[start] = '('
inserts[end] = ')'
for i in range(start + 1, end):
inserts[i] = '|'
# collect all actions format strings
parts = []
for i, action in enumerate(actions):
# suppressed arguments are marked with None
# remove | separators for suppressed arguments
if action.help is SUPPRESS:
parts.append(None)
if inserts.get(i) == '|':
inserts.pop(i)
elif inserts.get(i + 1) == '|':
inserts.pop(i + 1)
# produce all arg strings
elif not action.option_strings:
part = self._format_args(action, action.dest)
# if it's in a group, strip the outer []
if action in group_actions:
if part[0] == '[' and part[-1] == ']':
part = part[1:-1]
# add the action string to the list
parts.append(part)
# produce the first way to invoke the option in brackets
else:
option_string = action.option_strings[0]
# if the Optional doesn't take a value, format is:
# -s or --long
if action.nargs == 0:
part = '%s' % option_string
# if the Optional takes a value, format is:
# -s ARGS or --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
part = '%s %s' % (option_string, args_string)
# make it look optional if it's not required or in a group
if not action.required and action not in group_actions:
part = '[%s]' % part
# add the action string to the list
parts.append(part)
# insert things at the necessary indices
for i in _sorted(inserts, reverse=True):
parts[i:i] = [inserts[i]]
# join all the action items with spaces
text = ' '.join([item for item in parts if item is not None])
# clean up separators for mutually exclusive groups
open = r'[\[(]'
close = r'[\])]'
text = _re.sub(r'(%s) ' % open, r'\1', text)
text = _re.sub(r' (%s)' % close, r'\1', text)
text = _re.sub(r'%s *%s' % (open, close), r'', text)
text = _re.sub(r'\(([^|]*)\)', r'\1', text)
text = text.strip()
# return the text
return text
def _format_text(self, text):
text_width = self._width - self._current_indent
indent = ' ' * self._current_indent
return self._fill_text(text, text_width, indent) + '\n\n'
def _format_action(self, action):
# determine the required width and the entry label
help_position = min(self._action_max_length + 2,
self._max_help_position)
help_width = self._width - help_position
action_width = help_position - self._current_indent - 2
action_header = self._format_action_invocation(action)
# no help; start on the same line and add a final newline
if not action.help:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
# short action name; start on the same line and pad two spaces
elif len(action_header) <= action_width:
tup = self._current_indent, '', action_width, action_header
action_header = '%*s%-*s ' % tup
indent_first = 0
# long action name; start on the next line
else:
tup = self._current_indent, '', action_header
action_header = '%*s%s\n' % tup
indent_first = help_position
# collect the pieces of the action help
parts = [action_header]
# if there was help for the action, add lines of help text
if action.help:
help_text = self._expand_help(action)
help_lines = self._split_lines(help_text, help_width)
parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
for line in help_lines[1:]:
parts.append('%*s%s\n' % (help_position, '', line))
# or add a newline if the description doesn't end with one
elif not action_header.endswith('\n'):
parts.append('\n')
# if there are any sub-actions, add their help as well
for subaction in self._iter_indented_subactions(action):
parts.append(self._format_action(subaction))
# return a single string
return self._join_parts(parts)
def _format_action_invocation(self, action):
if not action.option_strings:
metavar, = self._metavar_formatter(action, action.dest)(1)
return metavar
else:
parts = []
# if the Optional doesn't take a value, format is:
# -s, --long
if action.nargs == 0:
parts.extend(action.option_strings)
# if the Optional takes a value, format is:
# -s ARGS, --long ARGS
else:
default = action.dest.upper()
args_string = self._format_args(action, default)
for option_string in action.option_strings:
parts.append('%s %s' % (option_string, args_string))
return ', '.join(parts)
def _metavar_formatter(self, action, default_metavar):
if action.metavar is not None:
result = action.metavar
elif action.choices is not None:
choice_strs = [str(choice) for choice in action.choices]
result = '{%s}' % ','.join(choice_strs)
else:
result = default_metavar
def format(tuple_size):
if isinstance(result, tuple):
return result
else:
return (result, ) * tuple_size
return format
def _format_args(self, action, default_metavar):
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs is None:
result = '%s' % get_metavar(1)
elif action.nargs == OPTIONAL:
result = '[%s]' % get_metavar(1)
elif action.nargs == ZERO_OR_MORE:
result = '[%s [%s ...]]' % get_metavar(2)
elif action.nargs == ONE_OR_MORE:
result = '%s [%s ...]' % get_metavar(2)
elif action.nargs is PARSER:
result = '%s ...' % get_metavar(1)
else:
formats = ['%s' for _ in range(action.nargs)]
result = ' '.join(formats) % get_metavar(action.nargs)
return result
def _expand_help(self, action):
params = dict(vars(action), prog=self._prog)
for name in list(params):
if params[name] is SUPPRESS:
del params[name]
if params.get('choices') is not None:
choices_str = ', '.join([str(c) for c in params['choices']])
params['choices'] = choices_str
return self._get_help_string(action) % params
def _iter_indented_subactions(self, action):
try:
get_subactions = action._get_subactions
except AttributeError:
pass
else:
self._indent()
for subaction in get_subactions():
yield subaction
self._dedent()
def _split_lines(self, text, width):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.wrap(text, width)
def _fill_text(self, text, width, indent):
text = self._whitespace_matcher.sub(' ', text).strip()
return _textwrap.fill(text, width, initial_indent=indent,
subsequent_indent=indent)
def _get_help_string(self, action):
return action.help
class RawDescriptionHelpFormatter(HelpFormatter):
"""Help message formatter which retains any formatting in descriptions.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _fill_text(self, text, width, indent):
return ''.join([indent + line for line in text.splitlines(True)])
class RawTextHelpFormatter(RawDescriptionHelpFormatter):
"""Help message formatter which retains formatting of all help text.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
"""
def _split_lines(self, text, width):
return text.splitlines()
class ArgumentDefaultsHelpFormatter(HelpFormatter):
"""Help message formatter which adds default values to argument help.
Only the name of this class is considered a public API. All the methods
provided by the class are considered an implementation detail.
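Typically passed as the formatter_class= argument of ArgumentParser::
    parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)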
"""
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help:
if action.default is not SUPPRESS:
defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
if action.option_strings or action.nargs in defaulting_nargs:
help += ' (default: %(default)s)'
return help
# =====================
# Options and Arguments
# =====================
def _get_action_name(argument):
if argument is None:
return None
elif argument.option_strings:
return '/'.join(argument.option_strings)
elif argument.metavar not in (None, SUPPRESS):
return argument.metavar
elif argument.dest not in (None, SUPPRESS):
return argument.dest
else:
return None
class ArgumentError(Exception):
"""An error from creating or using an argument (optional or positional).
The string value of this exception is the message, augmented with
information about the argument that caused it.
"""
def __init__(self, argument, message):
self.argument_name = _get_action_name(argument)
self.message = message
def __str__(self):
if self.argument_name is None:
format = '%(message)s'
else:
format = 'argument %(argument_name)s: %(message)s'
return format % dict(message=self.message,
argument_name=self.argument_name)
# ==============
# Action classes
# ==============
class Action(_AttributeHolder):
"""Information about how to convert command line strings to Python objects.
Action objects are used by an ArgumentParser to represent the information
needed to parse a single argument from one or more strings from the
command line. The keyword arguments to the Action constructor are also
all attributes of Action instances.
Keyword Arguments:
- option_strings -- A list of command-line option strings which
should be associated with this action.
- dest -- The name of the attribute to hold the created object(s)
- nargs -- The number of command-line arguments that should be
consumed. By default, one argument will be consumed and a single
value will be produced. Other values include:
- N (an integer) consumes N arguments (and produces a list)
- '?' consumes zero or one arguments
- '*' consumes zero or more arguments (and produces a list)
- '+' consumes one or more arguments (and produces a list)
Note that the difference between the default and nargs=1 is that
with the default, a single value will be produced, while with
nargs=1, a list containing a single value will be produced.
- const -- The value to be produced if the option is specified and the
option uses an action that takes no values.
- default -- The value to be produced if the option is not specified.
- type -- The type which the command-line arguments should be converted
to, should be one of 'string', 'int', 'float', 'complex' or a
callable object that accepts a single string argument. If None,
'string' is assumed.
- choices -- A container of values that should be allowed. If not None,
after a command-line argument has been converted to the appropriate
type, an exception will be raised if it is not a member of this
collection.
- required -- True if the action must always be specified at the
command line. This is only meaningful for optional command-line
arguments.
- help -- The help string describing the argument.
- metavar -- The name to be used for the option's argument with the
help string. If None, the 'dest' value will be used as the name.
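A custom action is defined by subclassing Action and overriding
__call__ (an illustrative sketch; ``parser`` is assumed to be an
ArgumentParser instance)::
    class FooAction(Action):
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, values)
    parser.add_argument('--foo', action=FooAction)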
"""
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.option_strings = option_strings
self.dest = dest
self.nargs = nargs
self.const = const
self.default = default
self.type = type
self.choices = choices
self.required = required
self.help = help
self.metavar = metavar
def _get_kwargs(self):
names = [
'option_strings',
'dest',
'nargs',
'const',
'default',
'type',
'choices',
'help',
'metavar',
]
return [(name, getattr(self, name)) for name in names]
def __call__(self, parser, namespace, values, option_string=None):
raise NotImplementedError(_('.__call__() not defined'))
class _StoreAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for store actions must be > 0; if you '
'have nothing to store, actions such as store '
'true or store const may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_StoreAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_StoreConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=False,
required=False,
help=None):
super(_StoreTrueAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
required=required,
help=help)
class _StoreFalseAction(_StoreConstAction):
def __init__(self,
option_strings,
dest,
default=True,
required=False,
help=None):
super(_StoreFalseAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=False,
default=default,
required=required,
help=help)
class _AppendAction(Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
if nargs == 0:
raise ValueError('nargs for append actions must be > 0; if arg '
'strings are not supplying the value to append, '
'the append const action may be more appropriate')
if const is not None and nargs != OPTIONAL:
raise ValueError('nargs must be %r to supply const' % OPTIONAL)
super(_AppendAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=default,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(values)
setattr(namespace, self.dest, items)
class _AppendConstAction(Action):
def __init__(self,
option_strings,
dest,
const,
default=None,
required=False,
help=None,
metavar=None):
super(_AppendConstAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
metavar=metavar)
def __call__(self, parser, namespace, values, option_string=None):
items = _copy.copy(_ensure_value(namespace, self.dest, []))
items.append(self.const)
setattr(namespace, self.dest, items)
class _CountAction(Action):
def __init__(self,
option_strings,
dest,
default=None,
required=False,
help=None):
super(_CountAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
new_count = _ensure_value(namespace, self.dest, 0) + 1
setattr(namespace, self.dest, new_count)
class _HelpAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
parser.exit()
class _VersionAction(Action):
def __init__(self,
option_strings,
dest=SUPPRESS,
default=SUPPRESS,
help=None):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help)
def __call__(self, parser, namespace, values, option_string=None):
parser.print_version()
parser.exit()
class _SubParsersAction(Action):
class _ChoicesPseudoAction(Action):
def __init__(self, name, help):
sup = super(_SubParsersAction._ChoicesPseudoAction, self)
sup.__init__(option_strings=[], dest=name, help=help)
def __init__(self,
option_strings,
prog,
parser_class,
dest=SUPPRESS,
help=None,
metavar=None):
self._prog_prefix = prog
self._parser_class = parser_class
self._name_parser_map = {}
self._choices_actions = []
super(_SubParsersAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=PARSER,
choices=self._name_parser_map,
help=help,
metavar=metavar)
def add_parser(self, name, **kwargs):
# set prog from the existing prefix
if kwargs.get('prog') is None:
kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
# create a pseudo-action to hold the choice help
if 'help' in kwargs:
help = kwargs.pop('help')
choice_action = self._ChoicesPseudoAction(name, help)
self._choices_actions.append(choice_action)
# create the parser and add it to the map
parser = self._parser_class(**kwargs)
self._name_parser_map[name] = parser
return parser
def _get_subactions(self):
return self._choices_actions
def __call__(self, parser, namespace, values, option_string=None):
parser_name = values[0]
arg_strings = values[1:]
# set the parser name if requested
if self.dest is not SUPPRESS:
setattr(namespace, self.dest, parser_name)
# select the parser
try:
parser = self._name_parser_map[parser_name]
except KeyError:
tup = parser_name, ', '.join(self._name_parser_map)
msg = _('unknown parser %r (choices: %s)') % tup
raise ArgumentError(self, msg)
# parse all the remaining options into the namespace
parser.parse_args(arg_strings, namespace)
# ==============
# Type classes
# ==============
class FileType(object):
"""Factory for creating file object types
Instances of FileType are typically passed as type= arguments to the
ArgumentParser add_argument() method.
Keyword Arguments:
- mode -- A string indicating how the file is to be opened. Accepts the
same values as the builtin open() function.
- bufsize -- The file's desired buffer size. Accepts the same values as
the builtin open() function.
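Example (illustrative; ``parser`` is assumed to be an ArgumentParser
instance)::
    parser.add_argument('--output', type=FileType('w'))
    args = parser.parse_args(['--output', '-'])  # '-' means sys.stdout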
"""
def __init__(self, mode='r', bufsize=None):
self._mode = mode
self._bufsize = bufsize
def __call__(self, string):
# the special argument "-" means sys.std{in,out}
if string == '-':
if 'r' in self._mode:
return _sys.stdin
elif 'w' in self._mode:
return _sys.stdout
else:
msg = _('argument "-" with mode %r') % self._mode
raise ValueError(msg)
# all other arguments are used as file names
if self._bufsize:
return open(string, self._mode, self._bufsize)
else:
return open(string, self._mode)
def __repr__(self):
args = [self._mode, self._bufsize]
args_str = ', '.join([repr(arg) for arg in args if arg is not None])
return '%s(%s)' % (type(self).__name__, args_str)
# ===========================
# Optional and Positional Parsing
# ===========================
class Namespace(_AttributeHolder):
"""Simple object for storing attributes.
Implements equality by attribute names and values, and provides a simple
string representation.
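For example (following the repr and equality behavior described
above)::
    >>> Namespace(foo=1, bar='x')
    Namespace(bar='x', foo=1)
    >>> Namespace(foo=1) == Namespace(foo=1)
    True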
"""
def __init__(self, **kwargs):
for name in kwargs:
setattr(self, name, kwargs[name])
def __eq__(self, other):
return vars(self) == vars(other)
def __ne__(self, other):
return not (self == other)
class _ActionsContainer(object):
def __init__(self,
description,
prefix_chars,
argument_default,
conflict_handler):
super(_ActionsContainer, self).__init__()
self.description = description
self.argument_default = argument_default
self.prefix_chars = prefix_chars
self.conflict_handler = conflict_handler
# set up registries
self._registries = {}
# register actions
self.register('action', None, _StoreAction)
self.register('action', 'store', _StoreAction)
self.register('action', 'store_const', _StoreConstAction)
self.register('action', 'store_true', _StoreTrueAction)
self.register('action', 'store_false', _StoreFalseAction)
self.register('action', 'append', _AppendAction)
self.register('action', 'append_const', _AppendConstAction)
self.register('action', 'count', _CountAction)
self.register('action', 'help', _HelpAction)
self.register('action', 'version', _VersionAction)
self.register('action', 'parsers', _SubParsersAction)
# raise an exception if the conflict handler is invalid
self._get_handler()
# action storage
self._actions = []
self._option_string_actions = {}
# groups
self._action_groups = []
self._mutually_exclusive_groups = []
# defaults storage
self._defaults = {}
# determines whether an "option" looks like a negative number
self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')
# whether or not there are any optionals that look like negative
# numbers -- uses a list so it can be shared and edited
self._has_negative_number_optionals = []
# ====================
# Registration methods
# ====================
def register(self, registry_name, value, object):
registry = self._registries.setdefault(registry_name, {})
registry[value] = object
def _registry_get(self, registry_name, value, default=None):
return self._registries[registry_name].get(value, default)
# ==================================
# Namespace default settings methods
# ==================================
def set_defaults(self, **kwargs):
self._defaults.update(kwargs)
# if these defaults match any existing arguments, replace
# the previous default on the object with the new one
for action in self._actions:
if action.dest in kwargs:
action.default = kwargs[action.dest]
# =======================
# Adding argument actions
# =======================
def add_argument(self, *args, **kwargs):
"""
add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)
"""
# if no positional args are supplied or only one is supplied and
# it doesn't look like an option string, parse a positional
# argument
chars = self.prefix_chars
if not args or len(args) == 1 and args[0][0] not in chars:
kwargs = self._get_positional_kwargs(*args, **kwargs)
# otherwise, we're adding an optional argument
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
# if no default was supplied, use the parser-level default
if 'default' not in kwargs:
dest = kwargs['dest']
if dest in self._defaults:
kwargs['default'] = self._defaults[dest]
elif self.argument_default is not None:
kwargs['default'] = self.argument_default
# create the action object, and add it to the parser
action_class = self._pop_action_class(kwargs)
action = action_class(**kwargs)
return self._add_action(action)
def add_argument_group(self, *args, **kwargs):
group = _ArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
def add_mutually_exclusive_group(self, **kwargs):
group = _MutuallyExclusiveGroup(self, **kwargs)
self._mutually_exclusive_groups.append(group)
return group
def _add_action(self, action):
# resolve any conflicts
self._check_conflict(action)
# add to actions list
self._actions.append(action)
action.container = self
# index the action by any option strings it has
for option_string in action.option_strings:
self._option_string_actions[option_string] = action
# set the flag if any option strings look like negative numbers
for option_string in action.option_strings:
if self._negative_number_matcher.match(option_string):
if not self._has_negative_number_optionals:
self._has_negative_number_optionals.append(True)
# return the created action
return action
def _remove_action(self, action):
self._actions.remove(action)
def _add_container_actions(self, container):
# collect groups by titles
title_group_map = {}
for group in self._action_groups:
if group.title in title_group_map:
msg = _('cannot merge actions - two groups are named %r')
raise ValueError(msg % (group.title))
title_group_map[group.title] = group
# map each action to its group
group_map = {}
for group in container._action_groups:
# if a group with the title exists, use that, otherwise
# create a new group matching the container's group
if group.title not in title_group_map:
title_group_map[group.title] = self.add_argument_group(
title=group.title,
description=group.description,
conflict_handler=group.conflict_handler)
# map the actions to their new group
for action in group._group_actions:
group_map[action] = title_group_map[group.title]
# add container's mutually exclusive groups
# NOTE: if add_mutually_exclusive_group ever gains title= and
# description= then this code will need to be expanded as above
for group in container._mutually_exclusive_groups:
mutex_group = self.add_mutually_exclusive_group(
required=group.required)
# map the actions to their new mutex group
for action in group._group_actions:
group_map[action] = mutex_group
# add all actions to this container or their group
for action in container._actions:
group_map.get(action, self)._add_action(action)
def _get_positional_kwargs(self, dest, **kwargs):
# make sure required is not specified
if 'required' in kwargs:
msg = _("'required' is an invalid argument for positionals")
raise TypeError(msg)
# mark positional arguments as required if at least one is
# always required
if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
kwargs['required'] = True
if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
kwargs['required'] = True
# return the keyword arguments with no option strings
return dict(kwargs, dest=dest, option_strings=[])
def _get_optional_kwargs(self, *args, **kwargs):
# determine short and long option strings
option_strings = []
long_option_strings = []
for option_string in args:
# error on one-or-fewer-character option strings
if len(option_string) < 2:
msg = _('invalid option string %r: '
'must be at least two characters long')
raise ValueError(msg % option_string)
# error on strings that don't start with an appropriate prefix
if not option_string[0] in self.prefix_chars:
msg = _('invalid option string %r: '
'must start with a character %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# error on strings that are all prefix characters
if not (_set(option_string) - _set(self.prefix_chars)):
msg = _('invalid option string %r: '
'must contain characters other than %r')
tup = option_string, self.prefix_chars
raise ValueError(msg % tup)
# strings starting with two prefix characters are long options
option_strings.append(option_string)
if option_string[0] in self.prefix_chars:
if option_string[1] in self.prefix_chars:
long_option_strings.append(option_string)
# infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
dest = kwargs.pop('dest', None)
if dest is None:
if long_option_strings:
dest_option_string = long_option_strings[0]
else:
dest_option_string = option_strings[0]
dest = dest_option_string.lstrip(self.prefix_chars)
dest = dest.replace('-', '_')
# return the updated keyword arguments
return dict(kwargs, dest=dest, option_strings=option_strings)
def _pop_action_class(self, kwargs, default=None):
action = kwargs.pop('action', default)
return self._registry_get('action', action, action)
def _get_handler(self):
# determine function from conflict handler string
handler_func_name = '_handle_conflict_%s' % self.conflict_handler
try:
return getattr(self, handler_func_name)
except AttributeError:
msg = _('invalid conflict_resolution value: %r')
raise ValueError(msg % self.conflict_handler)
def _check_conflict(self, action):
# find all options that conflict with this option
confl_optionals = []
for option_string in action.option_strings:
if option_string in self._option_string_actions:
confl_optional = self._option_string_actions[option_string]
confl_optionals.append((option_string, confl_optional))
# resolve any conflicts
if confl_optionals:
conflict_handler = self._get_handler()
conflict_handler(action, confl_optionals)
def _handle_conflict_error(self, action, conflicting_actions):
message = _('conflicting option string(s): %s')
conflict_string = ', '.join([option_string
for option_string, action
in conflicting_actions])
raise ArgumentError(action, message % conflict_string)
def _handle_conflict_resolve(self, action, conflicting_actions):
# remove all conflicting options
for option_string, action in conflicting_actions:
# remove the conflicting option
action.option_strings.remove(option_string)
self._option_string_actions.pop(option_string, None)
# if the option now has no option string, remove it from the
# container holding it
if not action.option_strings:
action.container._remove_action(action)
class _ArgumentGroup(_ActionsContainer):
def __init__(self, container, title=None, description=None, **kwargs):
# add any missing keyword arguments by checking the container
update = kwargs.setdefault
update('conflict_handler', container.conflict_handler)
update('prefix_chars', container.prefix_chars)
update('argument_default', container.argument_default)
super_init = super(_ArgumentGroup, self).__init__
super_init(description=description, **kwargs)
# group attributes
self.title = title
self._group_actions = []
# share most attributes with the container
self._registries = container._registries
self._actions = container._actions
self._option_string_actions = container._option_string_actions
self._defaults = container._defaults
self._has_negative_number_optionals = \
container._has_negative_number_optionals
def _add_action(self, action):
action = super(_ArgumentGroup, self)._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
super(_ArgumentGroup, self)._remove_action(action)
self._group_actions.remove(action)
class _MutuallyExclusiveGroup(_ArgumentGroup):
def __init__(self, container, required=False):
super(_MutuallyExclusiveGroup, self).__init__(container)
self.required = required
self._container = container
def _add_action(self, action):
if action.required:
msg = _('mutually exclusive arguments must be optional')
raise ValueError(msg)
action = self._container._add_action(action)
self._group_actions.append(action)
return action
def _remove_action(self, action):
self._container._remove_action(action)
self._group_actions.remove(action)
class ArgumentParser(_AttributeHolder, _ActionsContainer):
"""Object for parsing command line strings into Python objects.
Keyword Arguments:
- prog -- The name of the program (default: sys.argv[0])
- usage -- A usage message (default: auto-generated from arguments)
- description -- A description of what the program does
- epilog -- Text following the argument descriptions
- version -- Add a -v/--version option with the given version string
- parents -- Parsers whose arguments should be copied into this one
- formatter_class -- HelpFormatter class for printing help messages
- prefix_chars -- Characters that prefix optional arguments
- fromfile_prefix_chars -- Characters that prefix files containing
additional arguments
- argument_default -- The default value for all arguments
- conflict_handler -- String indicating how to handle conflicts
        - add_help -- Add a -h/--help option
"""
def __init__(self,
prog=None,
usage=None,
description=None,
epilog=None,
version=None,
parents=[],
formatter_class=HelpFormatter,
prefix_chars='-',
fromfile_prefix_chars=None,
argument_default=None,
conflict_handler='error',
add_help=True):
superinit = super(ArgumentParser, self).__init__
superinit(description=description,
prefix_chars=prefix_chars,
argument_default=argument_default,
conflict_handler=conflict_handler)
# default setting for prog
if prog is None:
prog = _os.path.basename(_sys.argv[0])
self.prog = prog
self.usage = usage
self.epilog = epilog
self.version = version
self.formatter_class = formatter_class
self.fromfile_prefix_chars = fromfile_prefix_chars
self.add_help = add_help
add_group = self.add_argument_group
self._positionals = add_group(_('positional arguments'))
self._optionals = add_group(_('optional arguments'))
self._subparsers = None
# register types
def identity(string):
return string
self.register('type', None, identity)
# add help and version arguments if necessary
# (using explicit default to override global argument_default)
if self.add_help:
self.add_argument(
'-h', '--help', action='help', default=SUPPRESS,
help=_('show this help message and exit'))
if self.version:
self.add_argument(
'-v', '--version', action='version', default=SUPPRESS,
help=_("show program's version number and exit"))
# add parent arguments and defaults
for parent in parents:
self._add_container_actions(parent)
try:
defaults = parent._defaults
except AttributeError:
pass
else:
self._defaults.update(defaults)
# =======================
# Pretty __repr__ methods
# =======================
def _get_kwargs(self):
names = [
'prog',
'usage',
'description',
'version',
'formatter_class',
'conflict_handler',
'add_help',
]
return [(name, getattr(self, name)) for name in names]
# ==================================
# Optional/Positional adding methods
# ==================================
def add_subparsers(self, **kwargs):
if self._subparsers is not None:
self.error(_('cannot have multiple subparser arguments'))
# add the parser class to the arguments if it's not present
kwargs.setdefault('parser_class', type(self))
if 'title' in kwargs or 'description' in kwargs:
title = _(kwargs.pop('title', 'subcommands'))
description = _(kwargs.pop('description', None))
self._subparsers = self.add_argument_group(title, description)
else:
self._subparsers = self._positionals
# prog defaults to the usage message of this parser, skipping
# optional arguments and with no "usage:" prefix
if kwargs.get('prog') is None:
formatter = self._get_formatter()
positionals = self._get_positional_actions()
groups = self._mutually_exclusive_groups
formatter.add_usage(self.usage, positionals, groups, '')
kwargs['prog'] = formatter.format_help().strip()
# create the parsers action and add it to the positionals list
parsers_class = self._pop_action_class(kwargs, 'parsers')
action = parsers_class(option_strings=[], **kwargs)
self._subparsers._add_action(action)
# return the created parsers action
return action
def _add_action(self, action):
if action.option_strings:
self._optionals._add_action(action)
else:
self._positionals._add_action(action)
return action
def _get_optional_actions(self):
return [action
for action in self._actions
if action.option_strings]
def _get_positional_actions(self):
return [action
for action in self._actions
if not action.option_strings]
# =====================================
# Command line argument parsing methods
# =====================================
def parse_args(self, args=None, namespace=None):
args, argv = self.parse_known_args(args, namespace)
if argv:
msg = _('unrecognized arguments: %s')
self.error(msg % ' '.join(argv))
return args
def parse_known_args(self, args=None, namespace=None):
# args default to the system args
if args is None:
args = _sys.argv[1:]
# default Namespace built from parser defaults
if namespace is None:
namespace = Namespace()
# add any action defaults that aren't present
for action in self._actions:
if action.dest is not SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not SUPPRESS:
default = action.default
if isinstance(action.default, _basestring):
default = self._get_value(action, default)
setattr(namespace, action.dest, default)
# add any parser defaults that aren't present
for dest in self._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, self._defaults[dest])
# parse the arguments and exit if there are any errors
try:
return self._parse_known_args(args, namespace)
except ArgumentError:
err = _sys.exc_info()[1]
self.error(str(err))
def _parse_known_args(self, arg_strings, namespace):
# replace arg strings that are file references
if self.fromfile_prefix_chars is not None:
arg_strings = self._read_args_from_files(arg_strings)
# map all mutually exclusive arguments to the other arguments
# they can't occur with
action_conflicts = {}
for mutex_group in self._mutually_exclusive_groups:
group_actions = mutex_group._group_actions
for i, mutex_action in enumerate(mutex_group._group_actions):
conflicts = action_conflicts.setdefault(mutex_action, [])
conflicts.extend(group_actions[:i])
conflicts.extend(group_actions[i + 1:])
# find all option indices, and determine the arg_string_pattern
# which has an 'O' if there is an option at an index,
# an 'A' if there is an argument, or a '-' if there is a '--'
option_string_indices = {}
arg_string_pattern_parts = []
arg_strings_iter = iter(arg_strings)
for i, arg_string in enumerate(arg_strings_iter):
# all args after -- are non-options
if arg_string == '--':
arg_string_pattern_parts.append('-')
for arg_string in arg_strings_iter:
arg_string_pattern_parts.append('A')
# otherwise, add the arg to the arg strings
# and note the index if it was an option
else:
option_tuple = self._parse_optional(arg_string)
if option_tuple is None:
pattern = 'A'
else:
option_string_indices[i] = option_tuple
pattern = 'O'
arg_string_pattern_parts.append(pattern)
# join the pieces together to form the pattern
arg_strings_pattern = ''.join(arg_string_pattern_parts)
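        # for illustration: with an option '-x' defined, the arg strings
        # ['-x', '1', '--', 'a', 'b'] produce the pattern 'OA-AA'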
        # convert arg strings to the appropriate values and then take the action
seen_actions = _set()
seen_non_default_actions = _set()
def take_action(action, argument_strings, option_string=None):
seen_actions.add(action)
argument_values = self._get_values(action, argument_strings)
# error if this argument is not allowed with other previously
# seen arguments, assuming that actions that use the default
# value don't really count as "present"
if argument_values is not action.default:
seen_non_default_actions.add(action)
for conflict_action in action_conflicts.get(action, []):
if conflict_action in seen_non_default_actions:
msg = _('not allowed with argument %s')
action_name = _get_action_name(conflict_action)
raise ArgumentError(action, msg % action_name)
# take the action if we didn't receive a SUPPRESS value
# (e.g. from a default)
if argument_values is not SUPPRESS:
action(self, namespace, argument_values, option_string)
# function to convert arg_strings into an optional action
def consume_optional(start_index):
# get the optional identified at this index
option_tuple = option_string_indices[start_index]
action, option_string, explicit_arg = option_tuple
# identify additional optionals in the same arg string
# (e.g. -xyz is the same as -x -y -z if no args are required)
match_argument = self._match_argument
action_tuples = []
while True:
# if we found no optional action, skip it
if action is None:
extras.append(arg_strings[start_index])
return start_index + 1
# if there is an explicit argument, try to match the
# optional's string arguments to only this
if explicit_arg is not None:
arg_count = match_argument(action, 'A')
# if the action is a single-dash option and takes no
# arguments, try to parse more single-dash options out
# of the tail of the option string
chars = self.prefix_chars
if arg_count == 0 and option_string[1] not in chars:
action_tuples.append((action, [], option_string))
for char in self.prefix_chars:
option_string = char + explicit_arg[0]
explicit_arg = explicit_arg[1:] or None
optionals_map = self._option_string_actions
if option_string in optionals_map:
action = optionals_map[option_string]
break
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
                # if the action expects exactly one argument, we've
                # successfully matched the option; exit the loop
elif arg_count == 1:
stop = start_index + 1
args = [explicit_arg]
action_tuples.append((action, args, option_string))
break
# error if a double-dash option did not use the
# explicit argument
else:
msg = _('ignored explicit argument %r')
raise ArgumentError(action, msg % explicit_arg)
# if there is no explicit argument, try to match the
# optional's string arguments with the following strings
# if successful, exit the loop
else:
start = start_index + 1
selected_patterns = arg_strings_pattern[start:]
arg_count = match_argument(action, selected_patterns)
stop = start + arg_count
args = arg_strings[start:stop]
action_tuples.append((action, args, option_string))
break
# add the Optional to the list and return the index at which
# the Optional's string args stopped
assert action_tuples
for action, args, option_string in action_tuples:
take_action(action, args, option_string)
return stop
# the list of Positionals left to be parsed; this is modified
# by consume_positionals()
positionals = self._get_positional_actions()
# function to convert arg_strings into positional actions
def consume_positionals(start_index):
# match as many Positionals as possible
match_partial = self._match_arguments_partial
selected_pattern = arg_strings_pattern[start_index:]
arg_counts = match_partial(positionals, selected_pattern)
# slice off the appropriate arg strings for each Positional
# and add the Positional and its args to the list
for action, arg_count in zip(positionals, arg_counts):
args = arg_strings[start_index: start_index + arg_count]
start_index += arg_count
take_action(action, args)
# slice off the Positionals that we just parsed and return the
# index at which the Positionals' string args stopped
positionals[:] = positionals[len(arg_counts):]
return start_index
# consume Positionals and Optionals alternately, until we have
# passed the last option string
extras = []
start_index = 0
if option_string_indices:
max_option_string_index = max(option_string_indices)
else:
max_option_string_index = -1
while start_index <= max_option_string_index:
# consume any Positionals preceding the next option
next_option_string_index = min([
index
for index in option_string_indices
if index >= start_index])
if start_index != next_option_string_index:
positionals_end_index = consume_positionals(start_index)
# only try to parse the next optional if we didn't consume
# the option string during the positionals parsing
if positionals_end_index > start_index:
start_index = positionals_end_index
continue
else:
start_index = positionals_end_index
# if we consumed all the positionals we could and we're not
# at the index of an option string, there were extra arguments
if start_index not in option_string_indices:
strings = arg_strings[start_index:next_option_string_index]
extras.extend(strings)
start_index = next_option_string_index
# consume the next optional and any arguments for it
start_index = consume_optional(start_index)
# consume any positionals following the last Optional
stop_index = consume_positionals(start_index)
# if we didn't consume all the argument strings, there were extras
extras.extend(arg_strings[stop_index:])
# if we didn't use all the Positional objects, there were too few
# arg strings supplied.
if positionals:
self.error(_('too few arguments'))
# make sure all required actions were present
for action in self._actions:
if action.required:
if action not in seen_actions:
name = _get_action_name(action)
self.error(_('argument %s is required') % name)
# make sure all required groups had one option present
for group in self._mutually_exclusive_groups:
if group.required:
for action in group._group_actions:
if action in seen_non_default_actions:
break
# if no actions were used, report the error
else:
names = [_get_action_name(action)
for action in group._group_actions
if action.help is not SUPPRESS]
msg = _('one of the arguments %s is required')
self.error(msg % ' '.join(names))
# return the updated namespace and the extra arguments
return namespace, extras
def _read_args_from_files(self, arg_strings):
# expand arguments referencing files
new_arg_strings = []
for arg_string in arg_strings:
# for regular arguments, just add them back into the list
if arg_string[0] not in self.fromfile_prefix_chars:
new_arg_strings.append(arg_string)
# replace arguments referencing files with the file content
else:
try:
args_file = open(arg_string[1:])
try:
arg_strings = args_file.read().splitlines()
arg_strings = self._read_args_from_files(arg_strings)
new_arg_strings.extend(arg_strings)
finally:
args_file.close()
except IOError:
err = _sys.exc_info()[1]
self.error(str(err))
# return the modified argument list
return new_arg_strings
def _match_argument(self, action, arg_strings_pattern):
# match the pattern for this action to the arg strings
nargs_pattern = self._get_nargs_pattern(action)
match = _re.match(nargs_pattern, arg_strings_pattern)
# raise an exception if we weren't able to find a match
if match is None:
nargs_errors = {
None: _('expected one argument'),
OPTIONAL: _('expected at most one argument'),
ONE_OR_MORE: _('expected at least one argument'),
}
default = _('expected %s argument(s)') % action.nargs
msg = nargs_errors.get(action.nargs, default)
raise ArgumentError(action, msg)
# return the number of arguments matched
return len(match.group(1))
def _match_arguments_partial(self, actions, arg_strings_pattern):
# progressively shorten the actions list by slicing off the
# final actions until we find a match
result = []
for i in range(len(actions), 0, -1):
actions_slice = actions[:i]
pattern = ''.join([self._get_nargs_pattern(action)
for action in actions_slice])
match = _re.match(pattern, arg_strings_pattern)
if match is not None:
result.extend([len(string) for string in match.groups()])
break
# return the list of arg string counts
return result
def _parse_optional(self, arg_string):
# if it's an empty string, it was meant to be a positional
if not arg_string:
return None
# if it doesn't start with a prefix, it was meant to be positional
if not arg_string[0] in self.prefix_chars:
return None
# if it's just dashes, it was meant to be positional
if not arg_string.strip('-'):
return None
# if the option string is present in the parser, return the action
if arg_string in self._option_string_actions:
action = self._option_string_actions[arg_string]
return action, arg_string, None
# search through all possible prefixes of the option string
# and all actions in the parser for possible interpretations
option_tuples = self._get_option_tuples(arg_string)
# if multiple actions match, the option string was ambiguous
if len(option_tuples) > 1:
options = ', '.join([option_string
for action, option_string, explicit_arg in option_tuples])
tup = arg_string, options
self.error(_('ambiguous option: %s could match %s') % tup)
# if exactly one action matched, this segmentation is good,
# so return the parsed action
elif len(option_tuples) == 1:
option_tuple, = option_tuples
return option_tuple
# if it was not found as an option, but it looks like a negative
# number, it was meant to be positional
# unless there are negative-number-like options
if self._negative_number_matcher.match(arg_string):
if not self._has_negative_number_optionals:
return None
# if it contains a space, it was meant to be a positional
if ' ' in arg_string:
return None
# it was meant to be an optional but there is no such option
# in this parser (though it might be a valid option in a subparser)
return None, arg_string, None
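    # For illustration: with '--foo' registered, _parse_optional('--foo')
    # returns (action, '--foo', None); '--foo=1' resolves through
    # _get_option_tuples() below to (action, '--foo', '1'); and a plain
    # 'bar' returns None and is treated as a positional.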
def _get_option_tuples(self, option_string):
result = []
# option strings starting with two prefix characters are only
# split at the '='
chars = self.prefix_chars
if option_string[0] in chars and option_string[1] in chars:
if '=' in option_string:
option_prefix, explicit_arg = option_string.split('=', 1)
else:
option_prefix = option_string
explicit_arg = None
for option_string in self._option_string_actions:
if option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# single character options can be concatenated with their arguments
# but multiple character options always have to have their argument
# separate
elif option_string[0] in chars and option_string[1] not in chars:
option_prefix = option_string
explicit_arg = None
short_option_prefix = option_string[:2]
short_explicit_arg = option_string[2:]
for option_string in self._option_string_actions:
if option_string == short_option_prefix:
action = self._option_string_actions[option_string]
tup = action, option_string, short_explicit_arg
result.append(tup)
elif option_string.startswith(option_prefix):
action = self._option_string_actions[option_string]
tup = action, option_string, explicit_arg
result.append(tup)
# shouldn't ever get here
else:
self.error(_('unexpected option string: %s') % option_string)
# return the collected option tuples
return result
def _get_nargs_pattern(self, action):
# in all examples below, we have to allow for '--' args
# which are represented as '-' in the pattern
nargs = action.nargs
# the default (None) is assumed to be a single argument
if nargs is None:
nargs_pattern = '(-*A-*)'
# allow zero or one arguments
elif nargs == OPTIONAL:
nargs_pattern = '(-*A?-*)'
# allow zero or more arguments
elif nargs == ZERO_OR_MORE:
nargs_pattern = '(-*[A-]*)'
# allow one or more arguments
elif nargs == ONE_OR_MORE:
nargs_pattern = '(-*A[A-]*)'
# allow one argument followed by any number of options or arguments
elif nargs is PARSER:
nargs_pattern = '(-*A[-AO]*)'
# all others should be integers
else:
nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
# if this is an optional action, -- is not allowed
if action.option_strings:
nargs_pattern = nargs_pattern.replace('-*', '')
nargs_pattern = nargs_pattern.replace('-', '')
# return the pattern
return nargs_pattern
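    # For illustration: nargs=2 yields '(-*A-*A-*)' for a positional and
    # '(AA)' for an optional (the '-' alternatives are stripped), so an
    # optional with nargs=2 must be followed by exactly two plain
    # argument strings.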
# ========================
# Value conversion methods
# ========================
def _get_values(self, action, arg_strings):
# for everything but PARSER args, strip out '--'
if action.nargs is not PARSER:
arg_strings = [s for s in arg_strings if s != '--']
# optional argument produces a default when not present
if not arg_strings and action.nargs == OPTIONAL:
if action.option_strings:
value = action.const
else:
value = action.default
if isinstance(value, _basestring):
value = self._get_value(action, value)
self._check_value(action, value)
# when nargs='*' on a positional, if there were no command-line
# args, use the default if it is anything other than None
elif (not arg_strings and action.nargs == ZERO_OR_MORE and
not action.option_strings):
if action.default is not None:
value = action.default
else:
value = arg_strings
self._check_value(action, value)
# single argument or optional argument produces a single value
elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
arg_string, = arg_strings
value = self._get_value(action, arg_string)
self._check_value(action, value)
# PARSER arguments convert all values, but check only the first
elif action.nargs is PARSER:
value = [self._get_value(action, v) for v in arg_strings]
self._check_value(action, value[0])
# all other types of nargs produce a list
else:
value = [self._get_value(action, v) for v in arg_strings]
for v in value:
self._check_value(action, v)
# return the converted value
return value
def _get_value(self, action, arg_string):
type_func = self._registry_get('type', action.type, action.type)
if not hasattr(type_func, '__call__'):
if not hasattr(type_func, '__bases__'): # classic classes
msg = _('%r is not callable')
raise ArgumentError(action, msg % type_func)
# convert the value to the appropriate type
try:
result = type_func(arg_string)
# TypeErrors or ValueErrors indicate errors
except (TypeError, ValueError):
name = getattr(action.type, '__name__', repr(action.type))
msg = _('invalid %s value: %r')
raise ArgumentError(action, msg % (name, arg_string))
# return the converted value
return result
def _check_value(self, action, value):
# converted value must be one of the choices (if specified)
if action.choices is not None and value not in action.choices:
tup = value, ', '.join(map(repr, action.choices))
msg = _('invalid choice: %r (choose from %s)') % tup
raise ArgumentError(action, msg)
# =======================
# Help-formatting methods
# =======================
def format_usage(self):
formatter = self._get_formatter()
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
return formatter.format_help()
def format_help(self):
formatter = self._get_formatter()
# usage
formatter.add_usage(self.usage, self._actions,
self._mutually_exclusive_groups)
# description
formatter.add_text(self.description)
# positionals, optionals and user-defined groups
for action_group in self._action_groups:
formatter.start_section(action_group.title)
formatter.add_text(action_group.description)
formatter.add_arguments(action_group._group_actions)
formatter.end_section()
# epilog
formatter.add_text(self.epilog)
# determine help from format above
return formatter.format_help()
def format_version(self):
formatter = self._get_formatter()
formatter.add_text(self.version)
return formatter.format_help()
def _get_formatter(self):
return self.formatter_class(prog=self.prog)
# =====================
# Help-printing methods
# =====================
def print_usage(self, file=None):
self._print_message(self.format_usage(), file)
def print_help(self, file=None):
self._print_message(self.format_help(), file)
def print_version(self, file=None):
self._print_message(self.format_version(), file)
def _print_message(self, message, file=None):
if message:
if file is None:
file = _sys.stderr
file.write(message)
# ===============
# Exiting methods
# ===============
def exit(self, status=0, message=None):
if message:
_sys.stderr.write(message)
_sys.exit(status)
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.
"""
self.print_usage(_sys.stderr)
        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
# ==== end of Akara-2.0.0a4/lib/thirdparty/argparse.py ====
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
escape_range = [
(0xA0, 0xD7FF ),
(0xE000, 0xF8FF ),
(0xF900, 0xFDCF ),
(0xFDF0, 0xFFEF),
(0x10000, 0x1FFFD ),
(0x20000, 0x2FFFD ),
(0x30000, 0x3FFFD),
(0x40000, 0x4FFFD ),
(0x50000, 0x5FFFD ),
(0x60000, 0x6FFFD),
(0x70000, 0x7FFFD ),
(0x80000, 0x8FFFD ),
(0x90000, 0x9FFFD),
(0xA0000, 0xAFFFD ),
(0xB0000, 0xBFFFD ),
(0xC0000, 0xCFFFD),
(0xD0000, 0xDFFFD ),
(0xE1000, 0xEFFFD),
(0xF0000, 0xFFFFD ),
(0x100000, 0x10FFFD)
]
def encode(c):
retval = c
i = ord(c)
for low, high in escape_range:
if i < low:
break
if i >= low and i <= high:
retval = "".join(["%%%2X" % ord(o) for o in c.encode('utf-8')])
break
return retval
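# For illustration: encode(u'a') returns u'a' unchanged (0x61 falls below
# every escape range), while encode(u'\N{COMET}') -- U+2604, inside the
# 0xA0-0xD7FF ucschar range -- returns '%E2%98%84', the %-escaping of its
# UTF-8 octets.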
def iri2uri(uri):
"""Convert an IRI to a URI. Note that IRIs must be
passed in a unicode strings. That is, do not utf-8 encode
the IRI before passing it into the function."""
    if isinstance(uri, unicode):
(scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)
authority = authority.encode('idna')
# For each character in 'ucschar' or 'iprivate'
# 1. encode as utf-8
# 2. then %-encode each octet of that utf-8
uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
uri = "".join([encode(c) for c in uri])
return uri
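# A minimal usage sketch (expected values mirror the unit tests below):
#
#     iri2uri(u"http://\N{COMET}.com/\N{COMET}")
#       -> 'http://xn--o3h.com/%E2%98%84'
#
# Plain ASCII URIs pass through unchanged, and the function is idempotent,
# so it is safe to apply to an already-converted URI.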
if __name__ == "__main__":
import unittest
class Test(unittest.TestCase):
def test_uris(self):
"""Test that URIs are invariant under the transformation."""
invariant = [
u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
u"http://www.ietf.org/rfc/rfc2396.txt",
u"ldap://[2001:db8::7]/c=GB?objectClass?one",
u"mailto:[email protected]",
u"news:comp.infosystems.www.servers.unix",
u"tel:+1-816-555-1212",
u"telnet://192.0.2.16:80/",
u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
for uri in invariant:
self.assertEqual(uri, iri2uri(uri))
def test_iri(self):
""" Test that the right type of escaping is done for each part of the URI."""
self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
    unittest.main()
# ==== end of Akara-2.0.0a4/lib/httplib2/iri2uri.py ====
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "$Rev$"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
# remove deprecated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
try:
import socks
except ImportError:
socks = None
# Build the appropriate socket wrapper for ssl
try:
import ssl # python 2.6
_ssl_wrap_socket = ssl.wrap_socket
except ImportError:
def _ssl_wrap_socket(sock, key_file, cert_file):
ssl_sock = socket.ssl(sock, key_file, cert_file)
return httplib.FakeSocket(sock, ssl_sock)
if sys.version_info >= (2,3):
from iri2uri import iri2uri
else:
def iri2uri(uri):
return uri
def has_timeout(timeout): # python 2.6
if hasattr(socket, '_GLOBAL_DEFAULT_TIMEOUT'):
return (timeout is not None and timeout is not socket._GLOBAL_DEFAULT_TIMEOUT)
return (timeout is not None)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
def sorted(seq):
seq.sort()
return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
"""Return list of (header, value) tuples."""
if self.msg is None:
raise httplib.ResponseNotReady()
return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses.
class HttpLib2ErrorWithResponse(HttpLib2Error):
def __init__(self, desc, response, content):
self.response = response
self.content = content
HttpLib2Error.__init__(self, desc)
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
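# For example:
#     parse_uri("http://example.com/path?q=1#frag")
#       -> ('http', 'example.com', '/path', 'q=1', 'frag')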
def urlnorm(uri):
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
authority = authority.lower()
scheme = scheme.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return scheme, authority, request_uri, defrag_uri
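# For example:
#     urlnorm("HTTP://Example.COM")
#       -> ('http', 'example.com', '/', 'http://example.com/')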
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
"""Return a filename suitable for the cache.
Strips dangerous and common characters to create a filename we
can use to store the cache in.
"""
try:
if re_url_scheme.match(filename):
if isinstance(filename,str):
filename = filename.decode('utf-8')
filename = filename.encode('idna')
else:
filename = filename.encode('idna')
except UnicodeError:
pass
if isinstance(filename,unicode):
filename=filename.encode('utf-8')
filemd5 = _md5(filename).hexdigest()
filename = re_url_scheme.sub("", filename)
filename = re_slash.sub(",", filename)
# limit length of filename
if len(filename)>200:
filename=filename[:200]
return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    # lower-case the header names and collapse folded whitespace in the
    # values down to a single space
    return dict([(key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
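# For example, headers of {'cache-control': 'max-age=3600, no-cache'}
# parse to {'max-age': '3600', 'no-cache': 1}.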
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, useful for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
return retval
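# For example, {'www-authenticate': 'Basic realm="test"'} parses to
#     {'basic': {'realm': 'test'}}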
def _entry_disposition(response_headers, request_headers):
"""Determine freshness from the Date, Expires and Cache-Control headers.
We don't handle the following:
1. Cache-Control: max-stale
2. Age: headers are not used in the calculations.
    Note that this algorithm is simpler than you might think
because we are operating as a private (non-shared) cache.
This lets us ignore 's-maxage'. We can also ignore
'proxy-invalidate' since we aren't a proxy.
    As a design decision we will never return a stale document as
    fresh, and thus we do not implement 'max-stale'. This also lets
    us safely ignore 'must-revalidate' since we operate as if every
    server had sent 'must-revalidate'.
Since we are private we get to ignore both 'public' and
'private' parameters. We also ignore 'no-transform' since
we don't do any transformations.
The 'no-store' parameter is handled at a higher level.
So the only Cache-Control parameters we look at are:
no-cache
only-if-cached
max-age
min-fresh
"""
retval = "STALE"
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
retval = "TRANSPARENT"
if 'cache-control' not in request_headers:
request_headers['cache-control'] = 'no-cache'
elif cc.has_key('no-cache'):
retval = "TRANSPARENT"
elif cc_response.has_key('no-cache'):
retval = "STALE"
elif cc.has_key('only-if-cached'):
retval = "FRESH"
elif response_headers.has_key('date'):
date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
now = time.time()
current_age = max(0, now - date)
if cc_response.has_key('max-age'):
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif response_headers.has_key('expires'):
expires = email.Utils.parsedate_tz(response_headers['expires'])
if None == expires:
freshness_lifetime = 0
else:
freshness_lifetime = max(0, calendar.timegm(expires) - date)
else:
freshness_lifetime = 0
if cc.has_key('max-age'):
try:
freshness_lifetime = int(cc['max-age'])
except ValueError:
freshness_lifetime = 0
if cc.has_key('min-fresh'):
try:
min_fresh = int(cc['min-fresh'])
except ValueError:
min_fresh = 0
current_age += min_fresh
if freshness_lifetime > current_age:
retval = "FRESH"
return retval
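# For example, a response carrying 'cache-control: max-age=3600' whose
# Date header is five minutes old is "FRESH" (current_age 300 is less than
# freshness_lifetime 3600); with max-age=60 the same response is "STALE".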
def _decompressContent(response, new_content):
content = new_content
try:
encoding = response.get('content-encoding', None)
if encoding in ['gzip', 'deflate']:
if encoding == 'gzip':
content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
if encoding == 'deflate':
content = zlib.decompress(content)
response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way that won't interfere.
response['-content-encoding'] = response['content-encoding']
del response['content-encoding']
except IOError:
content = ""
raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
if cachekey:
cc = _parse_cache_control(request_headers)
cc_response = _parse_cache_control(response_headers)
if cc.has_key('no-store') or cc_response.has_key('no-store'):
cache.delete(cachekey)
else:
info = email.Message.Message()
for key, value in response_headers.iteritems():
if key not in ['status','content-encoding','transfer-encoding']:
info[key] = value
# Add annotations to the cache to indicate what headers
# are variant for this request.
vary = response_headers.get('vary', None)
if vary:
vary_headers = vary.lower().replace(' ', '').split(',')
for header in vary_headers:
key = '-varied-%s' % header
try:
info[key] = request_headers[header]
except KeyError:
pass
status = response_headers.status
if status == 304:
status = 200
            # use the adjusted status so 304 revalidations are stored as 200
            status_header = 'status: %d\r\n' % status
header_str = info.as_string()
header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
text = "".join([status_header, header_str, content])
cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# For credentials we need two things: first,
# a pool of credentials to try (not necessarily tied to Basic, Digest, etc.)
# Then we also need a list of URIs that have already demanded authentication
# That list is tricky since sub-URIs can take the same auth, or the
# auth scheme may change as you descend the tree.
# So we also need each Auth instance to be able to tell us
# how close to the 'top' it is.
class Authentication(object):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
self.path = path
self.host = host
self.credentials = credentials
self.http = http
def depth(self, request_uri):
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return request_uri[len(self.path):].count("/")
def inscope(self, host, request_uri):
# XXX Should we normalize the request_uri?
(scheme, authority, path, query, fragment) = parse_uri(request_uri)
return (host == self.host) and path.startswith(self.path)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
        Authorization header. Override this in subclasses."""
pass
def response(self, response, content):
"""Gives us a chance to update with new nonces
or such returned from the last authorized response.
        Override this in subclasses if necessary.
        Return True if the request is to be retried; for
        example, Digest may return stale=true.
"""
return False
class BasicAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'Basic ' + base64.b64encode("%s:%s" % self.credentials).strip()
class DigestAuthentication(Authentication):
"""Only do qop='auth' and MD5, since that
is all Apache currently implements"""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['digest']
qop = self.challenge.get('qop', 'auth')
self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
if self.challenge['qop'] is None:
raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
if self.challenge['algorithm'] != 'MD5':
raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
self.challenge['nc'] = 1
def request(self, method, request_uri, headers, content, cnonce = None):
"""Modify the request headers"""
H = lambda x: _md5(x).hexdigest()
KD = lambda s, d: H("%s:%s" % (s, d))
A2 = "".join([method, ":", request_uri])
self.challenge['cnonce'] = cnonce or _cnonce()
request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
'%08x' % self.challenge['nc'],
self.challenge['cnonce'],
self.challenge['qop'], H(A2)
))
headers['Authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['nonce'],
request_uri,
self.challenge['algorithm'],
request_digest,
self.challenge['qop'],
self.challenge['nc'],
self.challenge['cnonce'],
)
self.challenge['nc'] += 1
def response(self, response, content):
if not response.has_key('authentication-info'):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
if 'true' == challenge.get('stale'):
self.challenge['nonce'] = challenge['nonce']
self.challenge['nc'] = 1
return True
else:
updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
if updated_challenge.has_key('nextnonce'):
self.challenge['nonce'] = updated_challenge['nextnonce']
self.challenge['nc'] = 1
return False
class HmacDigestAuthentication(Authentication):
"""Adapted from Robert Sayre's code and DigestAuthentication above."""
__author__ = "Thomas Broyer ([email protected])"
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
self.challenge = challenge['hmacdigest']
# TODO: self.challenge['domain']
self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
if self.challenge['reason'] not in ['unauthorized', 'integrity']:
self.challenge['reason'] = 'unauthorized'
self.challenge['salt'] = self.challenge.get('salt', '')
if not self.challenge.get('snonce'):
raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
if self.challenge['algorithm'] == 'HMAC-MD5':
self.hashmod = _md5
else:
self.hashmod = _sha
if self.challenge['pw-algorithm'] == 'MD5':
self.pwhashmod = _md5
else:
self.pwhashmod = _sha
self.key = "".join([self.credentials[0], ":",
self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
":", self.challenge['realm']
])
self.key = self.pwhashmod.new(self.key).hexdigest().lower()
def request(self, method, request_uri, headers, content):
"""Modify the request headers"""
keys = _get_end2end_headers(headers)
keylist = "".join(["%s " % k for k in keys])
headers_val = "".join([headers[k] for k in keys])
created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
cnonce = _cnonce()
request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
headers['Authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
self.credentials[0],
self.challenge['realm'],
self.challenge['snonce'],
cnonce,
request_uri,
created,
request_digest,
keylist,
)
def response(self, response, content):
challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
if challenge.get('reason') in ['integrity', 'stale']:
return True
return False
class WsseAuthentication(Authentication):
"""This is thinly tested and should not be relied upon.
At this time there isn't any third party server to test against.
Blogger and TypePad implemented this algorithm at one point
but Blogger has since switched to Basic over HTTPS and
TypePad has implemented it wrong, by never issuing a 401
challenge but instead requiring your client to telepathically know that
their endpoint is expecting WSSE profile="UsernameToken"."""
def __init__(self, credentials, host, request_uri, headers, response, content, http):
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['Authorization'] = 'WSSE profile="UsernameToken"'
iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
cnonce = _cnonce()
password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % (
self.credentials[0],
password_digest,
cnonce,
iso_now)
class GoogleLoginAuthentication(Authentication):
def __init__(self, credentials, host, request_uri, headers, response, content, http):
from urllib import urlencode
Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
challenge = _parse_www_authenticate(response, 'www-authenticate')
service = challenge['googlelogin'].get('service', 'xapi')
        # Blogger actually returns the service in the challenge
# For the rest we guess based on the URI
if service == 'xapi' and request_uri.find("calendar") > 0:
service = "cl"
# No point in guessing Base or Spreadsheet
#elif request_uri.find("spreadsheets") > 0:
# service = "wise"
auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
lines = content.split('\n')
d = dict([tuple(line.split("=", 1)) for line in lines if line])
if resp.status == 403:
self.Auth = ""
else:
self.Auth = d['Auth']
def request(self, method, request_uri, headers, content):
"""Modify the request headers to add the appropriate
Authorization header."""
headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
AUTH_SCHEME_CLASSES = {
"basic": BasicAuthentication,
"wsse": WsseAuthentication,
"digest": DigestAuthentication,
"hmacdigest": HmacDigestAuthentication,
"googlelogin": GoogleLoginAuthentication
}
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
"""Uses a local directory as a store for cached files.
Not really safe to use if multiple threads or processes are going to
be running on the same cache.
"""
def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
self.cache = cache
self.safe = safe
if not os.path.exists(cache):
os.makedirs(self.cache)
def get(self, key):
retval = None
cacheFullPath = os.path.join(self.cache, self.safe(key))
try:
f = file(cacheFullPath, "rb")
retval = f.read()
f.close()
except IOError:
pass
return retval
def set(self, key, value):
cacheFullPath = os.path.join(self.cache, self.safe(key))
f = file(cacheFullPath, "wb")
f.write(value)
f.close()
def delete(self, key):
cacheFullPath = os.path.join(self.cache, self.safe(key))
if os.path.exists(cacheFullPath):
os.remove(cacheFullPath)
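# A minimal FileCache usage sketch (".cache" is an arbitrary directory):
#
#     cache = FileCache(".cache")
#     cache.set("http://example.org/", "cached response bytes")
#     cache.get("http://example.org/")      # -> "cached response bytes"
#     cache.delete("http://example.org/")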
class Credentials(object):
def __init__(self):
self.credentials = []
def add(self, name, password, domain=""):
self.credentials.append((domain.lower(), name, password))
def clear(self):
self.credentials = []
def iter(self, domain):
for (cdomain, name, password) in self.credentials:
if cdomain == "" or domain == cdomain:
yield (name, password)
class KeyCerts(Credentials):
"""Identical to Credentials except that
name/password are mapped to key/cert."""
pass
class ProxyInfo(object):
"""Collect information required to use a proxy."""
def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
"""The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
constants. For example:
p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
"""
self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns, self.proxy_user, self.proxy_pass = proxy_type, proxy_host, proxy_port, proxy_rdns, proxy_user, proxy_pass
def astuple(self):
return (self.proxy_type, self.proxy_host, self.proxy_port, self.proxy_rdns,
self.proxy_user, self.proxy_pass)
def isgood(self):
        return socks and (self.proxy_host is not None) and (self.proxy_port is not None)
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
"""HTTPConnection subclass that supports timeouts"""
def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
httplib.HTTPConnection.__init__(self, host, port, strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"""Connect to the host and port specified in __init__."""
# Mostly verbatim from httplib.py.
msg = "getaddrinfo returns an empty list"
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
if self.proxy_info and self.proxy_info.isgood():
self.sock = socks.socksocket(af, socktype, proto)
self.sock.setproxy(*self.proxy_info.astuple())
else:
self.sock = socket.socket(af, socktype, proto)
# Different from httplib: support timeouts.
if has_timeout(self.timeout):
self.sock.settimeout(self.timeout)
# End of difference from httplib.
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
self.sock.connect(sa)
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"This class allows communication via SSL."
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=None, proxy_info=None):
httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
cert_file=cert_file, strict=strict)
self.timeout = timeout
self.proxy_info = proxy_info
def connect(self):
"Connect to a host on a given (SSL) port."
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
        self.sock = _ssl_wrap_socket(sock, self.key_file, self.cert_file)
class Http(object):
"""An HTTP client that handles:
- all methods
- caching
- ETags
    - compression
- HTTPS
- Basic
- Digest
- WSSE
and more.
"""
def __init__(self, cache=None, timeout=None, proxy_info=None):
"""The value of proxy_info is a ProxyInfo instance.
If 'cache' is a string then it is used as a directory name
for a disk cache. Otherwise it must be an object that supports
the same interface as FileCache."""
self.proxy_info = proxy_info
# Map domain name to an httplib connection
self.connections = {}
# The location of the cache, for now a directory
# where cached responses are held.
if cache and isinstance(cache, str):
self.cache = FileCache(cache)
else:
self.cache = cache
# Name/password
self.credentials = Credentials()
# Key/cert
self.certificates = KeyCerts()
# authorization objects
self.authorizations = []
# If set to False then no redirects are followed, even safe ones.
self.follow_redirects = True
# Which HTTP methods do we apply optimistic concurrency to, i.e.
# which methods get an "if-match:" etag header added to them.
self.optimistic_concurrency_methods = ["PUT"]
        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
self.follow_all_redirects = False
self.ignore_etag = False
self.force_exception_to_status_code = False
self.timeout = timeout
def _auth_from_challenge(self, host, request_uri, headers, response, content):
"""A generator that creates Authorization objects
that can be applied to requests.
"""
challenges = _parse_www_authenticate(response, 'www-authenticate')
for cred in self.credentials.iter(host):
for scheme in AUTH_SCHEME_ORDER:
if challenges.has_key(scheme):
yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
def add_credentials(self, name, password, domain=""):
"""Add a name and password that will be used
any time a request requires authentication."""
self.credentials.add(name, password, domain)
def add_certificate(self, key, cert, domain):
"""Add a key and cert that will be used
any time a request requires authentication."""
self.certificates.add(key, cert, domain)
def clear_credentials(self):
"""Remove all the names and passwords
that are used for authentication"""
self.credentials.clear()
self.authorizations = []
def _conn_request(self, conn, request_uri, method, body, headers):
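        # Try the request at most twice: if reading the response fails on the
        # first attempt (e.g. a stale keep-alive connection the server closed),
        # reconnect and retry once before re-raising.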
for i in range(2):
try:
conn.request(method, request_uri, body, headers)
except socket.gaierror:
conn.close()
raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
except (socket.error, httplib.HTTPException):
# Just because the server closed the connection doesn't apparently mean
# that the server didn't send a response.
pass
try:
response = conn.getresponse()
except (socket.error, httplib.HTTPException):
if i == 0:
conn.close()
conn.connect()
continue
else:
raise
else:
content = ""
if method == "HEAD":
response.close()
else:
content = response.read()
response = Response(response)
if method != "HEAD":
content = _decompressContent(response, content)
break
return (response, content)
def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
"""Do the actual request using the connection object
and also follow one level of redirects if necessary"""
auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
auth = auths and sorted(auths)[0][1] or None
if auth:
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers)
if auth:
if auth.response(response, body):
auth.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers )
response._stale_digest = 1
if response.status == 401:
for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
authorization.request(method, request_uri, headers, body)
(response, content) = self._conn_request(conn, request_uri, method, body, headers, )
if response.status != 401:
self.authorizations.append(authorization)
authorization.response(response, body)
break
if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
# Pick out the location header and basically start from the beginning
# remembering first to strip the ETag header and decrement our 'depth'
if redirections:
if not response.has_key('location') and response.status != 300:
raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
# Fix-up relative redirects (which violate an RFC 2616 MUST)
if response.has_key('location'):
location = response['location']
(scheme, authority, path, query, fragment) = parse_uri(location)
if authority == None:
response['location'] = urlparse.urljoin(absolute_uri, location)
if response.status == 301 and method in ["GET", "HEAD"]:
response['-x-permanent-redirect-url'] = response['location']
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
if headers.has_key('if-none-match'):
del headers['if-none-match']
if headers.has_key('if-modified-since'):
del headers['if-modified-since']
if response.has_key('location'):
location = response['location']
old_response = copy.deepcopy(response)
if not old_response.has_key('content-location'):
old_response['content-location'] = absolute_uri
redirect_method = ((response.status == 303) and (method not in ["GET", "HEAD"])) and "GET" or method
(response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
response.previous = old_response
else:
                    raise RedirectLimit( _("Redirected more times than redirection_limit allows."), response, content)
elif response.status in [200, 203] and method == "GET":
# Don't cache 206's since we aren't going to handle byte range requests
if not response.has_key('content-location'):
response['content-location'] = absolute_uri
_updateCache(headers, response, content, self.cache, cachekey)
return (response, content)
def _normalize_headers(self, headers):
return _normalize_headers(headers)
# Need to catch and rebrand some exceptions
# Then need to optionally turn all exceptions into status codes
# including all socket.* and httplib.* exceptions.
def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
""" Performs a single HTTP request.
The 'uri' is the URI of the HTTP resource and can begin
with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
There is no restriction on the methods allowed.
The 'body' is the entity body to be sent with the request. It is a string
object.
Any extra headers that are to be sent with the request should be provided in the
'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
"""
try:
if headers is None:
headers = {}
else:
headers = self._normalize_headers(headers)
if not headers.has_key('user-agent'):
headers['user-agent'] = "Python-httplib2/%s" % __version__
uri = iri2uri(uri)
(scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
domain_port = authority.split(":")[0:2]
if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
scheme = 'https'
authority = domain_port[0]
conn_key = scheme+":"+authority
if conn_key in self.connections:
conn = self.connections[conn_key]
else:
if not connection_type:
connection_type = (scheme == 'https') and HTTPSConnectionWithTimeout or HTTPConnectionWithTimeout
certs = list(self.certificates.iter(authority))
if scheme == 'https' and certs:
conn = self.connections[conn_key] = connection_type(authority, key_file=certs[0][0],
cert_file=certs[0][1], timeout=self.timeout, proxy_info=self.proxy_info)
else:
conn = self.connections[conn_key] = connection_type(authority, timeout=self.timeout, proxy_info=self.proxy_info)
conn.set_debuglevel(debuglevel)
if method in ["GET", "HEAD"] and 'range' not in headers and 'accept-encoding' not in headers:
headers['accept-encoding'] = 'gzip, deflate'
info = email.Message.Message()
cached_value = None
if self.cache:
cachekey = defrag_uri
cached_value = self.cache.get(cachekey)
if cached_value:
# info = email.message_from_string(cached_value)
#
# Need to replace the line above with the kludge below
# to fix the non-existent bug not fixed in this
# bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
try:
info, content = cached_value.split('\r\n\r\n', 1)
feedparser = email.FeedParser.FeedParser()
feedparser.feed(info)
info = feedparser.close()
feedparser._parse = None
                    except (IndexError, ValueError):
self.cache.delete(cachekey)
cachekey = None
cached_value = None
else:
cachekey = None
if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
# http://www.w3.org/1999/04/Editing/
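                # Optimistic concurrency: send the cached ETag as If-Match so the
                # server can reject the write (412) if the resource has changed.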
headers['if-match'] = info['etag']
if method not in ["GET", "HEAD"] and self.cache and cachekey:
# RFC 2616 Section 13.10
self.cache.delete(cachekey)
# Check the vary header in the cache to see if this request
# matches what varies in the cache.
if method in ['GET', 'HEAD'] and 'vary' in info:
vary = info['vary']
vary_headers = vary.lower().replace(' ', '').split(',')
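                # The cache stores the request-header values the response varied
                # on under '-varied-<header>' keys; compare those against the
                # headers of the current request.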
for header in vary_headers:
key = '-varied-%s' % header
value = info[key]
if headers.get(header, '') != value:
cached_value = None
break
if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
if info.has_key('-x-permanent-redirect-url'):
# Should cached permanent redirects be counted in our redirection count? For now, yes.
(response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
response.previous = Response(info)
response.previous.fromcache = True
else:
# Determine our course of action:
# Is the cached entry fresh or stale?
# Has the client requested a non-cached response?
#
                # There seem to be three possible answers:
# 1. [FRESH] Return the cache entry w/o doing a GET
# 2. [STALE] Do the GET (but add in cache validators if available)
# 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
entry_disposition = _entry_disposition(info, headers)
if entry_disposition == "FRESH":
if not cached_value:
info['status'] = '504'
content = ""
response = Response(info)
if cached_value:
response.fromcache = True
return (response, content)
if entry_disposition == "STALE":
if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
headers['if-none-match'] = info['etag']
if info.has_key('last-modified') and not 'last-modified' in headers:
headers['if-modified-since'] = info['last-modified']
elif entry_disposition == "TRANSPARENT":
pass
(response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
if response.status == 304 and method == "GET":
# Rewrite the cache entry with the new end-to-end headers
# Take all headers that are in response
# and overwrite their values in info.
# unless they are hop-by-hop, or are listed in the connection header.
for key in _get_end2end_headers(response):
info[key] = response[key]
merged_response = Response(info)
if hasattr(response, "_stale_digest"):
merged_response._stale_digest = response._stale_digest
_updateCache(headers, merged_response, content, self.cache, cachekey)
response = merged_response
response.status = 200
response.fromcache = True
elif response.status == 200:
content = new_content
else:
self.cache.delete(cachekey)
content = new_content
else:
cc = _parse_cache_control(headers)
if cc.has_key('only-if-cached'):
info['status'] = '504'
response = Response(info)
content = ""
else:
(response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
except Exception, e:
if self.force_exception_to_status_code:
if isinstance(e, HttpLib2ErrorWithResponse):
response = e.response
content = e.content
response.status = 500
response.reason = str(e)
elif isinstance(e, socket.timeout):
content = "Request Timeout"
response = Response( {
"content-type": "text/plain",
"status": "408",
"content-length": len(content)
})
response.reason = "Request Timeout"
else:
content = str(e)
response = Response( {
"content-type": "text/plain",
"status": "400",
"content-length": len(content)
})
response.reason = "Bad Request"
else:
raise
return (response, content)
class Response(dict):
"""An object more like email.Message than httplib.HTTPResponse."""
"""Is this response from our local cache"""
fromcache = False
"""HTTP protocol version used by server. 10 for HTTP/1.0, 11 for HTTP/1.1. """
version = 11
"Status code returned by server. "
status = 200
"""Reason phrase returned by server."""
reason = "Ok"
previous = None
def __init__(self, info):
# info is either an email.Message or
# an httplib.HTTPResponse object.
if isinstance(info, httplib.HTTPResponse):
for key, value in info.getheaders():
self[key.lower()] = value
self.status = info.status
self['status'] = str(self.status)
self.reason = info.reason
self.version = info.version
elif isinstance(info, email.Message.Message):
for key, value in info.items():
self[key] = value
self.status = int(self['status'])
else:
for key, value in info.iteritems():
self[key] = value
self.status = int(self.get('status', self.status))
def __getattr__(self, name):
if name == 'dict':
return self
else:
raise AttributeError, name | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/httplib2/__init__.py | __init__.py |
#Detailed license and copyright information: http://4suite.org/COPYRIGHT
from __future__ import with_statement
SAMPLE_QUERIES_DOC = '''
= Some sample queries =
Get a list of files:
curl http://localhost:8880/collection
Add a file:
curl --request POST --data-binary "@foo.txt" --header "Content-Type: text/plain" "http://localhost:8880/collection"
Note: You might want to see the response headers. Add "-i" right after "curl" (works with any of these commands)
Get a file:
curl http://localhost:8880/collection/1
Update file content
curl --request PUT --data-binary "@foo1.txt" --header "Content-Type: text/plain" "http://localhost:8880/collection/1"
'''
__doc__ = (__doc__ or '') + SAMPLE_QUERIES_DOC
import sys
import os
import time
import httplib
from string import Template
from gettext import gettext as _
from contextlib import closing
from wsgiref.util import shift_path_info, request_uri
from akara.util import multipart_post_handler, wsgibase, http_method_handler
from akara.services import method_dispatcher
from akara import response
#AKARA is automatically defined at global scope for a module running within Akara
BASE = None
def akara_init(config):
global BASE
BASE = config["collection"].BASE
# Templates
four_oh_four = Template("""
<html><body>
<h1>404-ed!</h1>
The requested URL <i>$fronturl</i> was not found (<i>$backurl</i> in the target wiki).
</body></html>""")
SERVICE_ID = 'http://purl.org/akara/services/demo/collection'
DEFAULT_MOUNT = 'akara.collection'
def status_response(code):
return '%i %s'%(code, httplib.responses[code])
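# e.g. status_response(httplib.OK) -> '200 OK'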
#
@method_dispatcher(SERVICE_ID, DEFAULT_MOUNT)
def collection_resource():
    return
#Assigning __doc__ inside a function body only creates a local variable, so
#attach the sample queries to the function object for a visible docstring
collection_resource.__doc__ = SAMPLE_QUERIES_DOC
@collection_resource.method("GET")
def get_file(environ, start_response):
'''
GETting the collection resource itself returns a simple file listing.
GETting a subsidiary resource returns the file
'''
    print >> sys.stderr, 'collection GET path: ', environ['PATH_INFO']
if environ['PATH_INFO'] == '/':
#Get index
start_response(status_response(httplib.OK), [("Content-Type", "text/plain")])
return '\n'.join(os.listdir(BASE)) + '\n'
resource_fname = shift_path_info(environ)
#Not needed because the shift_path_info will ignore anything after the '/' and they'll probably get a 404
#'..' will not be expanded by os.path.join
#if "/" in resource_fname:
# start_response(status_response(httplib.BAD_REQUEST), [("Content-Type", "text/plain")])
# return 'You must not include forward slashes in your request (%s)'%resource_fname
resource_path = os.path.join(BASE, resource_fname)
print >> sys.stderr, 'Getting the file at: ', resource_fname
try:
f = open(resource_path, 'rb')
#FIXME: do it chunk by chunk
rbody = f.read()
#FIXME: work out content type mappings (perhaps by file extension)
start_response(status_response(httplib.OK), [("Content-Type", "text/plain")])
return rbody
except IOError:
rbody = four_oh_four.substitute(fronturl=request_uri(environ), backurl=resource_fname)
start_response(status_response(httplib.NOT_FOUND), [("Content-Type", "text/html")])
return rbody
#
@collection_resource.method("POST")
def post_file(environ, start_response):
'''
Add a new file to the collection
'''
fname = str(int(time.time()))
#resource_fname = shift_path_info(environ)
resource_path = os.path.join(BASE, fname)
fp = open(resource_path, 'wb')
if not read_http_body_to_file(environ, start_response, fp):
return 'Content length Required'
msg = 'File created OK: %s\n'%(fname)
print >> sys.stderr, 'Creating a file at: ', resource_path
#response.add_header("Content-Length", str(len(msg)))
#FIXME: use the full URI for Location header
start_response(status_response(httplib.CREATED), [("Content-Type", "text/plain"), ("Content-Location", fname)])
return msg
#def check_auth(self, user=None, password=None):
@collection_resource.method("PUT")
def put_page(environ, start_response):
    '''
    Update the content of a file in the collection (not yet implemented)
    '''
    raise NotImplementedError
#
CHUNKLEN = 4096
def read_http_body_to_file(environ, start_response, fp):
    '''
    Handle the reading of a file from an HTTP message body (file pointer from wsgi.input)
    in chunks, writing them to the given open file object
    Returns True on success, or False (after sending a 411 Length Required
    response) if no Content-Length was provided
    '''
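    # Typical use (as in post_file above):
    #   fp = open(resource_path, 'wb')
    #   if not read_http_body_to_file(environ, start_response, fp):
    #       return 'Content length Required'
    # Note: fp is closed by this function once the body has been written.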
    clen = int(environ.get('CONTENT_LENGTH') or 0)
if not clen:
start_response(status_response(httplib.LENGTH_REQUIRED), [("Content-Type", "text/plain")])
return False
http_body = environ['wsgi.input']
while clen != 0:
chunk_len = min(CHUNKLEN, clen)
data = http_body.read(chunk_len)
if data:
fp.write(data)
clen -= chunk_len
else:
clen = 0
fp.close()
return True | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/collection.py | collection.py |
from __future__ import with_statement
import sys
import urllib, urllib2, urlparse
from cStringIO import StringIO
from gettext import gettext as _
from unicodedata import lookup
from amara.bindery import html
#from amara.lib.util import *
from akara.services import simple_service
from amara.writers.struct import *
NAME_REQUIRED = _("The 'name' query parameter is mandatory.")
SERVICE_ID = 'http://purl.org/akara/services/demo/unicode.charbyname'
#@simple_service('get', SERVICE_ID, 'akara.rdfa.json', 'application/json')
@simple_service('GET', SERVICE_ID, 'akara.unicode.charbyname', 'text/plain')
def charbyname(name=None):
'''
name - the Unicode character name to look up
Sample request:
curl "http://localhost:8880/akara.unicode.charbyname?name=DOUBLE+DAGGER"
'''
if name is None:
raise AssertionError(NAME_REQUIRED)
try:
return lookup(name).encode('utf-8')
except KeyError:
return ""
Q_REQUIRED = _("The 'q' query parameter is mandatory.")
UINFO_SEARCH_URL = u"http://www.fileformat.info/info/unicode/char/search.htm?preview=entity&"
SERVICE_ID = 'http://purl.org/akara/services/demo/unicode.search'
@simple_service('GET', SERVICE_ID, 'akara.unicode.search', 'application/xml')
def charsearch(q=None):
'''
    q - a string to search for in Unicode information (using http://www.fileformat.info )
Sample request:
curl "http://localhost:8880/akara.unicode.search?q=dagger"
'''
if q is None:
raise AssertionError(Q_REQUIRED)
query = urllib.urlencode({"q": q})
search_url = UINFO_SEARCH_URL + query
doc = html.parse(search_url)
buf = StringIO()
structwriter(indent=u"yes", stream=buf).feed(
ROOT(
E((u'characters'),
(E(u'character', {u'see-also': urlparse.urljoin(search_url, row.td[0].a.href),
u'name': unicode(row.td[2]) },
unicode(row.td[3]))
for row in doc.xml_select(u'//*[@class="list"]//*[starts-with(@class, "row")]'))
)
))
return buf.getvalue() | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/unicodetools.py | unicodetools.py |
from datetime import datetime, timedelta
import glob
from itertools import dropwhile
import amara
from amara import bindery
from amara.tools import atomtools
from amara.thirdparty import httplib2
from amara.lib.util import first_item
from amara.thirdparty import json
from akara.services import simple_service
from akara import request, response
from akara import logger, module_config
# These come from the akara.demos.atomtools section of the Akara configuration file
ENTRIES = module_config().warn("entries", "/path/to/entry/files/*.atom",
"glob path to Atom entries")
FEED_ENVELOPE = module_config().warn("feed_envelope",
'''<feed xmlns="http://www.w3.org/2005/Atom">
<title>This is my feed</title><id>http://example.com/my_feed</id>
</feed>''', "XML envelope around the Atom entries")
#text/uri-list from RFC 2483
SERVICE_ID = 'http://purl.org/akara/services/demo/atom.json'
@simple_service('GET', SERVICE_ID, 'akara.atom.json', 'application/json')
def atom_json(url):
'''
Convert Atom syntax to Exhibit JSON
(see: http://www.ibm.com/developerworks/web/library/wa-realweb6/ ; this is based on listing 3)
Sample requests:
* curl "http://localhost:8880/akara.atom.json?url=url=http://zepheira.com/feeds/news.atom"
* curl "http://localhost:8880/akara.atom.json?url=http://picasaweb.google.com/data/feed/base/user/dysryi/albumid/5342439351589940049"
* curl "http://localhost:8880/akara.atom.json?url=http://earthquake.usgs.gov/eqcenter/catalogs/7day-M2.5.xml"
'''
entries = atomtools.ejsonize(url)
return json.dumps({'items': entries}, indent=4)
# This uses a simple caching mechanism.
# If the cache is over 15 minutes old then rebuild the cache.
DOC_CACHE = None
def _need_refresh():
if DOC_CACHE is None:
return True
if datetime.now() > DOC_CACHE[1]: # check for expiration
return True
return False
SERVICE_ID = 'http://purl.org/akara/services/demo/aggregate.atom'
@simple_service('GET', SERVICE_ID, 'akara.aggregate.atom', str(atomtools.ATOM_IMT))
def aggregate_atom():
"""Aggregate a set of Atom entries and return as an Atom feed
Sample request:
* curl "http://localhost:8880/akara.aggregate.atom"
"""
global DOC_CACHE
if _need_refresh():
filenames = glob.glob(ENTRIES)
doc, metadata = atomtools.aggregate_entries(FEED_ENVELOPE, filenames)
DOC_CACHE = doc.xml_encode('xml-indent'), datetime.now() + timedelta(minutes=15)
return DOC_CACHE[0]
# We love Atom, but for sake of practicality (and JSON fans), here is
# a transform for general feeds
SERVICE_ID = 'http://purl.org/akara/services/demo/webfeed.json'
@simple_service('GET', SERVICE_ID, 'akara.webfeed.json', 'application/json')
def webfeed_json(url):
"""Convert an Atom feed to Exhibit JSON
Sample request:
* curl "http://localhost:8880/akara.webfeed.json?url=http://feeds.delicious.com/v2/rss/recent%3Fmin=1%26count=15"
* curl http://localhost:8880/akara.webfeed.json?url=http://localhost:8880/akara.aggregate.atom
"""
import feedparser # From http://www.feedparser.org/
feed = feedparser.parse(url)
# Note: bad URLs might mean the feed doesn't have headers
def process_entry(e):
data = {
u'id': e.link,
u'label': e.link,
u'title': e.title,
u'link': e.link,
u'updated': e.updated,
}
        #Optional bits (feedparser entries are dict-like, so test the entry itself)
        if 'content' in e:
            data[u'content'] = e.content
        if 'description' in e:
            data[u'description'] = e.description
        if 'author_detail' in e:
            data[u'author_name'] = e.author_detail.name
return data
entries = [ process_entry(e) for e in feed.entries ]
return json.dumps({'items': entries}, indent=4)
RDF_IMT = 'application/rdf+xml'
ATOM_IMT = 'application/atom+xml'
# Read RSS2, and generate Atom or other format
SERVICE_ID = 'http://purl.org/akara/services/demo/rss2translate'
@simple_service('GET', SERVICE_ID, 'akara.rss2translate')
def rss2translate(url=None, format=None):
"""Convert RSS 2.0 feed to Atom or RSS 1.0
Sample request:
* curl "http://localhost:8880/akara.rss2translate?url=http://feeds.delicious.com/v2/rss/recent"
This is a demo and is not meant as an industrial-strength converter.
"""
    # Support content negotiation in addition to the query parameter
if not format:
accepted_imts = request.environ.get('HTTP_ACCEPT', '').split(',')
imt = first_item(dropwhile(lambda x: '*' in x, accepted_imts))
        if imt == RDF_IMT:
format = 'rss1'
else:
format = 'atom'
if not url:
raise AssertionError("The 'url' query parameter is mandatory.")
import feedparser # From http://www.feedparser.org/
feed = feedparser.parse(url)
# Note: bad URLs might mean the feed doesn't have headers
logger.debug('Feed info: ' + repr((url, feed.version, feed.encoding, feed.headers.get('Content-type'))))
updated = getattr(feed.feed, 'updated_parsed', None)
if updated:
        #Use only the first six struct_time fields (through seconds);
        #the seventh is tm_wday, not microseconds
        updated = datetime(*updated[:6]).isoformat()
f = atomtools.feed(title=feed.feed.title, updated=updated, id=feed.feed.link)
for e in feed.entries:
updated = getattr(e, 'updated_parsed', None)
if updated:
            #Use only the first six struct_time fields (through seconds)
            updated = datetime(*updated[:6]).isoformat()
links = [
#FIXME: self?
(e.link, u'alternate'),
]
f.append(
e.link,
e.title,
updated = updated,
summary=e.description,
#e.author_detail.name
#authors=authors,
links=links,
)
if format == 'atom':
result = f.xml_encode()
response.add_header("Content-Type", ATOM_IMT)
else:
result = f.rss1format()
response.add_header("Content-Type", RDF_IMT)
return result | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/atomtools.py | atomtools.py |
#See also: Beazley's approach: http://www.dabeaz.com/generators/Generators.pdf (slide 61 et seq)
import sys, time, re
import urllib2
import httplib
import datetime
from itertools import *
# Requires Python 2.6 or http://code.google.com/p/json/
from amara.thirdparty import json
#from amara.tools.atomtools import feed
from amara.tools import rdfascrape
from akara.services import simple_service
#def rdfa2json(url=None):
#Support POST body as well
LOGLINE_PAT = re.compile(r'(?P<origin>\d+\.\d+\.\d+\.\d+) '
+r'(?P<identd>-|\w*) (?P<auth>-|\w*) '
+r'\[(?P<date>[^\[\]:]+):(?P<time>\d+:\d+:\d+) (?P<tz>[\-\+]?\d\d\d\d)\] '
+r'(?P<request>"[^"]*") (?P<status>\d+) (?P<bytes>-|\d+) (?P<referrer>"[^"]*") (?P<client>".*")\s*\Z')
#For logs with destination host (at the beginning)
#LOGLINE_PAT = re.compile(r'(?P<targetserver>\S+) (?P<origin>\d+\.\d+\.\d+\.\d+) '
#+r'(?P<identd>-|\w*) (?P<auth>-|\w*) '
#+r'\[(?P<date>[^\[\]:]+):(?P<time>\d+:\d+:\d+) (?P<tz>[\-\+]?\d\d\d\d)\] '
#+r'(?P<request>"[^"]*") (?P<status>\d+) (?P<bytes>-|\d+) (?P<referrer>"[^"]*") (?P<client>".*")\s*\Z')
#For logs where referrer and UA may not be included
#LOGLINE_PAT = re.compile(r'(\d+\.\d+\.\d+\.\d+) (-|\w*) (-|\w*) '
#+r'\[([^\[\]:]+):(\d+:\d+:\d+) -(\d\d\d\d)\] '
#+r'("[^"]*") (\d+) (-|\d+) ("[^"]*")? (".*")?\s*\Z')
# This regular expresion is the heart of the code.
# Python uses Perl regex, so it should be readily portable
# The r'' string form is just a convenience so you don't have to escape backslashes
COMBINED_LOGLINE_PAT = re.compile(
r'(?P<origin>[^\s]+) '
+ r'(?P<identd>-|\w*) (?P<auth>-|\w*) '
+ r'\[(?P<ts>(?P<date>[^\[\]:]+):(?P<time>\d+:\d+:\d+)) (?P<tz>[\-\+]?\d\d\d\d)\] '
+ r'"(?P<method>\w+) (?P<path>[\S]+) (?P<protocol>[^"]+)" (?P<status>\d+) (?P<bytes>-|\d+)'
+ r'( (?P<referrer>"[^"]*")( (?P<client>"[^"]*")( (?P<cookie>"[^"]*"))?)?)?\s*\Z'
)
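# Illustrative match against a combined-format line (sample from the Apache docs):
#   127.0.0.1 - frank [10/Oct/2000:13:55:36 -0700] "GET /apache_pb.gif HTTP/1.0" 200 2326 "http://www.example.com/start.html" "Mozilla/4.08"
# yields groups such as origin='127.0.0.1', method='GET', path='/apache_pb.gif',
# status='200', bytes='2326', ts='10/Oct/2000:13:55:36', tz='-0700'.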
# Patterns in the client field for sniffing out bots
BOT_TRACES = [
(re.compile(r".*http://help\.yahoo\.com/help/us/ysearch/slurp.*"),
"Yahoo robot"),
(re.compile(r".*\+http://www\.google\.com/bot\.html.*"),
"Google robot"),
(re.compile(r".*\+http://about\.ask\.com/en/docs/about/webmasters.shtml.*"),
"Ask Jeeves/Teoma robot"),
(re.compile(r".*\+http://search\.msn\.com\/msnbot\.htm.*"),
"MSN robot"),
(re.compile(r".*http://www\.entireweb\.com/about/search_tech/speedy_spider/.*"),
"Speedy Spider"),
(re.compile(r".*\+http://www\.baidu\.com/search/spider_jp\.html.*"),
"Baidu spider"),
(re.compile(r".*\+http://www\.gigablast\.com/spider\.html.*"),
"Gigabot robot"),
]
# Apache's date/time format is very messy, so dealing with it is messy
# This class provides support for managing timezones in the Apache time field
# Reuses some code from: http://seehuhn.de/blog/52
class timezone(datetime.tzinfo):
def __init__(self, name="+0000"):
self.name = name
seconds = int(name[:-2])*3600+int(name[-2:])*60
self.offset = datetime.timedelta(seconds=seconds)
def utcoffset(self, dt):
return self.offset
def dst(self, dt):
        return datetime.timedelta(0)
def tzname(self, dt):
return self.name
def parse_apache_date(date_str, tz_str):
'''
Parse the timestamp from the Apache log file, and return a datetime object
'''
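    # e.g. parse_apache_date('10/Oct/2000:13:55:36', '-0700') yields a datetime
    # carrying a fixed -07:00 offset via the timezone class above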
tt = time.strptime(date_str, "%d/%b/%Y:%H:%M:%S")
tt = tt[:6] + (0, timezone(tz_str))
return datetime.datetime(*tt)
def bot_check(match_info):
'''
Return True if the matched line looks like a robot
'''
    for pat, botname in BOT_TRACES:
        if pat.match(match_info.group('client')):
            return True
    return False
SERVICE_ID = 'http://purl.org/akara/services/demo/wwwlog.json'
@simple_service('POST', SERVICE_ID, 'akara.wwwlog.json', 'application/json')
def wwwlog2json(body, ctype, maxrecords=None, nobots=False):
'''
Convert Apache log info to Exhibit JSON
(see: http://www.ibm.com/developerworks/web/library/wa-realweb6/ )
Sample request:
* curl --request POST --data-binary "@access.log" --header "Content-Type: text/plain" "http://localhost:8880/akara.wwwlog.json"
'''
if maxrecords:
maxrecords = int(maxrecords)
entries = []
for count, line in enumerate(body.splitlines()):
if maxrecords and count >= maxrecords:
break
match_info = COMBINED_LOGLINE_PAT.match(line)
if not match_info:
print >> sys.stderr, "Unable to parse log line: ", line
continue
if nobots and bot_check(match_info):
continue
entry = {}
timestamp = parse_apache_date(match_info.group('ts'), match_info.group('tz'))
timestamp_str = timestamp.isoformat()
# To make Exhibit happy, set id and label fields that give some information
# about the entry, but are unique across all entries (ensured by appending count)
entry['id'] = match_info.group('origin') + ':' + timestamp_str + ':' + str(count)
entry['label'] = entry['id']
entry['origin'] = match_info.group('origin')
entry['timestamp'] = timestamp_str
entry['path'] = match_info.group('path')
entry['method'] = match_info.group('method')
entry['protocol'] = match_info.group('protocol')
entry['status'] = match_info.group('status')
entry['status'] += ' ' + httplib.responses[int(entry['status'])]
if match_info.group('bytes') != '-':
entry['bytes'] = match_info.group('bytes')
if match_info.group('referrer') != '"-"':
entry['referrer'] = match_info.group('referrer')
entry['client'] = match_info.group('client')
entries.append(entry)
return json.dumps({'items': entries}, indent=4)
"""
#Geolocation support
entry['latlong'] = ''
if True:
#if locateipfor and entry['request_path'].find(locateipfor) != -1:
result = ip2geo(entry['ip'], db, log)
if result is not None:
entry.update(result)
ids.add(entry['id'])
entries.append(entry)
else:
print >> log, 'Unable to parse line: ', line
""" | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/wwwlogviewer.py | wwwlogviewer.py |
import amara
from amara.xslt import transform
from amara.xpath.util import simplify
from amara.bindery import html
from amara.lib import irihelpers, inputsource
import akara
from akara.services import simple_service
from akara import response
XSLT_SERVICE_ID = 'http://purl.org/akara/services/demo/xslt'
XPATH_SERVICE_ID = 'http://purl.org/akara/services/demo/xpath'
DEFAULT_TRANSFORM = akara.module_config().get('default_transform')
URI_SPACE = akara.module_config().get('uri_space', 'http://github.com/zepheira').split()
#print DEFAULT_TRANSFORM
#FIXME! The core URI auth code is tested, but not the use in this module
if '*' in URI_SPACE:
#Allow all URI access
ALLOWED = [(True, True)]
else:
ALLOWED = []
for baseuri in URI_SPACE:
        #Add a rule that permits URIs starting with this URISPACE item
#FIXME: Technically should normalize uri and base, but this will work for most cases
ALLOWED.append((lambda uri, base=baseuri: uri.startswith(base), True))
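#With the rules above, a config uri_space of e.g. "http://example.com/data"
#(hypothetical value) permits resolution only of URIs under that prefix,
#including access through XSLT imports and includes.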
@simple_service('POST', XSLT_SERVICE_ID, 'akara.xslt')
def akara_xslt(body, ctype, **params):
'''
@xslt - URL to the XSLT transform to be applied
    all other query parameters are passed to the XSLT processor as top-level params
Sample request:
curl --request POST --data-binary "@foo.xml" --header "Content-Type: application/xml" "http://localhost:8880/akara.xslt?@xslt=http://hg.akara.info/amara/trunk/raw-file/tip/demo/data/identity.xslt"
'''
if "@xslt" in params:
akaraxslttransform = params["@xslt"]
else:
if not DEFAULT_TRANSFORM:
raise ValueError('XSLT transform required')
akaraxslttransform = DEFAULT_TRANSFORM
restricted_resolver = irihelpers.resolver(authorizations=ALLOWED)
#Using restricted_resolver should forbid Any URI access outside the specified "jails"
#Including access through imports and includes
body = inputsource(body, resolver=restricted_resolver)
akaraxslttransform = inputsource(akaraxslttransform, resolver=restricted_resolver)
result = transform(body, akaraxslttransform)
response.add_header("Content-Type", result.parameters.media_type)
return result
@simple_service('POST', XPATH_SERVICE_ID, 'akara.xpath', 'text/xml')
def akara_xpath(body, ctype, **params):
'''
select - XPath expression to be evaluated against the document
tidy - 'yes' to tidy HTML, or 'no'
Sample request:
curl --request POST --data-binary "@foo.xml" --header "Content-Type: application/xml" "http://localhost:8880/akara.xpath?select=/html/head/title&tidy=yes"
'''
if params.get("tidy") == 'yes':
doc = html.parse(body)
else:
doc = amara.parse(body)
result = simplify(doc.xml_select(params['select'].decode('utf-8')))
return str(result) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/xslt.py | xslt.py |
import amara
from amara.thirdparty import httplib2
import akara
from akara.services import simple_service
from akara import response
from akara import logger
from akara.util import normalize_http_header_name
import calendar
import email
import email.Utils
import time
MAXLEN = akara.module_config().get('maxlen', {})
if None in MAXLEN:
DEFAULT_MAXLEN = MAXLEN[None]
del MAXLEN[None]
else:
DEFAULT_MAXLEN = 3600
OVERRIDE_STALE = akara.module_config().get('override_stale',0)
CACHE_PROXY_SERVICE_ID = 'http://purl.org/xml3k/akara/services/demo/cache-proxy'
MAXAGE_HEADER = lambda age: ('Cache-Control','max-age={0}'.format(age))
#FIXME: recycle after N uses
H = httplib2.Http()
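#MAXLEN maps URL prefixes to max-age overrides in seconds; a None key supplies
#the default (handled above). Config sketch (hypothetical values):
#  class cache_proxy: maxlen = {None: 3600, 'http://example.com/': 86400}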
def get_max_age(url):
    for k in MAXLEN:
        #XXX url normalize?
        if url.startswith(k):
            return MAXLEN[k]
    return DEFAULT_MAXLEN
def is_fresh(resp):
"""
Returns a tuple, the first element a boolean whether the response can be
considered (for our purposes) fresh or not, and the second the freshness
lifetime of the response.
Much of this is reworked from httplib2._entry_disposition. We can't reuse it
directly since it assumes responses are stale unless otherwise marked as
fresh, and we want to do the opposite.
"""
fresh = True
freshness_lifetime = 0
cc_response = httplib2._parse_cache_control(resp)
if 'no-cache' in cc_response or 'private' in cc_response:
fresh = False
elif 'date' in resp:
date = calendar.timegm(email.Utils.parsedate_tz(resp['date']))
now = time.time()
current_age = max(0, now - date - 5) # Give us 5 seconds to get this far
if 'max-age' in cc_response:
try:
freshness_lifetime = int(cc_response['max-age'])
except ValueError:
freshness_lifetime = 0
elif 'expires' in resp:
expires = email.Utils.parsedate_tz(resp['expires'])
if expires == None:
freshness_lifetime = 0
else:
freshness_lifetime = calendar.timegm(expires) - date
else:
freshness_lifetime = 0
if freshness_lifetime < current_age:
logger.debug('lifetime = {0}, age = {1}, so marking explicitly stale'.format(freshness_lifetime,current_age))
fresh = False
return fresh, freshness_lifetime
@simple_service('GET', CACHE_PROXY_SERVICE_ID, 'akara.cache-proxy')
def akara_cache_proxy(url=None):
'''
Sample request:
curl -I "http://localhost:8880/akara.cache-proxy?url=http://poemtree.com/poems/UsefulAdvice.htm"
'''
logger.debug('remote URL {0}: '.format(repr(url)))
if not url:
raise ValueError('url query parameter required')
resp, content = H.request(url)
if OVERRIDE_STALE:
response.add_header(*MAXAGE_HEADER(get_max_age(url)))
else:
(fresh, lifetime) = is_fresh(resp)
if fresh:
response.add_header(*MAXAGE_HEADER( max(get_max_age(url),lifetime) ))
else:
response.add_header(*MAXAGE_HEADER(0))
logger.debug('remote response headers {0}: '.format(repr(resp)))
#Oof. What about 'transfer-encoding' and other such headers
for k in resp:
if k not in ('server','status', 'transfer-encoding', 'content-length','cache-control','expires','date'):
response.add_header(normalize_http_header_name(k), resp[k])
#response.add_header(k, resp[k])
#FIXME: This might distort return encoding, which would of course throw off content length & encoding. Workaround for now is removal of e.g. transfer-encoding (above)
return content | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/cache_proxy.py | cache_proxy.py |
from __future__ import with_statement
import sys, time
import urllib, urlparse
import tempfile
import os
import re
import csv
import cgi
from cStringIO import StringIO
from gettext import gettext as _
from itertools import *
from functools import *
from subprocess import *
import amara
from amara.xslt import transform
from amara.xpath.util import simplify
from amara.bindery import html
from amara.lib.util import *
# Requires Python 2.6 or http://code.google.com/p/json/
from amara.thirdparty import json
import akara
from akara.services import simple_service
VAR_PAT = re.compile('VARIABLE\s+LABELS\s+(((\w+)\s+"([^"]+)"\s*)+)\.')
VAR_DEF_PAT = re.compile('(\w+)\s+"([^"]+)"')
VALUE_PAT = re.compile('VALUE\s+LABELS\s+((/(\w+)\s+(\'(\w+)\'\s+"([^"]+)"\s*)+)+)\.')
VALUE_DEF_SET_PAT = re.compile('/(\w+)\s+((\'(\w+)\'\s+"([^"]+)"\s*)+)')
VALUE_DEF_PAT = re.compile('\'(\w+)\'\s+"([^"]+)"')
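#The regexes above target SPSS syntax blocks such as (illustrative):
#  VARIABLE LABELS q1 "Age of respondent".
#  VALUE LABELS /q1 '1' "Under 18" '2' "18 to 34".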
VALUE_SET_TYPE = 'value_set'
VARIABLE_LABELS_TYPE = 'variable_labels'
VALUE_LABELS_TYPE = 'value_labels'
#R_SCRIPT = '''library(foreign)
#mydata <- read.spss(file='%s')
#write.csv2(mydata)
#'''
R_SCRIPT = '''library(Hmisc)
mydata <- spss.get(file='%s')
write.csv2(mydata)
'''
R_FILE_CMD = akara.module_config(__name__).get('r_command', 'r')
POR_REQUIRED = _("The 'POR' POST parameter is mandatory.")
SERVICE_ID = 'http://purl.org/akara/services/demo/spss.json'
@simple_service('POST', SERVICE_ID, 'spss.json', 'application/json')
def spss2json(body, ctype, **params):
'''
Uses GNU R to convert SPSS to JSON
Optionally tries to guess long labels from an original .SPS file
Requires POST body of multipart/form-data
Sample request:
curl -F "[email protected]" http://localhost:8880/spss.json
curl -F "[email protected]" -F "[email protected]" http://localhost:8880/spss.json
'''
#curl --request POST -F "[email protected]" -F "[email protected]" http://labs.zepheira.com:8880/spss.json
#Useful:
# * [[http://wiki.math.yorku.ca/index.php/R:_Data_conversion_from_SPSS|R: Data conversion from SPSS]]
body = StringIO(body)
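    #WSGI_ENVIRON is expected to be injected by the Akara runtime at module
    #scope (compare the automatically defined AKARA global noted in other
    #services); it is not imported here.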
form = cgi.FieldStorage(fp=body, environ=WSGI_ENVIRON)
#for k in form:
# print >> sys.stderr, (k, form[k][:100])
por = form.getvalue('POR')
assert_not_equal(por, None, msg=POR_REQUIRED)
spss = form.getvalue('SPSS')
(items, varlabels, valuelabels) = parse_spss(por, spss)
for count, item in enumerate(items):
#print >> sys.stderr, row
item['id'] = item['label'] = '_' + str(count)
item['type'] = VALUE_SET_TYPE
return json.dumps({'items': items, VARIABLE_LABELS_TYPE: varlabels, VALUE_LABELS_TYPE: valuelabels}, indent=4)
def parse_spss(spss_por, spss_syntax=None):
'''
Uses GNU R to convert SPSS to a simple Python data structure
Optionally tries to guess long labels from an original .SPS file
'''
varlabels = {}
valuelabels = {}
if spss_syntax:
matchinfo = VAR_PAT.search(spss_syntax)
if matchinfo:
#print >> sys.stderr, matchinfo.groups
defns = matchinfo.group(1)
for defn in VAR_DEF_PAT.finditer(defns):
varlabels[defn.group(1)] = defn.group(2)
matchinfo = VALUE_PAT.search(spss_syntax)
defsets = matchinfo.group(1)
for defset in VALUE_DEF_SET_PAT.finditer(defsets):
valuelabelset = {}
for defn in VALUE_DEF_PAT.finditer(defset.group(2)):
valuelabelset[defn.group(1)] = defn.group(2)
valuelabels[defset.group(1)] = valuelabelset
#print >> sys.stderr, varlabels
#print >> sys.stderr, valuelabels
#print >> sys.stderr, por[:100]
#print >> sys.stderr, spss[:100]
temp = tempfile.mkstemp(suffix=".por")
os.write(temp[0], spss_por)
cmdline = R_FILE_CMD
process = Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
csvdata, perr = process.communicate(input=R_SCRIPT%temp[1])
os.close(temp[0])
os.remove(temp[1])
if not csvdata:
print >> sys.stderr, R_SCRIPT%temp[1]
print >> sys.stderr, perr
#FIXME: L10N
raise ValueError('Empty output from the command line. Probably a failure. Command line: "%s"'%cmdline)
def value(k, v):
if k in valuelabels and v in valuelabels[k]:
return valuelabels[k][v]
else:
return v
r_reader = csv.DictReader(csvdata.splitlines(), delimiter=';')
rows = [
dict(((k, value(k, v.strip())) for (k, v) in row.iteritems()))
for row in r_reader
]
return (rows, varlabels, valuelabels) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/statstools.py | statstools.py |
import os
import stat
import mimetypes
from email.utils import formatdate
import warnings
from akara import registry
SERVICE_ID = 'http://purl.org/akara/services/demo/static'
class MediaHandler(object):
__name__ = 'MediaHandler'
def __init__(self, media_root):
media_root = os.path.abspath(media_root)
if not media_root.endswith(os.sep):
media_root += os.sep
self.media_root = media_root
def __call__(self, environ, start_response):
path_info = environ['PATH_INFO']
if path_info[:1] == '/':
path_info = path_info[1:]
filename = os.path.join(self.media_root, path_info)
# Simple security check.
# Things like "con" on Windows will mess it up.
filename = os.path.normpath(filename)
if not filename.startswith(self.media_root):
start_response('401 Unauthorized', [('Content-Type', 'text/plain')])
return ["Path is outside of the served directory"]
if not os.path.exists(filename):
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['Nothing matches the given URI']
try:
fp = open(filename, 'rb')
except IOError:
start_response('401 Unauthorized', [('Content-Type', 'text/plain')])
return ['Permission denied']
# This is a very simple implementation of conditional GET with
# the Last-Modified header. It makes media files a bit speedier
# because the files are only read off disk for the first request
# (assuming the browser/client supports conditional GET).
mtime = formatdate(os.stat(filename).st_mtime, usegmt=True)
headers = [('Last-Modified', mtime)]
if environ.get('HTTP_IF_MODIFIED_SINCE', None) == mtime:
status = '304 Not Modified'
output = ()
else:
status = '200 OK'
mime_type = mimetypes.guess_type(filename)[0]
if mime_type:
headers.append(('Content-Type', mime_type))
output = [fp.read()]
fp.close()
start_response(status, headers)
return output
import akara
if not akara.module_config():
akara.logger.warn("No configuration section found for %r" % (__name__,))
paths = akara.module_config().get("paths", {})
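#'paths' maps URL mount points to filesystem roots; config sketch
#(hypothetical values):
#  class static: paths = {"images": "/var/www/images"}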
for path, root in paths.items():
handler = MediaHandler(root)
registry.register_service(SERVICE_ID, path, handler) | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/static.py | static.py |
from __future__ import with_statement
import os
import sys
import re
import pprint
import httplib
import urllib, urllib2
import datetime
import cgi
from string import Template
from cStringIO import StringIO
from functools import partial
from itertools import *
from operator import attrgetter
from contextlib import closing
# Requires Python 2.6 or http://code.google.com/p/json/
from amara.thirdparty import json
from dateutil.parser import parse as dateparse
import amara
from amara import bindery, _
from amara.namespaces import *
from amara.bindery.model import generate_metadata
from amara.bindery.model.examplotron import examplotron_model
from amara.writers.struct import *
from amara.bindery.html import parse as htmlparse
from amara.lib import U
from amara.lib.iri import split_fragment, relativize, absolutize
from amara.lib.date import timezone, UTC
from amara.bindery.util import dispatcher, node_handler, property_sequence_getter
from akara.util import copy_auth
from akara.util.moin import node, ORIG_BASE_HEADER, DOCBOOK_IMT, RDF_IMT, HTML_IMT, CMS_BASE, register_node_type
from akara.services import simple_service
CMS_BASE = "asdfasd" # Get from akara_init config? XXX
#
# This part is partly obsolete, and is used to handle the Web/CMS component.
# It needs a bit of update for the more general Moin/CMS framework
# FIXME: It should actually probably go in a different file
#
class webcms_node(node):
'''
Akara CMS node, a Moin wiki page in a lightly specialized format
from which semi-structured information can be extracted
'''
NODES = {}
#Processing priority
PRIORITY = 0
@staticmethod
def factory(rest_uri, relative, outputdir):
req = urllib2.Request(rest_uri, headers={'Accept': DOCBOOK_IMT})
resp = urllib2.urlopen(req)
doc = bindery.parse(resp, standalone=True, model=MOIN_DOCBOOK_MODEL)
original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
#self.original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
#amara.xml_print(self.content_cache)
output = os.path.join(outputdir, relative)
parent_dir = os.path.split(output)[0]
try:
os.makedirs(parent_dir)
except OSError:
pass
metadata, first_id = metadata_dict(generate_metadata(doc))
metadata = metadata[first_id]
akara_type = first_item(first_item(metadata[u'ak-type']))
#import sys; print >> sys.stderr, 'GRIPPO', akara_type.xml_value
cls = node.NODES[akara_type.xml_value]
instance = cls(rest_uri, relative, outputdir, cache=(doc, metadata, original_wiki_base))
return instance
def __init__(self, rest_uri, relative, outputdir, cache=None):
'''
rest_uri - the full URI to the Moin/REST wrapper for this page
relative - the URI of this page relative to the Wiki base
'''
self.relative = relative
self.rest_uri = rest_uri
self.output = os.path.join(outputdir, relative)
self.outputdir = outputdir
self.cache = cache#(doc, metadata)
return
def load(self):
return
def up_to_date(self, force_update=False):
'''
Checks whether there needs to be an update of the CMS output file or folder
'''
#By default just always update
return False
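        #NOTE: the date-comparison logic below is retained for reference but is
        #currently unreachable because of the unconditional return above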
if force_update:
self.load()
doc, metadata, original_wiki_base = self.cache
entrydate = dateparse(unicode(doc.article.articleinfo.revhistory.revision.date))
if entrydate.tzinfo == None: entrydate = entrydate.replace(tzinfo=UTC)
if not os.access(self.output, os.R_OK):
return False
try:
lastrev = dateparse(unicode(bindery.parse(self.output).entry.updated))
except amara.ReaderError:
return False
if lastrev.tzinfo == None: lastrev = lastrev.replace(tzinfo=UTC)
if (entrydate == lastrev):
#print >> sys.stderr, 'Not updated. Skipped...'
return False
return True
class folder(webcms_node):
AKARA_TYPE = CMS_BASE + u'/folder'
PRIORITY = 1000
def render(self):
#Copy attachments to dir
req = urllib2.Request(self.rest_uri, headers={'Accept': ATTACHMENTS_IMT})
resp = urllib2.urlopen(req)
doc = bindery.parse(resp, model=ATTACHMENTS_MODEL)
for attachment in (doc.attachments.attachment or ()):
print attachment
return
node.NODES[folder.AKARA_TYPE] = folder
class page(webcms_node):
AKARA_TYPE = CMS_BASE + u'/page'
def up_to_date(self, force_update=False):
'''
Checks whether there needs to be an update of the CMS output file or folder
'''
if force_update:
self.load()
doc, metadata, original_wiki_base = self.cache
pagedate = dateparse(unicode(doc.article.articleinfo.revhistory.revision.date))
#Note from the Moin FAQ: http://moinmo.in/MoinMoinQuestions/UsingTheWiki
#"Moin internally only uses UTC, but calculates your local time according to your UserPreferences setting on page view. If you set your timezone offset to 0, you get UTC."
#Check the behavior if using the Moin REST wrapper with user auth where that user's prefs specify TZ
if pagedate.tzinfo == None: pagedate = pagedate.replace(tzinfo=UTC)
if not os.access(self.output, os.R_OK):
return False
lastrev = datetime.datetime.utcfromtimestamp(os.stat(self.output)[stat.ST_MTIME])
#try:
# published_doc = bindery.parse(self.output)
# datestr = first_item([ m for m in published_doc.html.head.meta if m.name==u'updated']).content
# lastrev = dateparse(datestr)
#except amara.ReaderError:
# return False
if lastrev.tzinfo == None: lastrev = lastrev.replace(tzinfo=UTC)
if (lastrev > pagedate):
return True
return False
def render(self):
'''
The typical approach is along the lines of "Style-free XSLT Style Sheets"
* http://www.xml.com/pub/a/2000/07/26/xslt/xsltstyle.html
* http://www.cocooncenter.org/articles/stylefree.html
But using div/@id rather than custome elements
'''
doc, metadata, original_wiki_base = self.cache
self.content = content_handlers(original_wiki_base)
#metadata = doc.article.xml_model.generate_metadata(doc)
#import pprint
#pprint.pprint(resources)
'''
akara:type:: [[http://purl.org/dc/org/xml3k/akara/cms/folder|folder]]
title:: A page
template:: http://wiki.example.com/Site;attachment=foo.xslt ##Just XSLT for now. Plan to support other templating systems soon
link:: [[http://example.org|]] rel=...
meta:: dc:Creator value=Uche Ogbuji
script:: `...` ##preferably they'd only use linked scripts: [[myscript...]]
'''
page_id = doc.article.xml_nodeid
header = doc.article.glosslist[0]
#node_type = first_item(header.xml_select(u'glossentry[glossterm = "akara:type"]/glossdef'))
template = unicode(first_item(header.xml_select(u'glossentry[glossterm = "template"]/glossdef'))).strip()
template = os.path.join(self.outputdir, template)
title = first_item(header.xml_select(u'glossentry[glossterm = "title"]/glossdef'))
#title = resources[articleid]['title']
#sections = dict([ (unicode(s.title), s) for s in page.article.section ])
#print sections
# if unicode(g.glossterm) == u'page:header' ]
#authors = [ a
# for a in page.article.section.glosslist.glossentry
# if unicode(a.glossterm) == u'entry:authors'
#]
#title = article.xml_select(u'section[@title = ]')
#revdate = dateparse(unicode(page.article.articleinfo.revhistory.revision.date))
#if revdate.tzinfo == None: revdate = revdate.replace(tzinfo=UTC)
#Create ouput file
#print >> sys.stderr, 'Writing to ', self.output
buf = StringIO()
w = structwriter(indent=u"yes", stream=buf)
w.feed(
ROOT(
E((XHTML_NAMESPACE, u'html'), {(XML_NAMESPACE, u'xml:lang'): u'en'},
E(u'head',
E(u'title', title),
E(u'meta', {u'content': unicode(first_item(metadata[u'ak-updated'])), u'name': u'updated'}),
#E(u'link', {u'href': unicode(uri), u'rel': u'alternate', u'title': u"Permalink"}),
),
E(u'body',
(self.content.dispatch(s) for s in doc.article.section)
),
),
))
with open(self.output, 'w') as output:
#text = f.read().rstrip()
#print buf.getvalue()
transform(buf.getvalue(), template, output=output)
return
def meta(self):
#Create ouput file
doc = bindery.parse(source, model=AK_DOCBOOK_MODEL)
node.NODES[page.AKARA_TYPE] = page
#AKARA_TYPES = [page, folder]
class content_handlers(dispatcher):
def __init__(self, orig_wikibase):
dispatcher.__init__(self)
self.orig_wikibase = orig_wikibase
return
@node_handler(u'article/section', priority=10)
def top_section(self, node):
yield E((XHTML_NAMESPACE, u'div'), {u'id': node.title},
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'section')
def section(self, node):
depth = node.xml_select(u'count(ancestor::section)')
yield E((XHTML_NAMESPACE, u'h%i'%depth), unicode(node.title))
for node in chain(*imap(self.dispatch, node.xml_children)):
yield node
@node_handler(u'section/title')
def section_title(self, node):
#Ignore this node
raise StopIteration
@node_handler(u'para')
def para(self, node):
#print 'content_handlers.para'
yield E((XHTML_NAMESPACE, u'p'),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'code')
def code(self, node):
yield E((XHTML_NAMESPACE, u'code'),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'emphasis')
def emphasis(self, node):
ename = u'strong' if node.xml_attributes.get((None, u'role')) == u'strong' else u'em'
yield E((XHTML_NAMESPACE, ename),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'screen')
def screen(self, node):
yield E((XHTML_NAMESPACE, u'pre'),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'ulink')
def a(self, node):
'''
[[Some_page]] -> @url == $WIKIBASE/Some_page
[[Some_page/Child]] -> @url == $WIKIBASE/Some_page/Child
[[http://moinmo.in/]] -> @url == http://moinmo.in/
'''
url = node.url
if url.startswith(self.orig_wikibase):
url = url[len(self.orig_wikibase):]
yield E((XHTML_NAMESPACE, u'a'), {u'href': url},
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'inlinemediaobject')
def img(self, node):
'''
{{http://static.moinmo.in/logos/moinmoin.png}} -> img/@src=...
'''
url = node.imageobject.imagedata.fileref
if url.startswith(self.orig_wikibase):
url = url[len(self.orig_wikibase):]
yield E((XHTML_NAMESPACE, u'img'), {u'src': url},
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'itemizedlist')
def ul(self, node):
'''
* foo
'''
yield E((XHTML_NAMESPACE, u'ul'),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'listitem')
def li(self, node):
yield E((XHTML_NAMESPACE, u'li'),
chain(*imap(self.dispatch, [ grandchild for grandchild in node.para.xml_children ]))
)
#@node_handler(u'*', priority=-1)
#def etc(self, node):
def moincms(wikibase, outputdir, pattern):
if pattern: pattern = re.compile(pattern)
#print (wikibase, outputdir, rewrite)
req = urllib2.Request(wikibase, headers={'Accept': RDF_IMT})
resp = urllib2.urlopen(req)
original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
feed = bindery.parse(resp)
process_list = []
for item in feed.RDF.channel.items.Seq.li:
uri = split_fragment(item.resource)[0]
#print >> sys.stderr, (uri, str(item.resource), split_fragment(item.resource))
#Deal with the wrapped URI
if original_wiki_base:
#print >> sys.stderr, (uri, original_wiki_base.rstrip('/')+'/')
relative = relativize(uri, original_wiki_base.rstrip('/')+'/').lstrip('/')
uri = absolutize(relative, wikibase)
#print >> sys.stderr, (uri, relative)
if pattern and not pattern.match(relative):
continue
n = node.factory(uri, relative, outputdir)
if n.up_to_date():
pass
#print >> sys.stderr, 'Up to date. Skipped...'
else:
process_list.append(n)
#Process nodes needing update according to priority
for n in sorted(process_list, key=attrgetter('PRIORITY'), reverse=True):
#print >> sys.stderr, "processing ", n.rest_uri
n.render()
return
#Ideas borrowed from
# http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
import sys
import SocketServer
from wsgiref import simple_server
def command_line_prep():
from optparse import OptionParser
usage = "%prog [options] wikibase outputdir"
parser = OptionParser(usage=usage)
parser.add_option("-p", "--pattern",
action="store", type="string", dest="pattern",
help="limit the pages treated as Atom entries to those matching this pattern")
return parser
def main(argv=None):
#But with better integration of entry points
if argv is None:
argv = sys.argv
# By default, optparse usage errors are terminated by SystemExit
try:
optparser = command_line_prep()
options, args = optparser.parse_args(argv[1:])
# Process mandatory arguments with IndexError try...except blocks
try:
wikibase = args[0]
try:
outputdir = args[1]
except IndexError:
optparser.error("Missing output directory")
except IndexError:
optparser.error("Missing Wiki base URL")
except SystemExit, status:
return status
# Perform additional setup work here before dispatching to run()
# Detectable errors encountered here should be handled and a status
# code of 1 should be returned. Note, this would be the default code
# for a SystemExit exception with a string message.
pattern = options.pattern and options.pattern.decode('utf-8')
moincms(wikibase, outputdir, pattern)
return
#aname = partial(property_sequence_getter, u"name")
#aemail = partial(property_sequence_getter, u"email")
#auri = partial(property_sequence_getter, u"uri")
UNSUPPORTED_IN_FILENAME = re.compile(r'\W')
#SOURCE = AKARA_MODULE_CONFIG['source-wiki-root']
#POST_TO = AKARA_MODULE_CONFIG['post-to']
class atom_entry(node):
AKARA_TYPE = CMS_BASE + u'/atom-entry'
OUTPUTPATTERN = None
def __init__(self, rest_uri, relative, outputdir, cache=None):
node.__init__(self, rest_uri, relative, outputdir, cache)
self.relative = relative
return
def up_to_date(self, force_update=False):
'''
Checks whether there needs to be an update of the CMS output file or folder
'''
doc, metadata, original_wiki_base = self.cache
entrydate = dateparse(unicode(doc.article.articleinfo.revhistory.revision.date))
if entrydate.tzinfo == None: entrydate = entrydate.replace(tzinfo=UTC)
        output = os.path.join(self.outputdir, self.OUTPUTPATTERN % pathsegment(self.relative))
if os.access(output, os.R_OK):
lastrev = dateparse(unicode(bindery.parse(output).entry.updated))
if lastrev.tzinfo == None: lastrev = lastrev.replace(tzinfo=UTC)
            if (entrydate == lastrev):
                print >> sys.stderr, 'Not updated. Skipped...'
                return True
if force_update:
self.load()
pagedate = dateparse(unicode(doc.article.articleinfo.revhistory.revision.date))
#Note from the Moin FAQ: http://moinmo.in/MoinMoinQuestions/UsingTheWiki
#"Moin internally only uses UTC, but calculates your local time according to your UserPreferences setting on page view. If you set your timezone offset to 0, you get UTC."
#Check the behavior if using the Moin REST wrapper with user auth where that user's prefs specify TZ
if pagedate.tzinfo == None: pagedate = pagedate.replace(tzinfo=UTC)
if not os.access(self.output, os.R_OK):
return False
lastrev = datetime.datetime.utcfromtimestamp(os.stat(self.output)[stat.ST_MTIME])
#try:
# published_doc = bindery.parse(self.output)
# datestr = first_item([ m for m in published_doc.html.head.meta if m.name==u'updated']).content
# lastrev = dateparse(datestr)
#except amara.ReaderError:
# return False
if lastrev.tzinfo == None: lastrev = lastrev.replace(tzinfo=UTC)
if (lastrev > pagedate):
return True
        return False
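    # Sketch of the timestamp normalization used above (illustrative, not
    # part of the original logic): Moin reports revision dates in UTC, but
    # dateutil may parse them as naive datetimes, so both sides are coerced
    # to UTC before comparison, e.g.:
    #   d = dateparse(u'2009-03-30T06:09:23')           # naive
    #   if d.tzinfo is None: d = d.replace(tzinfo=UTC)  # coerce
    #   d == dateparse(u'2009-03-30T06:09:23Z')         # -> True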
def render(self):
'''
The typical approach is along the lines of "Style-free XSLT Style Sheets"
* http://www.xml.com/pub/a/2000/07/26/xslt/xsltstyle.html
* http://www.cocooncenter.org/articles/stylefree.html
        But using div/@id rather than custom elements
'''
doc, metadata, original_wiki_base = self.cache
self.content = content_handlers(original_wiki_base)
#metadata = doc.article.xml_model.generate_metadata(doc)
#import pprint
#pprint.pprint(resources)
'''
akara:type:: [[http://purl.org/xml3k/akara/xmlmodel/cms/folder|folder]]
title:: A page
template:: http://wiki.example.com/Site;attachment=foo.xslt ##Just XSLT for now. Plan to support other templating systems soon
link:: [[http://example.org|]] rel=...
meta:: dc:Creator value=Uche Ogbuji
script:: `...` ##preferably they'd only use linked scripts: [[myscript...]]
'''
page_id = doc.article.xml_nodeid
header = doc.article.glosslist[0]
#node_type = first_item(header.xml_select(u'glossentry[glossterm = "akara:type"]/glossdef'))
template = unicode(first_item(header.xml_select(u'glossentry[glossterm = "template"]/glossdef'))).strip()
template = os.path.join(self.outputdir, template)
title = first_item(header.xml_select(u'glossentry[glossterm = "title"]/glossdef'))
#title = resources[articleid]['title']
#sections = dict([ (unicode(s.title), s) for s in page.article.section ])
#print sections
# if unicode(g.glossterm) == u'page:header' ]
#authors = [ a
# for a in page.article.section.glosslist.glossentry
# if unicode(a.glossterm) == u'entry:authors'
#]
#title = article.xml_select(u'section[@title = ]')
#revdate = dateparse(unicode(page.article.articleinfo.revhistory.revision.date))
#if revdate.tzinfo == None: revdate = revdate.replace(tzinfo=UTC)
        #Create output file
print >> sys.stderr, 'Writing to ', self.output
buf = StringIO()
w = structwriter(indent=u"yes", stream=buf)
w.feed(
ROOT(
E((XHTML_NAMESPACE, u'html'), {(XML_NAMESPACE, u'xml:lang'): u'en'},
E(u'head',
E(u'title', title),
E(u'meta', {u'content': U(metadata[u'ak-updated']), u'name': u'updated'}),
#E(u'link', {u'href': unicode(uri), u'rel': u'alternate', u'title': u"Permalink"}),
),
E(u'body',
(self.content.dispatch(s) for s in doc.article.section)
),
),
))
with open(self.output, 'w') as output:
#text = f.read().rstrip()
#print buf.getvalue()
transform(buf.getvalue(), template, output=output)
return
register_node_type(page.AKARA_TYPE, page)
class freemix(node):
AKARA_TYPE = 'http://purl.org/dc/gov/loc/recollection/collection'
def __init__(self, rest_uri, opener):
self.rest_uri = rest_uri
self.opener = opener
#from node.factory
req = urllib2.Request(rest_uri, headers={'Accept': DOCBOOK_IMT})
print >> sys.stderr, 'rest_uri: ', rest_uri
with closing(opener.open(req)) as resp:
doc = bindery.parse(resp, standalone=True, model=MOIN_DOCBOOK_MODEL)
original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
#self.original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
#amara.xml_print(self.content_cache)
metadata = metadata_dict(generate_metadata(doc))
self.cache=(doc, metadata, original_wiki_base)
return
def up_to_date(self, force_update=False):
'''
Checks whether there needs to be an update of the output
'''
return False
def render(self):
'''
'''
doc, metadata, original_wiki_base = self.cache
#metadata = doc.article.xml_model.generate_metadata(doc)
#import pprint
#pprint.pprint(resources)
#amara.xml_print(doc, stream=sys.stderr, indent=True)
header = doc.article.glosslist[0]
freemix_info = {
'id': self.rest_uri,
'label': self.rest_uri,
'title': doc.article.xml_select(u'string(section[title = "collection:title"]/para)'),
'date-created': header.xml_select(u'string(glossentry[glossterm = "date-created"]/glossdef)'),
'description': doc.article.xml_select(u'string(section[title = "collection:description"]/para)'),
'link': header.xml_select(u'string(glossentry[glossterm = "link"]/glossdef//ulink/@url)'),
'original_site': doc.article.xml_select(u'string(section[title = "collection:original site"]/para)'),
'organization': doc.article.xml_select(u'string(section[title = "collection:organization"]/para)'),
'restrictions': doc.article.xml_select(u'string(section[title = "collection:restrictions"]/para)'),
'content': doc.article.xml_select(u'string(section[title = "collection:content"]/para)'),
'thumbnail': header.xml_select(u'string(glossentry[glossterm = "thumbnail"]/glossdef//ulink/@url)'),
'tags': [ unicode(tag).strip() for tag in doc.article.xml_select(u'section[title = "collection:tags"]//para')],
}
#print >> sys.stderr, 'FINFO ', freemix_info
return freemix_info
def meta(self):
        #Create output file
doc = bindery.parse(source, model=AK_DOCBOOK_MODEL)
node.NODES[page.AKARA_TYPE] = page
#AKARA_TYPES = [page, folder]
#print >> sys.stderr, 'Writing to ', POST_TO
SELF = AKARA.module_config.get('self', 'http://localhost:8880/')
REST_WIKI_BASE = AKARA.module_config.get('rest_wiki_base', 'http://localhost:8880/moin/loc/')
def wrapped_uri(original_wiki_base, link):
abs_link = absolutize(link, original_wiki_base)
#print >> sys.stderr, 'abs_link: ', abs_link
rel_link = relativize(abs_link, original_wiki_base)
#print >> sys.stderr, 'rel_link: ', rel_link
rest_uri = absolutize(rel_link, REST_WIKI_BASE)
#print >> sys.stderr, 'rest_uri: ', rest_uri
return rest_uri
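# Worked example for wrapped_uri (assumed values, for illustration only):
# with original_wiki_base = 'http://internal.example.org/wiki/', the link
# 'FrontPage' is absolutized to 'http://internal.example.org/wiki/FrontPage',
# relativized back to 'FrontPage', and finally re-rooted under
# REST_WIKI_BASE, yielding 'http://localhost:8880/moin/loc/FrontPage'.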
WIKI_REQUIRED = _("The 'wiki' query parameter is mandatory.")
PATTERN_REQUIRED = _("The 'pattern' query parameter is mandatory.")
DEFAULT_TRANSFORM = AKARA.module_config.get('default_transform')
#print DEFAULT_TRANSFORM
SERVICE_ID = 'http://purl.org/akara/services/demo/moincms.execute'
@simple_service('POST', SERVICE_ID, 'moincms.execute')
def execute(body, ctype, top=None):
'''
Sample request:
curl -F "pattern=wiki/path" -F "wiki=http://localhost:8880/moin/foo/" "http://localhost:8880/moincms.execute"
'''
#
#wikibase_len = len(rewrite)
body = StringIO(body)
form = cgi.FieldStorage(fp=body, environ=WSGI_ENVIRON)
#for k in form:
# print >> sys.stderr, (k, form[k][:100])
wiki = form.getvalue('wiki')
assert_not_equal(wiki, None, msg=WIKI_REQUIRED)
pattern = form.getvalue('pattern')
assert_not_equal(pattern, None, msg=PATTERN_REQUIRED)
pattern = re.compile(pattern)
handler = copy_auth(WSGI_ENVIRON, top)
opener = urllib2.build_opener(handler) if handler else urllib2.build_opener()
req = urllib2.Request(wiki, headers={'Accept': RDF_IMT})
with closing(opener.open(req)) as resp:
feed = bindery.parse(resp)
original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
for item in feed.RDF.channel.items.Seq.li:
uri = split_fragment(item.resource)[0]
relative = uri[len(original_wiki_base):]
print >> sys.stderr, uri, relative
if pattern and not pattern.match(relative):
continue
if rewrite:
uri = uri.replace(rewrite, wikibase)
req = urllib2.Request(uri, headers={'Accept': DOCBOOK_IMT})
with closing(urllib2.urlopen(req)) as resp:
page = bindery.parse(resp)
print >> sys.stderr, 'Writing to ', output
with open(output, 'w') as output:
handle_page(uri, page, outputdir, relative, output)
doc = htmlparse(resp)
#print (wikibase, outputdir, rewrite)
with closing(urllib2.urlopen(req)) as resp:
raise NotImplementedError
return
#wikibase, outputdir, rewrite, pattern
#wikibase_len = len(rewrite)
items = []
for navchild in doc.xml_select(u'//*[@class="navigation"]//@href'):
link = navchild.xml_value
#print >> sys.stderr, 'LINK:', link
#uri = split_fragment(item.resource)[0]
#relative = uri[wikibase_len:]
#print >> sys.stderr, uri, relative
#if rewrite:
# uri = uri.replace(rewrite, wikibase)
rest_uri = wrapped_uri(original_wiki_base, link)
#print >> sys.stderr, 'rest uri:', rest_uri
items.append(freemix(rest_uri, opener).render())
    return json.dumps({'items': items}, indent=4)

# ==== end of lib/demo/moincms.py; lib/demo/calweb.py follows ====
import re, os, time
import calendar
from datetime import date
from dateutil.relativedelta import *
from itertools import *
from wsgiref.util import shift_path_info, request_uri
from dateutil.parser import parse
import amara
from amara.lib.iri import join
from akara.services import simple_service
from akara import request
from akara import logger
from string import Template
CAL_TEMPLATE = Template('''
<table class="akaraCalCalendar" xmlns="http://www.w3.org/1999/xhtml">
<thead>
<tr class="akaraCalCalendarTopHeaders">
$prevmonth<th colspan="5">$monthname, $year</th>$nextmonth
</tr>
<tr class="akaraCalCalendarWeekHeaders">
$dayheaders
</tr>
</thead>
<tbody>
$month
</tbody>
</table>
''')
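# Illustration (values made up) of the safe_substitute() fill-in done at the
# end of akara_calendar(): unknown placeholders are left intact rather than
# raising KeyError, e.g.
#   Template('$monthname, $year').safe_substitute(monthname='January', year=2007)
#   -> 'January, 2007'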
SERVICE_ID = 'http://purl.org/xml3k/akara/services/demo/calendar'
@simple_service('GET', SERVICE_ID, 'akara.calendar', 'text/html') #application/xhtml+xml
def akara_calendar(highlight=None):
'''
Return a calendar in HTML
Generates a calendar along the lines of:
< January, 2007 >
Mo Tu We Th Fr Sa Su
1 2 3 4 5
6 7 8 9 10 11 12
13 14 15 16 17 18 19
20 21 22 23 24 25 26
27 28 29 30 31
Marks present date and those that have entries with archive links
Defines the following classes (for use in CSS customization):
- akaraCalCalendar
- calendar table (note: month/year header e.g. January 2007 is in table/th)
- akaraCalCalendarWeekHeaders
- week header (Su, Mo, Tu, ...)
- akaraCalCalendarEmpty
- filler cell (e.g. days after Jan 31)
- akaraCalCalendarLive
- day for which there is an entry (also has links to that day's archives)
And the following IDs:
- akaraCalCalendarToday
- today's calendar day
- akaraCalCalendarSpecificDay
- specific day being rendered (if any)
Some ideas (e.g. CSS styling of the table) from pycalendar.py by Will Guaraldi
Sample request:
curl http://localhost:8880/akara.calendar
curl http://localhost:8880/akara.calendar/2008/12
curl http://localhost:8880/akara.calendar/2008/12?highlight=2008-12-03
'''
baseuri = request.environ['SCRIPT_NAME'] + '/'
today = date.today()
year = shift_path_info(request.environ)
month = shift_path_info(request.environ)
if highlight:
#Fun axiom: date(*map(int, date.today().isoformat().split('-')))
highlight = date(*map(int, highlight.split('-')))
if year and month:
#Use specified year & month
year, month = int(year), int(month)
if (year, month) == (today.year, today.month):
present_day = today.day
else:
present_day = None
else:
        #XXX We might want to return Bad Request if they specified year but not month
#Use present year & month
year, month = today.year, today.month
present_day = today.day
#logger.debug("year: " + repr(year))
dayheaders = ''.join(
['<td>%s</td>' % dh
for dh in calendar.weekheader(3).split()]
)
monthcal = calendar.monthcalendar(year, month)
c = []
for wk in monthcal:
c.append('<tr>\n')
for d in wk:
d_int = int(d)
attrs = ''
if d_int < 1:
d = ' '
fulldate = date.max #never to be found in archives
attrs += ' class="akaraCalCalendarEmpty"'
else:
fulldate = date(year, month, d_int)
# "today" trumps "specific day"
if d_int == present_day:
attrs += ' id="akaraCalCalendarToday"'
elif highlight and d_int == highlight.day:
attrs += ' id="akaraCalCalendarSpecificDay"'
#if fulldate in archives:
# attrs += ' class="akaraCalCalendarLive"'
#d = '<a href="%s%i/%i/%s/">%s</a>'%(self.weblog_base_url, year, month, d, d)
# d = '%s'%(d)
c.append('\t<td%s>%s</td>\n' % (attrs, d))
c.append('\n</tr>\n')
monthname = calendar.month_name[month]
prevmonth = date(year, month, 1) + relativedelta(months=-1)
nextmonth = date(year, month, 1) + relativedelta(months=+1)
#Yes, even checking if prevmonth > today, so if someone surfs
    #3 months in the future, there will be no month nav links
if prevmonth > today:
prevmonth = ''
else:
#prevmonth = '<th><a href="%s%i/%i/"><<</a></th>'%(self.weblog_base_url, prevmonth.year, prevmonth.month)
prevmonth = '<th><a href="%s"><<</a></th>'%(join(baseuri, str(prevmonth.year), str(prevmonth.month)))
if nextmonth > today:
nextmonth = ''
else:
nextmonth = '<th><a href="%s">>></a></th>'%(join(baseuri, str(nextmonth.year), str(nextmonth.month)))
month = ''.join(c)
cal = CAL_TEMPLATE.safe_substitute(locals())
    return cal

# ==== end of lib/demo/calweb.py; lib/demo/moin2atomentries.py follows ====
#
#Detailed license and copyright information: http://4suite.org/COPYRIGHT
from __future__ import with_statement
import os
import re
import pprint
import httplib
import urllib, urllib2
import datetime
from wsgiref.util import shift_path_info, request_uri
from string import Template
from cStringIO import StringIO
from functools import partial
from itertools import *
from contextlib import closing
from dateutil.parser import parse as dateparse
import pytz
import amara
from amara import bindery
from amara.namespaces import *
from amara.writers.struct import *
from amara.bindery.html import parse as htmlparse
from amara.lib.iri import split_fragment
from amara.bindery.util import dispatcher, node_handler, property_sequence_getter
WIKITEXT_IMT = 'text/plain'
DOCBOOK_IMT = 'application/docbook+xml'
RDF_IMT = 'application/rdf+xml'
DEFAULT_TZ = pytz.timezone('UTC')
import sys
import SocketServer
from wsgiref import simple_server
#aname = partial(property_sequence_getter, u"name")
#aemail = partial(property_sequence_getter, u"email")
#auri = partial(property_sequence_getter, u"uri")
class author(object):
def __init__(self, para):
self.name = unicode(para.ulink)
self.email = unicode(para.ulink[1])
self.uri = para.ulink.url
return
UNSUPPORTED_IN_FILENAME = re.compile(r'\W')
LINK_PATTERN = u'http://zepheira.com/publications/news/#%s'
def pathsegment(relative):
return UNSUPPORTED_IN_FILENAME.sub('_', relative)
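# Illustration (not in the original):
# >>> pathsegment(u'xml3k/FrontPage')
# u'xml3k_FrontPage'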
def handle_page(uri, page, outputdir, relative, output):
#tags = [u"xml", u"python", u"atom"]
print >> sys.stderr, 'Processing ', uri
title = unicode(page.article.section[0].title)
sections = dict([ (unicode(s.title), s) for s in page.article.section ])
#print sections
summary = sections["entry:summary"]
content = sections["entry:content"]
tags = [ g for g in page.article.section.glosslist.glossentry if unicode(g.glossterm) == u'entry:tags' ]
if tags: tags = [ gd.para.xml_select(u'string(.)').strip() for gd in tags[0].glossdef ]
authors = [ a
for a in page.article.section.glosslist.glossentry
if unicode(a.glossterm) == u'entry:authors'
]
if authors: authors = [ author(gd.para) for gd in authors[0].glossdef ]
#title = article.xml_select(u'section[@title = ]')
revdate = dateparse(unicode(page.article.articleinfo.revhistory.revision.date))
if revdate.tzinfo == None: revdate = revdate.replace(tzinfo=DEFAULT_TZ)
eid = LINK_PATTERN%unicode(uri.rsplit(u'/')[-1])
w = structwriter(indent=u"yes", stream=output)
w.feed(
ROOT(
E((ATOM_NAMESPACE, u'entry'), {(XML_NAMESPACE, u'xml:lang'): u'en'},
#E(u'link', {u'href': u'/blog'}),
E(u'link', {u'href': unicode(uri), u'rel': u'edit'}),
E(u'link', {u'href': eid, u'rel': u'alternate', u'title': u"Permalink"}),
E(u'id', eid),
E(u'title', title),
#FIXME: Use updated time from feed
E(u'updated', unicode(revdate.isoformat())),
#E(u'updated', datetime.datetime.now().isoformat()),
#E(u'updated', page.updated),
( E(u'category', {u'term': t}) for t in tags ),
( E(u'author',
E(u'name', a.name),
E(u'uri', a.uri),
E(u'email', a.email),
) for a in authors ),
E(u'summary', {u'type': u'xhtml'},
E((XHTML_NAMESPACE, u'div'),
CONTENT.dispatch(summary)
)
),
E(u'content', {u'type': u'xhtml'},
E((XHTML_NAMESPACE, u'div'),
CONTENT.dispatch(content)
)
),
),
))
return
class content_handlers(dispatcher):
@node_handler(u'section/title')
def section_title(self, node):
#Ignore this node
raise StopIteration
@node_handler(u'para')
def para(self, node):
#print 'content_handlers.para'
yield E((XHTML_NAMESPACE, u'p'),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'code')
def code(self, node):
yield E((XHTML_NAMESPACE, u'code'),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'emphasis')
def emphasis(self, node):
#print dict(node.xml_attributes)
ename = u'strong' if node.xml_attributes.get((None, u'role')) == u'strong' else u'em'
yield E((XHTML_NAMESPACE, ename),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'screen')
def screen(self, node):
yield E((XHTML_NAMESPACE, u'pre'),
chain(*imap(self.dispatch, node.xml_children))
)
@node_handler(u'ulink')
def a(self, node):
yield E((XHTML_NAMESPACE, u'a'), {u'href': node.url},
chain(*imap(self.dispatch, node.xml_children))
)
#@node_handler(u'*', priority=-1)
#def etc(self, node):
CONTENT = content_handlers()
OUTPUTPATTERN = 'MOIN.%s.atom'
def moin2atomentries(wikibase, outputdir, rewrite, pattern):
    wikibase_len = len(rewrite) if rewrite else len(wikibase)
if pattern: pattern = re.compile(pattern)
#print (wikibase, outputdir, rewrite)
req = urllib2.Request(wikibase, headers={'Accept': RDF_IMT})
with closing(urllib2.urlopen(req)) as resp:
feed = bindery.parse(resp)
for item in feed.RDF.channel.items.Seq.li:
uri = split_fragment(item.resource)[0]
relative = uri[wikibase_len:]
print >> sys.stderr, uri, relative
if pattern and not pattern.match(relative):
continue
if rewrite:
uri = uri.replace(rewrite, wikibase)
req = urllib2.Request(uri, headers={'Accept': DOCBOOK_IMT})
with closing(urllib2.urlopen(req)) as resp:
page = bindery.parse(resp)
entrydate = dateparse(unicode(page.article.articleinfo.revhistory.revision.date))
if entrydate.tzinfo == None: entrydate = entrydate.replace(tzinfo=DEFAULT_TZ)
output = os.path.join(outputdir, OUTPUTPATTERN%pathsegment(relative))
if os.access(output, os.R_OK):
lastrev = dateparse(unicode(bindery.parse(output).entry.updated))
if lastrev.tzinfo == None: lastrev = lastrev.replace(tzinfo=DEFAULT_TZ)
if (entrydate == lastrev):
print >> sys.stderr, 'Not updated. Skipped...'
continue
print >> sys.stderr, 'Writing to ', output
with open(output, 'w') as output:
handle_page(uri, page, outputdir, relative, output)
return
#Ideas borrowed from
# http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
#FIXME: A lot of this is copied boilerplate that needs to be cleaned up
def command_line_prep():
from optparse import OptionParser
usage = "%prog [options] wikibase outputdir"
parser = OptionParser(usage=usage)
parser.add_option("-p", "--pattern",
action="store", type="string", dest="pattern",
help="limit the pages treated as Atom entries to those matching this pattern")
return parser
def main(argv=None):
#But with better integration of entry points
if argv is None:
argv = sys.argv
# By default, optparse usage errors are terminated by SystemExit
try:
optparser = command_line_prep()
options, args = optparser.parse_args(argv[1:])
# Process mandatory arguments with IndexError try...except blocks
try:
wikibase = args[0]
try:
outputdir = args[1]
except IndexError:
optparser.error("Missing output directory")
except IndexError:
optparser.error("Missing Wiki base URL")
except SystemExit, status:
return status
    rewrite = args[2] if len(args) > 2 else None
# Perform additional setup work here before dispatching to run()
# Detectable errors encountered here should be handled and a status
# code of 1 should be returned. Note, this would be the default code
# for a SystemExit exception with a string message.
pattern = options.pattern and options.pattern.decode('utf-8')
moin2atomentries(wikibase, outputdir, rewrite, pattern)
return
if __name__ == "__main__":
    sys.exit(main(sys.argv))

# ==== end of lib/demo/moin2atomentries.py; lib/demo/svntools.py follows ====
from __future__ import with_statement
import sys, time
import urllib, urllib2, urlparse
from subprocess import *
import cgi
from cStringIO import StringIO
from itertools import *
from contextlib import closing
from amara import _
from amara.lib.util import *
import akara
from akara.util import copy_auth
from akara.services import simple_service
Q_REQUIRED = _("The 'q' POST parameter is mandatory.")
SVN_COMMIT_CMD = akara.module_config().get('svn_commit', 'svn commit -m "%(msg)s" %(fpath)s')
SVN_ADD_CMD = akara.module_config().get('svn_add', 'svn add %(fpath)s')
SERVICE_ID = 'http://purl.org/akara/services/demo/svncommit'
@simple_service('POST', SERVICE_ID, 'akara.svncommit', 'text/plain')
def svncommit(body, ctype, **params):
'''Commit a file. Can optionally populate the file contents from a given URL.
The form parameters are:
fpath - the name of the file to commit to SVN
msg - the commit message
    q (optional) - fetch the given URL and save it to the specified file before committing
    The form must be POSTed as multipart/form-data. If the request includes
    the 'q' parameter, the fetch of that URL forwards the client's
    authentication credentials (via copy_auth below)
Sample request:
curl -F "msg=fixed a typo" -F fpath="/path/to/file" -F "q=http://example.org/content" http://localhost:8880/akara.svncommit
'''
body = StringIO(body)
form = cgi.FieldStorage(fp=body, environ=WSGI_ENVIRON)
#for k in form:
# print >> sys.stderr, (k, form.getvalue(k)[:100])
q = form.getvalue('q')
fpath = form.getvalue('fpath')
msg = form.getvalue('msg')
#assert_not_equal(q, None, msg=Q_REQUIRED)
if q:
handler = copy_auth(WSGI_ENVIRON, q)
opener = urllib2.build_opener(handler) if handler else urllib2.build_opener()
req = urllib2.Request(q)
with closing(opener.open(req)) as resp:
result = resp.read()
            ctype = dict(resp.info()).get('content-type')
with closing(open(fpath, 'w')) as f:
f.write(result)
cmdline = SVN_COMMIT_CMD%{'msg': msg, 'fpath': fpath}
print >> sys.stderr, 'Executing subprocess in shell: ', cmdline
process = Popen(cmdline, stdout=PIPE, universal_newlines=True, shell=True)
output, perr = process.communicate()
return 'SVN commit OK\n'
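# A minimal, standalone sketch (with an assumed command string) of the
# shell-out pattern used by svncommit() above: fill the command template,
# run it through the shell, and capture stdout. Popen/PIPE come from the
# subprocess import at the top of this module.
def _run_shell_sketch():
    cmdline = 'echo would commit %(fpath)s' % {'fpath': '/tmp/example'}
    process = Popen(cmdline, stdout=PIPE, universal_newlines=True, shell=True)
    output, _ = process.communicate()
    return output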
URL_REQUIRED = _("The 'URL' POST parameter is mandatory.")
SERVICE_ID = 'http://purl.org/akara/services/demo/svncheckout'
@simple_service('GET', SERVICE_ID, 'akara.svncheckout')
def svncheckout(url=None):
'''
    url - URL to fetch and scan for embedded RDFa resources
Sample request:
curl "http://localhost:8880/akara.svncheckout?url=http://zepheira.com"
'''
# Requires Python 2.6 or http://code.google.com/p/json/
from amara.thirdparty import json
ids = set()
if url is None:
raise AssertionError(URL_REQUIRED)
with closing(urllib2.urlopen(url)) as resp:
content = resp.read()
resources = rdfaparse(content)
    return json.dumps({'items': resources}, indent=4)

# ==== end of lib/demo/svntools.py; lib/demo/pubsubhubbub.py follows ====
import sys
from datetime import datetime, timedelta
import cgi
import sqlite3
from itertools import dropwhile
import amara
from amara import bindery
from amara.tools import atomtools
from amara.thirdparty import httplib2
from amara.lib.util import first_item
from akara.services import simple_service
from akara import request, response
from akara import logger
#akara.util.pubsubhubbub provides tools for creating subscribers
DBFILE = AKARA.module_config['dbfile']
#http://copia.posterous.com/quotidie-eliot-milton-and-ars-versificandi
def ensuredb():
logger.debug('DBFILE: ' + repr(DBFILE))
db = sqlite3.connect(DBFILE)
try:
db.execute("select count(*) from subscription")
except sqlite3.OperationalError:
# Create table
#db.execute('''create table subscriber
#(topic text, callback text, added timestamp)''')
db.execute('''create table subscription
(callback text, topic text, added timestamp)''')
#(topic text, latlong text, city text, state text, country text, updated timestamp)''')
db.commit()
return db
def dict2obj(d):
'''
>>> p = dict2obj({'spam': 1, 'eggs': 2, 'bacon': 0})
>>> p.spam
1
>>> p.eggs
2
'''
#FIXME: use Bindery rules for handling problem dict keys
class oclass(object): pass
o = oclass()
for k, v in d.iteritems():
setattr(o, k, v)
return o
SERVICE_ID = 'http://purl.org/akara/services/demo/pubsubhubbub.sub'
@simple_service('POST', SERVICE_ID, 'akara.pubsubhubbub.sub')
def pubsubhubbub_sub(body, ctype):
'''
Sample requests:
* curl --data "mode=subscribe&callback=http%3A%2F%2Flocalhost%3A8880%2Fpshbtestcb&topic=http%3A%2F%2Flocalhost%3A8880%2Fpshbtesttop" "http://localhost:8880/akara.pubsubhubbub.sub"
sqlite3 /tmp/pubsubhubbub.db 'select * from subscription;'
'''
    #import httplib, urllib
    #params = urllib.urlencode({'spam': 1, 'eggs': 2, 'bacon': 0}) #-> application/x-www-form-urlencoded
logger.debug('parsed: ' + repr(dict(cgi.parse_qsl(body))))
hub = dict2obj(dict(cgi.parse_qsl(body)))
db = ensuredb()
if hub.mode == 'unsubscribe':
resp = db.execute("delete from subscription where callback=? and topic=?", (hub.callback, hub.topic))
        logger.debug('unsubscribe: ' + repr(resp))
db.commit()
elif hub.mode == 'subscribe':
resp = db.execute("insert into subscription values (?, ?, ?)", (hub.callback, hub.topic, datetime.now(),))
        logger.debug('subscribe: ' + repr(resp))
db.commit()
#hub.verify
#hub.lease_seconds
#hub.secret
#hub.verify_token
response.code = 204 #Or 202 for deferred verification
return ''
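# Illustrative sketch (body string made up) showing how the handlers above
# and below turn a urlencoded hub request into attribute access via
# dict2obj():
def _parse_hub_body_sketch():
    body = 'mode=subscribe&callback=http%3A%2F%2Fexample.org%2Fcb&topic=http%3A%2F%2Fexample.org%2Ffeed'
    hub = dict2obj(dict(cgi.parse_qsl(body)))
    return hub.mode, hub.callback, hub.topic  # ('subscribe', 'http://example.org/cb', 'http://example.org/feed')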
@simple_service('POST', SERVICE_ID, 'akara.pubsubhubbub.hub')
def pubsubhubbub_hub(body, ctype):
'''
Sample requests:
* curl --data "mode=subscribe&callback=http%3A%2F%2Flocalhost%3A8880%2Fpshbtestcb&topic=http%3A%2F%2Flocalhost%3A8880%2Fpshbtesttop" "http://localhost:8880/akara.pubsubhubbub.hub"
* curl --data "mode=publish&callback=http%3A%2F%2Flocalhost%3A8880%2Fpshbtestcb&topic=http%3A%2F%2Flocalhost%3A8880%2Fpshbtesttop" "http://localhost:8880/akara.pubsubhubbub.hub"
'''
logger.debug('parsed: ' + repr(dict(cgi.parse_qsl(body))))
hub = dict2obj(dict(cgi.parse_qsl(body)))
db = ensuredb()
if hub.mode == 'subscribe':
#Request from a subscriber to subscribe to a topic of interest
resp = db.execute("insert into subscription values (?, ?, ?)", (hub.callback, hub.topic, datetime.now(),))
        logger.debug('subscribe: ' + repr(resp))
db.commit()
if hub.mode == 'publish':
#Ping notification from a publisher whose topics have changed.
resp = db.execute("insert into subscription values (?, ?, ?)", (hub.callback, hub.topic, datetime.now(),))
        logger.debug('publish: ' + repr(resp))
db.commit()
#hub.verify
#hub.lease_seconds
#hub.secret
#hub.verify_token
response.code = 204 #Or 202 for deferred verification
    return ''

# ==== end of lib/demo/pubsubhubbub.py; lib/demo/oaitools.py follows ====
from __future__ import with_statement
import sys, time
import datetime
import urllib, urlparse
from cgi import parse_qs
from itertools import *
import amara
from amara import bindery
from amara import tree
from amara.writers.struct import *
from amara.bindery.model import examplotron_model, generate_metadata
from amara.lib import U
#metadata_dict (used by atomize_oai_record below) is assumed to live in amara.lib.util
from amara.lib.util import metadata_dict
from amara.tools.atomtools import feed
from akara.services import simple_service
OAI_NAMESPACE = u"http://www.openarchives.org/OAI/2.0/"
#OAI-PMH verbs:
# * Identify
# * ListMetadataFormats
# * ListSets
# * GetRecord
# * ListIdentifiers
# * ListRecords
#Useful:
# http://www.nostuff.org/words/tag/oai-pmh/
# http://libraries.mit.edu/dspace-mit/about/faq.html
# http://wiki.dspace.org/index.php/OaiInstallations - List of OAI installations harvested by DSpace
#Examples:
# http://eprints.sussex.ac.uk/perl/oai2?verb=GetRecord&metadataPrefix=oai_dc&identifier=oai:eprints.sussex.ac.uk:67
# http://dspace.mit.edu/oai/request?verb=Identify
# http://dspace.mit.edu/oai/request?verb=GetRecord&metadataPrefix=oai_dc&identifier=oai:dspace.mit.edu:1721.1/5451
#Based on: http://dspace.mit.edu/oai/request?verb=GetRecord&metadataPrefix=oai_dc&identifier=oai:dspace.mit.edu:1721.1/5451
OAI_MODEL_XML = '''<?xml version="1.0" encoding="UTF-8"?>
<OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:o="http://www.openarchives.org/OAI/2.0/"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd"
xmlns:eg="http://examplotron.org/0/" xmlns:ak="http://purl.org/xml3k/akara/xmlmodel">
<responseDate>2009-03-30T06:09:23Z</responseDate>
<request verb="GetRecord" identifier="oai:dspace.mit.edu:1721.1/5451" metadataPrefix="oai_dc">http://dspace.mit.edu/oai/request</request>
<GetRecord>
<record ak:resource="o:header/o:identifier">
<header>
<identifier>oai:dspace.mit.edu:1721.1/5451</identifier>
<datestamp ak:rel="local-name()" ak:value=".">2006-09-20T00:15:44Z</datestamp>
<setSpec>hdl_1721.1_5443</setSpec>
</header>
<metadata>
<oai_dc:dc xmlns:oai_dc="http://www.openarchives.org/OAI/2.0/oai_dc/" xmlns:dc="http://purl.org/dc/elements/1.1/" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai_dc/ http://www.openarchives.org/OAI/2.0/oai_dc.xsd">
<dc:creator ak:rel="local-name()" ak:value=".">Cohen, Joshua</dc:creator>
<dc:date ak:rel="local-name()" ak:value=".">2004-08-20T19:48:34Z</dc:date>
<dc:date>2004-08-20T19:48:34Z</dc:date>
<dc:date>1991</dc:date>
<dc:identifier ak:rel="'handle'" ak:value=".">http://hdl.handle.net/1721.1/5451</dc:identifier>
<dc:description ak:rel="local-name()" ak:value=".">Cohen's Comments on Adam Przeworski's article "Could We Feed Everyone?"</dc:description>
<dc:format>2146519 bytes</dc:format>
<dc:format>application/pdf</dc:format>
<dc:language>en_US</dc:language>
<dc:publisher ak:rel="local-name()" ak:value=".">Politics and Society</dc:publisher>
<dc:title ak:rel="local-name()" ak:value=".">"Maximizing Social Welfare or Institutionalizing Democratic Ideals?"</dc:title>
<dc:type>Article</dc:type>
<dc:identifier>Joshua Cohen, "Maximizing Social Welfare or Institutionalizing Democratic Ideals?"; Politics and Society, Vol. 19, No. 1</dc:identifier>
</oai_dc:dc>
</metadata>
</record>
</GetRecord>
</OAI-PMH>
'''
OAI_MODEL = examplotron_model(OAI_MODEL_XML)
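# With the ak:rel/ak:value annotations in OAI_MODEL_XML, generate_metadata()
# walks a parsed document and emits (resource-id, relationship, value)
# triples, e.g. (illustrative):
#   (u'oai:dspace.mit.edu:1721.1/5451', u'title',
#    u'"Maximizing Social Welfare or Institutionalizing Democratic Ideals?"')
# metadata_dict() below folds those triples into per-resource dictionaries
# keyed by relationship.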
ATOM_ENVELOPE = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xmlns:sd="http://kds.elsevier.com/datamodel/sciencedirect#" xmlns:os="http://a9.com/-/spec/opensearch/1.1/">
<title>MIT DSpace</title>
<id>http://dspace.mit.edu/</id>
</feed>
'''
SERVICE_ID = 'http://purl.org/akara/services/demo/oai.json'
@simple_service('GET', SERVICE_ID, 'akara.oai.atom', 'application/atom+xml')
def atomize_oai_record(endpoint=None, id=None):
'''
endpoint - the OAI request URL, e.g. http://dspace.mit.edu/oai/request
id, e.g. the article ID, e.g. oai:dspace.mit.edu:1721.1/5451
Sample request:
curl "http://localhost:8880/akara.oai.atom?endpoint=http://dspace.mit.edu/oai/request&id=oai:dspace.mit.edu:1721.1/5451"
'''
if endpoint is None:
raise ValueError('endpoint required')
if id is None:
raise ValueError('id required')
qstr = urllib.urlencode({'verb' : 'GetRecord', 'metadataPrefix': 'oai_dc', 'identifier': id})
url = endpoint + '?' + qstr
doc = bindery.parse(url, model=OAI_MODEL)
resources = metadata_dict(generate_metadata(doc))
#print resources
f = feed(ATOM_ENVELOPE)
#f = feed(ATOM_ENVELOPE, title=resources['title'], id=resources['id'])
#f.source.feed.xml_append(E((ATOM_NAMESPACE, u'link'), {u'rel': u'self', u'type': u'application/atom+xml', u'href': self_link.decode('utf-8')}))
#f.source.feed.xml_append(E((ATOM_NAMESPACE, u'link'), {u'rel': u'search', u'type': u'application/opensearchdescription+xml', u'href': u'http://kds-kci.zepheira.com/sciencedirect.discovery'}))
#f.source.feed.xml_append(E((ATOM_NAMESPACE, u'link'), {u'rel': u'alternate', u'type': u'text/xml', u'href': alt_link.decode('utf-8')}))
#f.source.feed.xml_append(E((OPENSEARCH_NAMESPACE, u'Query'), {u'role': u'request', u'searchTerms': search_terms.decode('utf-8')}))
#maxarticles = DEFAULT_MAX_RESULTS
maxarticles = 3
for record in islice(doc.OAI_PMH, 0, maxarticles):
        resource = resources[id]
print resource
        authors = [ (unicode(a), None, None) for a in resource[u'creator'] ]
links = [
(unicode(resource['handle']), u'alternate'),
]
#categories = [ (unicode(k), SD_NS+u'authorKeyword') for k in authkw(article) ]
#elements = [
# E((SD_NS, u'sd:journal-cover'), unicode(article.journalCover).strip() if hasattr(article, 'journalCover') else DEFAULT_ICON),
# E((SD_NS, u'sd:journal-name'), unicode(article.journalName)),
#]
f.append(
id,
unicode(resource['title'][0]),
updated=unicode(resource['date'][0]),
summary=unicode(resource['description'][0]),
authors=authors,
links=links,
#categories=categories,
#elements=elements,
)
    return f.source.xml_encode('xml-indent')

# ==== end of lib/demo/oaitools.py; lib/demo/moinrest.py follows ====
#Detailed license and copyright information: http://4suite.org/COPYRIGHT
from __future__ import with_statement
SAMPLE_QUERIES_DOC = '''
Some sample queries:
curl http://localhost:8880/moin/xml3k/FrontPage
curl -H "Accept: application/docbook+xml" http://localhost:8880/moin/xml3k/FrontPage
curl -H "Accept: application/rdf+xml" http://localhost:8880/moin/xml3k/FrontPage
curl -H "Accept: application/x-moin-attachments+xml" http://localhost:8880/moin/xml3k/FrontPage
curl --request PUT --data-binary "@wikicontent.txt" --header "Content-Type: text/plain" "http://localhost:8880/moin/xml3k/FooTest"
curl --request POST --data-binary "@wikicontent.txt" --header "Content-Type: text/plain" "http://localhost:8880/moin/xml3k/FooTest;attachment=wikicontent.txt"
curl --request DELETE http://localhost:8880/moin/xml3k/FrontPage
curl -u me:passwd -p --request PUT --data-binary "@wikicontent.txt" --header "Content-Type: text/plain" "http://localhost:8880/moin/xml3k/FooTest"
Get an attached page:
curl "http://localhost:8880/moin/xml3k/FooTest;attachment=wikicontent.txt"
Get a page's history:
curl http://localhost:8880/moin/xml3k/FrontPage;history
''' #' # work-around emacs' inability to parse this level of embedded quotes
__doc__ += SAMPLE_QUERIES_DOC
# Standard library imports
import sys # Used only for sys.stderr
import os
import cgi
import httplib, urllib, urllib2
from string import Template
from cStringIO import StringIO
import tempfile
from contextlib import closing
from wsgiref.util import shift_path_info, request_uri
from functools import wraps
from itertools import dropwhile
# Amara Imports
import amara
from amara import bindery
from amara.writers.struct import structencoder, E, NS, ROOT, RAW
from amara.bindery.html import parse as htmlparse
from amara.bindery.model import examplotron_model, generate_metadata
from amara.lib.util import first_item
from amara.lib.iri import absolutize, relativize, join
from amara.lib.iri import split_fragment, split_uri_ref, unsplit_uri_ref, split_authority
#from amara import inputsource
# Akara Imports
from akara import module_config, logger, response
from akara.util import multipart_post_handler, wsgibase, http_method_handler, copy_headers_to_dict
from akara.services import method_dispatcher
from akara.util import status_response, read_http_body_to_temp
from akara.util import BadTargetError, HTTPAuthorizationError, MoinAuthorizationError, UnexpectedResponseError, MoinMustAuthenticateError, MoinNotFoundError, ContentLengthRequiredError, GenericClientError, ConflictError
import akara.util.moin as moin
# ======================================================================
# Module Configuration
# ======================================================================
TARGET_WIKIS = module_config().get("targets", {})
TARGET_WIKI_OPENERS = {}
DEFAULT_OPENER = urllib2.build_opener(
urllib2.HTTPCookieProcessor(),
multipart_post_handler.MultipartPostHandler)
# Specifies the default max-age of Moin pages
CACHE_MAX_AGE = module_config().get("CACHE_MAX_AGE", None)
# Specifies a Wiki path (currently only one, FIXME) under which no caching will occur
NO_CACHE_PATHS = module_config().get("NO_CACHE_PATHS", None)
# Look at each Wiki URL and build an appropriate opener object for retrieving
# pages. If the URL includes HTTP authentication information such as
# http://user:[email protected]/mywiki, the opener is built with
# basic authentication enabled. For details, see:
#
# : HTTP basic auth: http://www.voidspace.org.uk/python/articles/urllib2.shtml#id6
for k, v in TARGET_WIKIS.items():
#The target wiki base URI must end in '/'
v = v.rstrip('/') + '/'
(scheme, authority, path, query, fragment) = split_uri_ref(v)
auth, host, port = split_authority(authority)
authority = host + ':' + port if port else host
schemeless_url = authority + path
if auth:
TARGET_WIKIS[k] = unsplit_uri_ref((scheme, authority, path, query, fragment))
auth = auth.split(':')
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
# Not setting the realm for now, so use None
password_mgr.add_password(None, scheme+"://"+host+path, auth[0], auth[1])
password_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
TARGET_WIKI_OPENERS[k] = urllib2.build_opener(
password_handler,
urllib2.HTTPCookieProcessor(),
multipart_post_handler.MultipartPostHandler)
else:
TARGET_WIKI_OPENERS[k] = DEFAULT_OPENER
SERVICE_ID = 'http://purl.org/xml3k/akara/services/demo/moinrest'
DEFAULT_MOUNT = 'moin'
# ======================================================================
# Response Templates
# ======================================================================
# These template strings contain the output produced for various
# error conditions.
error_badtarget = Template("""\
404 Not Found
The requested URL $fronturl was not found.
Nothing is known about moin target $target.
"""
)
error_httpforbidden = Template("""\
403 Forbidden
Request for URL $url
is being rejected by the Moin HTTP server due to bad HTTP
authentication. Check Akara's moinrest configuration to make
sure it includes an appropriate HTTP user name and password.
"""
)
error_moinauthforbidden = Template("""\
403 Forbidden
Request for login URL $url
is being rejected by MoinMoin because the username and password
aren't recognized. Check your request to moinrest to make sure
a valid Moin username and password are being supplied.
"""
)
error_moinmustauthenticateresponse = Template("""\
401 Unauthorized
Request for URL $url
requires a valid Moin username and password.
"""
)
error_unexpectedresponse = Template("""\
500 Internal Error
Request for URL $url
failed because an unexpected HTTP status code $code was received.
$error
"""
)
error_moinnotfoundresponse = Template("""\
404 Not Found
The requested URL $fronturl was not found.
The URL $backurl was not found in the target wiki.
"""
)
error_contentlengthrequired = Template("""\
411 Length Required
A POST or PUT request was made, but no data was found.
""")
# ======================================================================
# moin_error_wrapper
# ======================================================================
# This error handling function is what actually runs all of the WSGI
# functions implemented by the modules. It catches MoinRest specific exceptions
# and produces appropriate error responses as needed.
#
# The reason for putting this functionality in a single function is to avoid a lot
# of excessive code duplication between different HTTP methods. For example,
# the handlers for each HTTP method are going to have to deal with
# many of the same error conditions, faults, and responses. Centralizing
# the handling makes it possible to deal with all of the errors in just one place.
def moin_error_wrapper(wsgiapp):
@wraps(wsgiapp)
def handler(environ, start_response):
status_info = {} # Dictionary of collected status information
# Replacement for the WSGI start_response function. This merely
# collects response data in a dictionary for later use if no errors occur
def local_start_response(status, headers):
status_info['status'] = status
status_info['headers'] = headers
# Try to run the supplied WSGI handler
try:
body = wsgiapp(environ, local_start_response)
# If control reaches here, no errors. Proceed with normal WSGI response
start_response(status_info['status'],status_info['headers'])
return body
# Error handling for specifying an invalid moin target name (i.e., not configured, misspelled)
except BadTargetError,e:
start_response(status_response(httplib.NOT_FOUND), [
('Content-Type','text/plain')
])
return error_badtarget.safe_substitute(e.parms)
# Error handling for back-end HTTP authorization failure. For example,
# if the HTTP server hosting MoinMoin has rejected our requests due to
# bad HTTP authorization.
except HTTPAuthorizationError,e:
start_response(status_response(httplib.FORBIDDEN), [
('Content-Type','text/plain')
])
return error_httpforbidden.safe_substitute(e.parms)
# Error handling for MoinMoin authorization failure. This occurs
# if the user and password supplied to MoinMoin is rejected.
except MoinAuthorizationError,e:
start_response(status_response(httplib.FORBIDDEN), [
('Content-Type','text/plain')
])
return error_moinauthforbidden.safe_substitute(e.parms)
# Error handling for unexpected HTTP status codes
except UnexpectedResponseError,e:
start_response(status_response(httplib.INTERNAL_SERVER_ERROR), [
('Content-Type','text/plain')
])
return error_unexpectedresponse.safe_substitute(e.parms)
# Authentication required by MoinMoin. This isn't an error, but we
# have to translate this into a 401 response to send back to the client
# in order to get them to supply the appropriate username/password
except MoinMustAuthenticateError,e:
start_response(status_response(httplib.UNAUTHORIZED), [
('Content-Type','text/plain'),
('WWW-Authenticate','Basic realm="%s"' % e.parms.get('target',''))
])
return error_moinmustauthenticateresponse.safe_substitute(e.parms)
# Page in the target-wiki not found. 404 the client
except MoinNotFoundError,e:
start_response(status_response(httplib.NOT_FOUND), [
('Content-Type','text/plain'),
])
return error_moinnotfoundresponse.safe_substitute(e.parms)
# Content-length is required for uploaded data
except ContentLengthRequiredError,e:
start_response(status_response(httplib.LENGTH_REQUIRED), [
('Content-Type','text/plain')
])
return error_contentlengthrequired.safe_substitute(e.parms)
# Used for Moin errors indicated in 2xx HTML responses. No
# need for canned text since the error text is in the HTML
except GenericClientError,e:
start_response(status_response(httplib.BAD_REQUEST), [
('Content-Type','text/plain')
])
return e.parms.get('error')
# For editing (PUT) conflicts detected in 2xx responses
except ConflictError,e:
start_response(status_response(httplib.CONFLICT), [
('Content-Type','text/plain')
])
return e.parms.get('error')
return handler
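# Usage sketch: the wrapper applies to any WSGI callable; the dispatcher
# further down is wired up as
#   @method_dispatcher(SERVICE_ID, DEFAULT_MOUNT, wsgi_wrapper=moin_error_wrapper)
#   def dispatcher(): ...
# so every HTTP method handler shares this single error translator.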
# ----------------------------------------------------------------------
# Support functions used by handlers
# ----------------------------------------------------------------------
# Utility function for generating status responses for WSGI
def status_response(code):
return '%i %s'%(code, httplib.responses[code])
# Returns information about the target wiki. Raises BadTargetError if nothing
# is known about the target name
def target(environ):
wiki_id = shift_path_info(environ)
full_incoming_request = request_uri(environ)
if wiki_id not in TARGET_WIKIS:
raise BadTargetError(fronturl=request_uri(environ), target=wiki_id)
original_page = join(TARGET_WIKIS[wiki_id].rstrip('/')+'/', environ['PATH_INFO'].lstrip('/'))
#relative_to_wrapped = relativize(, full_incoming_request)
wrapped_wiki_base = full_incoming_request[:-len(environ['PATH_INFO'])]
return wiki_id, TARGET_WIKIS[wiki_id], TARGET_WIKI_OPENERS.get(wiki_id), original_page, wrapped_wiki_base
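# For instance (illustrative request): GET /moin/xml3k/FrontPage with the
# default mount point yields wiki_id='xml3k', a base URL taken from the
# 'targets' configuration, original_page=<base>+'FrontPage', and
# wrapped_wiki_base='http://<akara-host>/moin/xml3k'.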
# Check authentication of the user on the MoinMoin wiki
def check_auth(environ, start_response, base, opener, headers=None):
'''
Warning: mutates environ in place
If HTTP auth succeeds will also attach a cookie to the opener object in place
'''
auth = environ.get('HTTP_AUTHORIZATION')
#logger.debug('GRIPPO ' + repr((headers)))
if not auth:
return False
scheme, data = auth.split(None, 1)
if scheme.lower() != 'basic':
raise RuntimeError('Unsupported HTTP auth scheme: %s'%scheme)
username, password = data.decode('base64').split(':', 1)
url = absolutize('?action=login&name=%s&password=%s&login=login'%(username, password), base)
request = urllib2.Request(url, None, headers)
try:
with closing(opener.open(request)) as resp:
#Don't need to do anything with the response. The cookies will be captured automatically
pass
except urllib2.URLError,e:
if e.code == 401:
# If we're here, the backend HTTP server has likely rejected our request due to HTTP auth
raise HTTPAuthorizationError(url=url)
elif e.code == 403:
# If we get a forbidden response, we made it to MoinMoin but the user name/pass was rejected
raise MoinAuthorizationError(url=url)
else:
raise UnexpectedResponseError(url=url,code=e.code,error=str(e))
environ['REMOTE_USER'] = username
return True
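# Minimal sketch of the HTTP Basic credential decoding used above (the
# header value here is made up):
def _basic_auth_sketch():
    header = 'Basic ' + 'me:passwd'.encode('base64').strip()
    scheme, data = header.split(None, 1)
    assert scheme.lower() == 'basic'
    username, password = data.decode('base64').split(':', 1)
    return username, password  # ('me', 'passwd')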
def fill_page_edit_form(page, wiki_id, base, opener, headers=None):
url = absolutize(page, base)
request = urllib2.Request(url+"?action=edit&editor=text", None, headers)
#logger.debug('GRIPPO ' + repr((headers)))
try:
with closing(opener.open(request)) as resp:
x = resp.read(); resp = x
doc = htmlparse(resp)
raise_embedded_error(doc)
except urllib2.URLError,e:
# Comment concerning the behavior of MoinMoin. If an attempt is made to edit a page
# and the user is not authenticated, you will either get a 403 or 404 error depending
# on whether or not the page being edited exists or not. If it doesn't exist,
# MoinMoin sends back a 404 which is misleading. We raise MoinMustAuthenticateError
# to signal the error wrapper to issue a 401 back to the client
        #Note: Moin for some reason seems to give 403 errors on some URLs in response to Curl's UA
if e.code == 403 or e.code == 404:
raise MoinMustAuthenticateError(url=request.get_full_url(),target=wiki_id)
else:
raise UnexpectedResponseError(url=request.get_full_url(),code=e.code,error=str(e))
try:
form = doc.html.body.xml_select(u'.//*[@id="editor"]')[0]
except Exception as ex:
#XXX There seems to be a crazy XPath bug that only manifests here
#Use non-XPath as a hack-around :(
#open('/tmp/foo.html', 'w').write(x)
logger.debug('Stupid XPath bug. Working around... ' + repr(ex))
from amara.lib.util import element_subtree_iter
form = [ e for e in element_subtree_iter(doc.html.body) if e.xml_attributes.get(u'id') == u'editor' ][0]
#logger.debug('GRIPPO ' + repr(doc.html.body.xml_select(u'.//form')))
#logger.debug('GRIPPO ' + repr((form.xml_namespace, form.xml_local, form.xml_qname, form.xml_name, dict(form.xml_attributes))))
form_vars = {}
#form / fieldset / input
form_vars["action"] = [ e for e in element_subtree_iter(form) if e.xml_attributes.get(u'name') == u'action' ][0].xml_attributes[u'value']
form_vars["rev"] = [ e for e in element_subtree_iter(form) if e.xml_attributes.get(u'name') == u'rev' ][0].xml_attributes[u'value']
form_vars["ticket"] = [ e for e in element_subtree_iter(form) if e.xml_attributes.get(u'name') == u'ticket' ][0].xml_attributes[u'value']
form_vars["editor"] = [ e for e in element_subtree_iter(form) if e.xml_attributes.get(u'name') == u'editor' ][0].xml_attributes[u'value']
#logger.debug('Edit form vars ' + repr(form_vars))
return form_vars
form_vars = {}
#form / fieldset / input
form_vars["action"] = unicode(form.xml_select(u'string(*/*[@name="action"]/@value)'))
form_vars["rev"] = unicode(form.xml_select(u'string(*/*[@name="rev"]/@value)'))
form_vars["ticket"] = unicode(form.xml_select(u'string(*/*[@name="ticket"]/@value)'))
form_vars["editor"] = unicode(form.xml_select(u'string(*/*[@name="editor"]/@value)'))
#logger.debug('Edit form vars ' + repr(form_vars))
return form_vars
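# The XPath pattern above, e.g. u'string(*/*[@name="rev"]/@value)', reads
# the value attribute of the hidden <input name="rev"> nested in the form's
# fieldset; the same scrape-and-replay approach drives the attachment and
# delete forms below.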
def fill_attachment_form(page, attachment, wiki_id, base, opener, headers=None):
url = absolutize(page, base)
request = urllib2.Request(url + '?action=AttachFile', None, headers)
try:
with closing(opener.open(request)) as resp:
doc = htmlparse(resp)
raise_embedded_error(doc)
except urllib2.URLError,e:
# Comment concerning the behavior of MoinMoin. If an attempt is made to post to a page
# and the user is not authenticated, you will either get a 403 or 404 error depending
# on whether or not the page being edited exists or not. If it doesn't exist,
# MoinMoin sends back a 404 which is misleading. We raise MoinMustAuthenticateError
# to signal the error wrapper to issue a 401 back to the client
if e.code == 403 or e.code == 404:
raise MoinMustAuthenticateError(url=request.get_full_url(),target=wiki_id)
else:
raise UnexpectedResponseError(url=request.get_full_url(),code=e.code,error=str(e))
form = doc.html.body.xml_select(u'.//*[@id="content"]/form')[0]
form_vars = {}
#form / dl / ... dd
#Was called rename in 1.8.x, target in 1.9.x
form_vars["rename"] = unicode(attachment)
form_vars["target"] = unicode(attachment)
#FIXME: parameterize
form_vars["overwrite"] = u'1'
form_vars["action"] = unicode(form.xml_select(u'string(*/*[@name="action"]/@value)'))
form_vars["do"] = unicode(form.xml_select(u'string(*/*[@name="do"]/@value)'))
form_vars["ticket"] = unicode(form.xml_select(u'string(*/*[@name="ticket"]/@value)'))
form_vars["submit"] = unicode(form.xml_select(u'string(*/*[@type="submit"]/@value)'))
#pprint.pprint(form_vars)
return form_vars
def fill_page_delete_form(page, wiki_id, base, opener, headers=None):
url = absolutize(page, base)
request = urllib2.Request(url+"?action=DeletePage", None, headers)
try:
with closing(opener.open(request)) as resp:
x = resp.read(); resp = x
doc = htmlparse(resp)
raise_embedded_error(doc)
except urllib2.URLError,e:
if e.code == 403:
raise MoinMustAuthenticateError(url=request.get_full_url(),target=wiki_id)
else:
raise UnexpectedResponseError(url=request.get_full_url(),code=e.code,error=str(e))
form = doc.html.body.xml_select(u'.//form[@method="POST"]')[0]
form_vars = {}
form_vars["action"] = unicode(form.xml_select(u'string(*/*[@name="action"]/@value)'))
form_vars["ticket"] = unicode(form.xml_select(u'string(*/*[@name="ticket"]/@value)'))
form_vars["delete"] = unicode(form.xml_select(u'string(//input[@type="submit" and @name="delete"]/@value)'))
return form_vars
def scrape_page_history(page, wiki_id, base, opener, headers=None):
url = absolutize(page, base)
request = urllib2.Request(url+"?action=info", None, headers)
try:
with closing(opener.open(request)) as resp:
doc = htmlparse(resp)
raise_embedded_error(doc)
except urllib2.URLError,e:
# Comment concerning the behavior of MoinMoin. If an attempt is made to post to a page
# and the user is not authenticated, you will either get a 403 or 404 error depending
# on whether or not the page being edited exists or not. If it doesn't exist,
# MoinMoin sends back a 404 which is misleading. We raise MoinMustAuthenticateError
# to signal the error wrapper to issue a 401 back to the client
if e.code == 403 or e.code == 404:
raise MoinMustAuthenticateError(url=request.get_full_url(),target=wiki_id)
else:
raise UnexpectedResponseError(url=request.get_full_url(),code=e.code,error=str(e))
info = []
try:
table = doc.html.body.xml_select(u'.//table[@id="dbw.table"]')[0]
except Exception as ex:
#XXX Seems to be a crazy XPath bug that only manifests here
#Use non-XPath as a hack-around :(
logger.debug('Stupid XPath bug. Working around... ' + repr(ex))
from amara.lib.util import element_subtree_iter
table = [ e for e in element_subtree_iter(doc.html.body) if e.xml_attributes.get(u'id') == u'dbw.table' ]
if not table:
#"Revision History... No log entries found." i.e. page not even yet created
return info
info = [
dict(rev=tr.td[0], date=tr.td[1], editor=tr.td[4])
for tr in table[0].xml_select(u'.//tr[td[@class="column1"]]')
#for tr in table.tbody.tr if tr.xml_select(u'td[@class="column1"]')
]
return info
# Extract any error embedded in an HTML response (returned by Moin in 2xx responses),
# and raise it as an HTTP error. Would be nice to handle this generically in
# moin_error_wrapper, but don't want to incur HTML parse cost
#
def raise_embedded_error(doc):
    edit_textarea = None #guard so the check below cannot hit a NameError
    try:
        edit_textarea = doc.xml_select(u'.//*[@id="editor-textarea"]/text()')
except:
pass
if edit_textarea and 'End of edit conflict' in edit_textarea:
raise ConflictError(error=edit_textarea)
try:
error_div = doc.xml_select('//div[@class="error"]')
except:
return
    if error_div:
        raise GenericClientError(error=unicode(error_div[0]))
# ----------------------------------------------------------------------
# HTTP Method Handlers
# ----------------------------------------------------------------------
# The following functions implement versions of the various HTTP methods
# (GET, HEAD, POST, PUT, DELETE). Each method is actually implemented as a
# a pair of functions. One is a private implementation (e.g., _get_page).
# The other function is a wrapper that encloses each handler with the error
# handling function above (moin_error_handler). Again, this is to avoid
# excessive duplication of error handling code.
@method_dispatcher(SERVICE_ID, DEFAULT_MOUNT, wsgi_wrapper=moin_error_wrapper)
def dispatcher():
__doc__ = SAMPLE_QUERIES_DOC
return
@dispatcher.method("GET")
def get_page(environ, start_response):
#logger.debug('get_page: ' + repr((environ['SCRIPT_NAME'], environ['PATH_INFO'])))
req_headers = copy_headers_to_dict(environ,exclude=['HTTP_ACCEPT_ENCODING'])
wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
page = environ['PATH_INFO'].lstrip('/')
check_auth(environ, start_response, base, opener, req_headers)
upstream_handler = None
status = httplib.OK
params = cgi.parse_qs(environ['QUERY_STRING'])
#Note: probably a better solution here: http://code.google.com/p/mimeparse/
accepted_imts = environ.get('HTTP_ACCEPT', '').split(',')
#logger.debug('accepted_imts: ' + repr(accepted_imts))
imt = first_item(dropwhile(lambda x: '*' in x, accepted_imts))
#logger.debug('imt: ' + repr(imt))
params_for_moin = {}
cache_max_age = CACHE_MAX_AGE # max-age of this response. If set to None, it will not be used
if NO_CACHE_PATHS and first_item(dropwhile(lambda x: x not in page, NO_CACHE_PATHS)):
cache_max_age = None
if 'rev' in params:
#XXX: Not compatible with search
#params_for_moin = {'rev' : params['rev'][0], 'action': 'recall'}
params_for_moin = {'rev' : params['rev'][0]}
if 'search' in params:
searchq = params['search'][0]
query = urllib.urlencode({'value' : searchq, 'action': 'fullsearch', 'context': '180', 'fullsearch': 'Text'})
#?action=fullsearch&context=180&value=foo&=Text
url = absolutize('?'+query, base)
request = urllib2.Request(url, None, req_headers)
ctype = moin.RDF_IMT
cache_max_age = None
#elif 'action' in params and params['action'][0] == 'recall':
elif moin.HTML_IMT in environ.get('HTTP_ACCEPT', ''):
params = urllib.urlencode(params_for_moin)
url = absolutize(page+'?'+params, base)
request = urllib2.Request(url, None, req_headers)
ctype = moin.HTML_IMT
elif moin.RDF_IMT in environ.get('HTTP_ACCEPT', ''):
#FIXME: Make unique flag optional
#url = base + '/RecentChanges?action=rss_rc&unique=1&ddiffs=1'
url = absolutize('RecentChanges?action=rss_rc&unique=1&ddiffs=1', base)
#print >> sys.stderr, (url, base, '/RecentChanges?action=rss_rc&unique=1&ddiffs=1', )
request = urllib2.Request(url, None, req_headers)
ctype = moin.RDF_IMT
elif moin.ATTACHMENTS_IMT in environ.get('HTTP_ACCEPT', ''):
url = absolutize(page + '?action=AttachFile', base)
request = urllib2.Request(url, None, req_headers)
ctype = moin.ATTACHMENTS_IMT
def upstream_handler():
            #Sigh. Sometimes you have to break some Tag soup eggs to make a RESTful omelette
with closing(opener.open(request)) as resp:
rbody = resp.read()
doc = htmlparse(rbody)
raise_embedded_error(doc)
attachment_nodes = doc.xml_select(u'//*[contains(@href, "action=AttachFile") and contains(@href, "do=view")]')
targets = []
for node in attachment_nodes:
target = [ param.split('=', 1)[1] for param in node.href.split(u'&') if param.startswith('target=') ][0]
targets.append(target)
output = structencoder(indent=u"yes")
output.feed(
ROOT(
E((u'attachments'),
(E(u'attachment', {u'href': unicode(t)}) for t in targets)
)
))
return output.read(), ctype
#Notes on use of URI parameters - http://markmail.org/message/gw6xbbvx4st6bksw
elif ';attachment=' in page:
page, attachment = page.split(';attachment=', 1)
url = absolutize(page + '?action=AttachFile&do=get&target=' + attachment, base)
request = urllib2.Request(url, None, req_headers)
def upstream_handler():
with closing(opener.open(request)) as resp:
rbody = resp.read()
return rbody, dict(resp.info())['content-type']
#
elif ';history' in page:
cache_max_age = None
page, discard = page.split(';history', 1)
ctype = moin.XML_IMT
def upstream_handler():
revs = scrape_page_history(page, base, opener, req_headers)
output = structencoder(indent=u"yes")
output.feed(
ROOT(
E((u'history'),
(E(u'rev', {u'id': unicode(r['rev']), u'editor': unicode(r['editor']), u'date': unicode(r['date']).replace(' ', 'T')}) for r in revs)
)
))
return output.read(), ctype
elif imt:
params_for_moin.update({'mimetype': imt})
params = urllib.urlencode(params_for_moin)
url = absolutize(page, base) + '?' + params
request = urllib2.Request(url, None, req_headers)
ctype = moin.DOCBOOK_IMT
else:
params_for_moin.update({'action': 'raw'})
params = urllib.urlencode(params_for_moin)
url = absolutize(page, base) + '?' + params
request = urllib2.Request(url, None, req_headers)
ctype = moin.WIKITEXT_IMT
try:
if upstream_handler:
rbody, ctype = upstream_handler()
else:
with closing(opener.open(request)) as resp:
rbody = resp.read()
#headers = {moin.ORIG_BASE_HEADER: base}
#moin_base = absolutize(wiki_id, base)
moin_base_info = base + ' ' + wrapped_wiki_base + ' ' + original_page
response_headers = [("Content-Type", ctype),
("Vary", "Accept"),
(moin.ORIG_BASE_HEADER, moin_base_info)]
if cache_max_age:
response_headers.append(("Cache-Control","max-age="+str(cache_max_age)))
start_response(status_response(status), response_headers)
return rbody
    #HTTPError (a URLError subclass) is the only case that carries a .code
    except urllib2.HTTPError, e:
        if e.code == 401:
            raise HTTPAuthorizationError(url=request.get_full_url())
        elif e.code == 403:
            raise MoinMustAuthenticateError(url=request.get_full_url(), target=wiki_id)
        elif e.code == 404:
            raise MoinNotFoundError(fronturl=request_uri(environ), backurl=url)
        else:
            raise UnexpectedResponseError(url=url, code=e.code, error=str(e))
# PUT handler
@dispatcher.method("PUT")
def _put_page(environ, start_response):
'''
'''
req_headers = copy_headers_to_dict(environ,exclude=['HTTP_ACCEPT_ENCODING'])
wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
page = environ['PATH_INFO'].lstrip('/')
check_auth(environ, start_response, base, opener, req_headers)
ctype = environ.get('CONTENT_TYPE', 'application/unknown')
temp_fpath = read_http_body_to_temp(environ, start_response)
form_vars = fill_page_edit_form(page, wiki_id, base, opener, req_headers)
form_vars["savetext"] = open(temp_fpath, "r").read()
url = absolutize(page, base)
data = urllib.urlencode(form_vars)
request = urllib2.Request(url, data, req_headers)
try:
logger.debug('Prior to urllib2.opener')
with closing(opener.open(request)) as resp:
logger.debug('Return from urllib2.opener')
doc = htmlparse(resp)
raise_embedded_error(doc)
logger.debug('HTML parse complete post urllib2.opener')
    except urllib2.HTTPError, e:
        raise UnexpectedResponseError(url=url, code=e.code, error=str(e))
wrapped_url = join(wrapped_wiki_base, page)
msg = 'Page updated OK: %s (%s)'%(url, wrapped_url)
#response.add_header("Content-Length", str(len(msg)))
moin_base_info = base + ' ' + wrapped_wiki_base + ' ' + original_page
headers = [
("Content-Type", "text/plain"),
("Content-Location", wrapped_url),
(moin.ORIG_BASE_HEADER, moin_base_info),
(moin.WIKI_RELATIVE_HEADER, relativize(wrapped_url, wrapped_wiki_base)),
]
start_response(status_response(httplib.CREATED), headers)
return [msg]
# POST handler
@dispatcher.method("POST")
def post_page(environ, start_response):
'''
Attachments use URI path params
(for a bit of discussion see http://groups.google.com/group/comp.lang.python/browse_thread/thread/4662d41aca276d99)
'''
#ctype = environ.get('CONTENT_TYPE', 'application/unknown')
req_headers = copy_headers_to_dict(environ,exclude=['HTTP_ACCEPT_ENCODING'])
wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
logger.debug("wiki_id,base,opener,original_age,wrapped_wiki_base="+repr((wiki_id,base,opener,original_page,wrapped_wiki_base)))
check_auth(environ, start_response, base, opener, req_headers)
page = environ['PATH_INFO'].lstrip('/')
page, chaff, attachment = page.partition(';attachment=')
# print >> sys.stderr, page, attachment
#now = datetime.now().isoformat()
#Unfortunately because urllib2's data dicts don't give an option for limiting read length, must read into memory and wrap
#content = StringIO(environ['wsgi.input'].read(clen))
temp_fpath = read_http_body_to_temp(environ, start_response)
form_vars = fill_attachment_form(page, attachment, wiki_id, base, opener, req_headers)
form_vars["file"] = open(temp_fpath, "rb")
url = absolutize(page, base)
#print >> sys.stderr, url, temp_fpath
#data = urllib.urlencode(form_vars)
request = urllib2.Request(url, form_vars, req_headers)
try:
with closing(opener.open(request)) as resp:
doc = htmlparse(resp)
raise_embedded_error(doc)
#logger.debug('POST for attachment page response... ' + doc.xml_encode())
    except urllib2.HTTPError, e:
        if e.code == 404:
            raise MoinNotFoundError(fronturl=request_uri(environ), backurl=url)
        else:
            raise UnexpectedResponseError(url=url, code=e.code, error=str(e))
form_vars["file"].close()
os.remove(temp_fpath)
msg = 'Attachment updated OK: %s\n'%(url)
#response.add_header("Content-Length", str(len(msg)))
moin_base_info = base + ' ' + wrapped_wiki_base + ' ' + original_page
headers = [
("Content-Type", "text/plain"),
("Content-Location", url),
(moin.ORIG_BASE_HEADER, moin_base_info),
(moin.WIKI_RELATIVE_HEADER, relativize(url, wrapped_wiki_base)),
]
start_response(status_response(httplib.CREATED), headers)
return msg
# DELETE handler
@dispatcher.method("DELETE")
def _delete_page(environ, start_response):
'''
Deletes a Wiki page, returning 200 if successful. Does not yet support
the deletion of attachments.
'''
    #The Moin form asks that this be in multipart-form format, but the multipart handler
    #falls back to url-encoding unless you pass it a file. Luckily, the equivalent
    #url-encoded request works... for now.
req_headers = copy_headers_to_dict(environ,exclude=['HTTP_ACCEPT_ENCODING'])
wiki_id, base, opener, original_page, wrapped_wiki_base = target(environ)
page = environ['PATH_INFO'].lstrip('/')
check_auth(environ, start_response, base, opener, req_headers)
form_vars = fill_page_delete_form(page, wiki_id, base, opener, req_headers)
url = absolutize(page, base)
request = urllib2.Request(url, form_vars, req_headers)
try:
with closing(opener.open(request)) as resp:
doc = htmlparse(resp)
raise_embedded_error(doc)
    except urllib2.HTTPError, e:
        if e.code == 404:
            # Moin returns 404 on a successful DeletePage POST; recast as a 200
            pass
        else:
            raise UnexpectedResponseError(url=url, code=e.code, error=str(e))
msg = 'Page deleted OK: ' + url
start_response(status_response(httplib.OK),[("Content-Type","text/plain")])
return msg | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/demo/moinrest.py | moinrest.py |
import os
import tempfile
import cgi
import httplib
import urllib, urllib2
import base64
from functools import wraps
from string import Template
from wsgiref.util import request_uri
from itertools import islice, dropwhile
#from amara.lib.iri import *
from amara.lib.iri import absolutize, join#, split_uri_ref
from amara.lib.util import first_item
from akara import logger
from akara import global_config
def status_response(code):
"""
Given an int or string, return the HTTP status line
"""
# If code is already a full status line, let it pass through,
# but if just a response code as a string, convert to int
try:
c = int(code)
except ValueError:
c = code
if isinstance(c,int):
ret = '%i %s'%(c, httplib.responses[c])
else: # string/unicode
ret = c
return ret
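# Usage sketch (illustrative, not part of the original module):
#   status_response(200)             -> '200 OK'
#   status_response('404 Not Found') -> '404 Not Found'  (full status lines pass through)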
# Convert a lower case header name into its RFC 2616-defined equivalent.
# Useful when bridging httplib2 and WSGI.
normalize_http_header_name = lambda h: '-'.join([s.capitalize() for s in h.split('-')])
class iterwrapper:
"""
Wraps the response body iterator from the application to meet WSGI
requirements.
"""
def __init__(self, wrapped, responder):
"""
wrapped - the iterator coming from the application
        responder - a callable for any processing of a
response body chunk before passing it on to the server.
"""
self._wrapped = iter(wrapped)
self._responder = responder(self._wrapped)
if hasattr(wrapped, 'close'):
self.close = self._wrapped.close
def __iter__(self):
return self
def next(self):
return self._responder.next()
def geturl(environ, relative=''):
"""
Constructs a portable URL for your application. If relative is omitted or '',
Just return the current base URL. Otherwise resolve the relative portion against
the present base and return the resulting URL.
(Inspired by url functions in CherryPy and Pylons, and Ian Bicking's code in PEP 333)
If you have a proxy that forwards the HOST but not the original HTTP request path
you might have to set akara.proxy-base in environ (e.g. through .ini) See
http://wiki.xml3k.org/Akara/Configuration
"""
#Manually set proxy base URI for non-well-behaved proxies, such as Apache < 1.3.33,
#Or for cases where the proxy is not mounted at the root of a host, and thus the original
#request path info is lost
if environ.get('akara.proxy-base'):
url = environ['akara.proxy-base']
        if relative: url = absolutize(relative, url)
return url
url = environ['wsgi.url_scheme']+'://'
#Apache 1.3.33 and later mod_proxy uses X-Forwarded-Host
if environ.get('HTTP_X_FORWARDED_HOST'):
url += environ['HTTP_X_FORWARDED_HOST']
#Lighttpd uses X-Host
elif environ.get('HTTP_X_HOST'):
url += environ['HTTP_X_HOST']
elif environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
#Can't use the more strict Uri.PercentEncode because it would quote the '/'
url += urllib.quote(environ.get('SCRIPT_NAME', '').rstrip('/')) + '/'
    if relative: url = absolutize(relative, url)
return url
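# Usage sketch (hedged; environ values are illustrative):
#   environ = {'wsgi.url_scheme': 'http', 'HTTP_HOST': 'example.com',
#              'SERVER_PORT': '80', 'SCRIPT_NAME': '/moin'}
#   geturl(environ)          -> 'http://example.com/moin/'
#   geturl(environ, 'spam')  -> 'http://example.com/moin/spam'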
def guess_self_uri(environ):
return absolutize(environ['SCRIPT_NAME'].rstrip('/'), request_uri(environ, include_query=False))
def in_akara():
'Returns True if the current process is running as an Akara module'
return 'pid_file' in global_config.__dict__
def find_peer_service(peer_id, environ=None):
'''
DEPRECATED! Use discover_service() and work with the resulting query_template property
Find a peer service endpoint, by ID, mounted on this same Akara instance
    Must be called from a running akara service, and it is highly recommended to call
at the top of service functions, or at least before the request environ has been manipulated
'''
if not in_akara():
raise RuntimeError('find_peer_service is meant to be called from within Akara process space')
from akara.registry import _current_registry
from akara import request
if environ:
serverbase = guess_self_uri(environ)
else:
serverbase = getattr(global_config, 'server_root')
for (path, s) in _current_registry._registered_services.iteritems():
if s.ident == peer_id:
return join(serverbase, '..', path)
return None
def discover_service(sid):
'''
Discover a service in the current environment with the given ID
    It might be a peer, mounted on this same Akara instance, or it might
    be a proxy for a remote Web service
Must be called from a running akara service
Note if you were using the deprecated
find_peer_service(peer_id, environ=None) switch to this:
>>> s = discover_service(peer_id)
>>> url = join(server_call_base(environ), '..', path)
'''
if not in_akara():
        raise RuntimeError('discover_service is meant to be called from within Akara process space')
from akara.registry import _current_registry
from akara import request
for (path, s) in _current_registry._registered_services.iteritems():
        if s.ident == sid:
return s
return None
def server_call_base(environ=None):
'''
Determine the best base URL for calling a peer service on this Akara instance
Must be called from a running akara service, and it is highly recommended to call
at the top of service functions, or at least before the request environ has been manipulated
'''
if not in_akara():
        raise RuntimeError('server_call_base is meant to be called from within Akara process space')
if environ:
serverbase = guess_self_uri(environ)
else:
serverbase = getattr(global_config, 'server_root')
return serverbase
def http_method_handler(method):
'''
A decorator maker to flag a function as suitable for a given HTTP method
'''
def wrap(f):
#@wraps(f)
#def wrapper(*args, **kwargs):
# return f()
f.method = method
return f
return wrap
class wsgibase(object):
def __init__(self):
self._method_handlers = {}
if not hasattr(self, 'dispatch'):
self.dispatch = self.dispatch_by_lookup
#if not hasattr(self, 'dispatch'):
# self.dispatch = self.dispatch_by_lookup if hasattr(self, '_methods') else self.dispatch_simply
for obj in ( getattr(self, name) for name in dir(self) ):
method = getattr(obj, 'method', None)
if method:
self._method_handlers[method] = obj
return
def __call__(self, environ, start_response):
self.environ = environ
self.start_response = start_response
return self
def __iter__(self):
func = self.dispatch()
if func is None:
response_headers = [('Content-type','text/plain')]
            self.start_response(status_response(httplib.METHOD_NOT_ALLOWED), response_headers)
yield 'HTTP method Not Allowed'
else:
yield func()
def dispatch_simply(self):
func = 'do_%s' % self.environ['REQUEST_METHOD']
if not hasattr(self, func):
return None
else:
return func
def dispatch_by_lookup(self):
return self._method_handlers.get(self.environ['REQUEST_METHOD'])
def parse_fields(self):
s = self.environ['wsgi.input'].read(int(self.environ['CONTENT_LENGTH']))
return cgi.parse_qs(s)
def extract_auth(environ):
'''
Extract auth creds (HTTP basic only, for now) from the incoming request and return the
(username, password)
environ - The usual WSGI structure. Note: if you are using simple_service,
in Akara services available as akara.request.environ, or perhaps passed right
into the handler
top - top URL to be used for this auth.
'''
#Useful: http://www.voidspace.org.uk/python/articles/authentication.shtml
auth = environ.get('HTTP_AUTHORIZATION')
if not auth: return None
scheme, data = auth.split(None, 1)
if scheme.lower() != 'basic':
raise RuntimeError('Unsupported HTTP auth scheme: %s'%scheme)
username, password = data.decode('base64').split(':', 1)
return username, password
def copy_auth(environ, top, realm=None):
'''
Get auth creds (HTTP basic only, for now) from the incoming request and return an
HTTP auth handler for urllib2. This handler allows you to "forward" this auth to
remote services
environ - The usual WSGI structure. Note: if you are using simple_service,
in Akara services available as akara.request.environ, or perhaps passed right
into the handler
top - top URL to be used for this auth.
'''
#Useful: http://www.voidspace.org.uk/python/articles/authentication.shtml
creds = extract_auth(environ)
if creds:
username, password = creds
else:
return None
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
# HTTPPasswordMgr top must omit any URL components before the host (i.e. no scheme and no auth info in the authority section)
#(scheme, authority, path, query, fragment) = split_uri_ref(top)
#auth, host, port = split_authority(authority)
#auth_top_url = (host + ':' + port if port else host) + path
#print >> sys.stderr, 'Auth creds: %s:%s (%s)'%(username, password, auth_top_url)
logger.debug('Auth creds: %s:%s (%s)'%(username, password, top))
# Not setting the realm for now, so use None
#password_mgr.add_password(None, auth_top_url, username, password)
password_mgr.add_password(None, top, username, password)
#password_handler = urllib2.HTTPDigestAuthHandler(password_mgr)
password_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
return password_handler
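# Usage sketch (hedged): forward the caller's Basic credentials to a backend
#   handler = copy_auth(environ, 'http://backend/wiki/')
#   opener = urllib2.build_opener(handler) if handler else urllib2.build_opener()
#   resp = opener.open('http://backend/wiki/SomePage')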
def header_credentials(username, password, headers=None):
'''
httplib2's simple HTTP auth support is great, but it doesn't recognize every case
in which auth is needed, sometimes because of compliance issues on the remote site*
Also, there are unusual cases where you want to always send the auth header,
without first waiting for 401 challenge
This function helps with these issues by unconditionally setting up httplib2 headers
for Basic authentication
>>> username = '[email protected]'
>>> password = 'password'
>>> H = httplib2.Http()
>>> auth_headers = header_credentials(username, password)
>>> response, content = H.request(url, 'GET', headers=auth_headers)
* For an example of such issues: http://pyre.posterous.com/accessing-posterous-api-in-python
'''
credentials = "Basic " + base64.b64encode("%s:%s"%(username, password))
if headers:
headers.update({ 'Authorization': credentials })
else:
headers = { 'Authorization': credentials }
return headers
CHUNKLEN = 4096
def read_http_body_to_temp(environ, start_response):
'''
Handle the reading of a file from an HTTP message body (file pointer from wsgi.input)
in chunks to a temporary file
Returns the file path of the resulting temp file
'''
    clen = int(environ.get('CONTENT_LENGTH') or 0)
if not clen:
raise ContentLengthRequiredError()
http_body = environ['wsgi.input']
temp = tempfile.mkstemp(suffix=".dat")
while clen != 0:
chunk_len = min(CHUNKLEN, clen)
data = http_body.read(chunk_len)
if data:
#assert chunk_len == os.write(temp[0], data)
written = os.write(temp[0], data)
#print >> sys.stderr, "Bytes written to file in this chunk", written
clen -= len(data)
else:
clen = 0
os.fsync(temp[0]) #is this needed with the close below?
os.close(temp[0])
return temp[1]
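# Usage sketch: inside a WSGI PUT/POST handler (see moinrest.py for real uses)
#   temp_fpath = read_http_body_to_temp(environ, start_response)
#   with open(temp_fpath, 'rb') as f:
#       body = f.read()
#   os.remove(temp_fpath)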
#Convert WSGI environ headers to a plain header list (e.g. for forwarding request headers)
#Copied from webob.
_key2header = {
'CONTENT_TYPE': 'Content-Type',
'CONTENT_LENGTH': 'Content-Length',
'HTTP_CONTENT_TYPE': 'Content_Type',
'HTTP_CONTENT_LENGTH': 'Content_Length',
}
#Skipping User-Agent is actually Moin-specific, since Moin seems to react to different UAs, and e.g. gives 403 errors in response to Curl's UA
_skip_headers = [
'HTTP_HOST',
'HTTP_ACCEPT',
'HTTP_USER_AGENT',
]
def _trans_key(key, exclude=[]):
if not isinstance(key, basestring):
return None
elif key in _key2header:
#Do NOT copy these special headers (change from Webob)
return None
#return _key2header[key]
elif key in _skip_headers or key in exclude:
return None
elif key.startswith('HTTP_'):
return key[5:].replace('_', '-').title()
else:
return None
def copy_headers(environ,exclude=[]):
header_list = []
for k, v in environ.iteritems():
pure_header = _trans_key(k,exclude)
if pure_header:
#FIXME: does this account for dupe headers in the inbound WSGI?
header_list.append((pure_header, v))
return header_list
def copy_headers_to_dict(environ, exclude=[]):
headers = {}
for k, v in environ.iteritems():
pure_header = _trans_key(k,exclude)
if pure_header:
#FIXME: does this account for dupe headers in the inbound WSGI?
headers[pure_header] = v
return headers
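# e.g. copy_headers_to_dict({'HTTP_ACCEPT_LANGUAGE': 'en', 'HTTP_HOST': 'example.com'})
#      -> {'Accept-Language': 'en'}   (HTTP_HOST is in _skip_headers)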
def requested_imt(environ):
# Choose a preferred media type from the Accept header, using application/json as presumed
# default, and stripping out any wildcard types and type parameters
#
# FIXME Ideally, this should use the q values and pick the best media type, rather than
# just picking the first non-wildcard type. Perhaps: http://code.google.com/p/mimeparse/
accepted_imts = []
accept_header = environ.get('HTTP_ACCEPT')
if accept_header :
accepted_imts = [ mt.split(';')[0] for mt in accept_header.split(',') ]
accepted_imts.append('application/json')
#if logger: logger.debug('accepted_imts: ' + repr(accepted_imts))
imt = first_item(dropwhile(lambda x: '*' in x, accepted_imts))
return imt
#
# ======================================================================
# Exceptions
# ======================================================================
# Base exception used to indicate errors. Rather than replicating tons
# of error handling code, these errors are raised instead. A top-level
# exception handler catches them and then generates some kind of
# appropriate HTTP response. Positional arguments (if any)
# are just passed to the Exception base as before. Keyword arguments
# are saved in a local dictionary. They will be used to pass parameters
# to the Template strings used when generating error messages.
class HttpError(Exception):
def __init__(self,*args,**kwargs):
Exception.__init__(self,*args)
self.parms = kwargs
class BadTargetError(HttpError): pass
class HTTPAuthorizationError(HttpError): pass
class MoinAuthorizationError(HttpError): pass
class UnexpectedResponseError(HttpError): pass
class MoinMustAuthenticateError(HttpError): pass
class MoinNotFoundError(HttpError): pass
class ContentLengthRequiredError(HttpError): pass
class GenericClientError(HttpError): pass
class ConflictError(HttpError): pass | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/util/__init__.py | __init__.py |
__all__ = [
"WIKITEXT_IMT", "DOCBOOK_IMT", "RDF_IMT", "HTML_IMT", "ATTACHMENTS_IMT",
"ORIG_BASE_HEADER", "ATTACHMENTS_MODEL_XML", "ATTACHMENTS_MODEL",
"MOIN_DOCBOOK_MODEL_XML", "MOIN_DOCBOOK_MODEL", "WIKI_RELATIVE_HEADER"
]
#import pprint
import os
import stat # index constants for os.stat()
import re
import httplib, urllib, urllib2, cookielib
import datetime
from gettext import gettext as _
from dateutil.parser import parse as dateparse
import amara
from amara import bindery
from amara.namespaces import *
from amara.xslt import transform
from amara.writers.struct import *
from amara.bindery.html import parse as htmlparse
from amara.lib import U
from amara.lib.date import timezone, UTC
from amara.lib.iri import split_uri_ref, split_fragment, relativize, absolutize, IriError, join, is_absolute
from amara.bindery.model import examplotron_model, generate_metadata, metadata_dict
from amara.bindery.util import dispatcher, node_handler, property_sequence_getter
from akara import logger
WIKITEXT_IMT = 'text/plain'
HTML_IMT = 'text/html'
DOCBOOK_IMT = 'application/docbook+xml'
RDF_IMT = 'application/rdf+xml'
ATTACHMENTS_IMT = 'application/x-moin-attachments+xml'
ORIG_BASE_HEADER = 'x-akara-wrapped-moin'
WIKI_RELATIVE_HEADER = 'x-wiki-relative-location'
#Note: this requires the user to install the demo/etc/application_xml.py (a fixed Moin XML formatter) in plugin/formatter/ for the Moin instance
XML_IMT = 'application/xml'
# XML models
ATTACHMENTS_MODEL_XML = '''<?xml version="1.0" encoding="UTF-8"?>
<attachments xmlns:eg="http://examplotron.org/0/" xmlns:ak="http://purl.org/xml3k/akara/xmlmodel">
<attachment href="" ak:rel="name()" ak:value="@href"/>
</attachments>
'''
ATTACHMENTS_MODEL = examplotron_model(ATTACHMENTS_MODEL_XML)
HISTORY_MODEL_XML = '''<?xml version="1.0" encoding="UTF-8"?>
<history xmlns:eg="http://examplotron.org/0/" xmlns:ak="http://purl.org/xml3k/akara/xmlmodel">
<rev eg:occurs="*" editor="akara" date="2010-05-12T21:35:34" id="20"/>
</history>
'''
HISTORY_MODEL = examplotron_model(HISTORY_MODEL_XML)
MOIN_DOCBOOK_MODEL_XML = '''<?xml version="1.0" encoding="UTF-8"?>
<article xmlns:eg="http://examplotron.org/0/" xmlns:ak="http://purl.org/xml3k/akara/xmlmodel" ak:resource="">
<ak:rel name="'ak-old-type'" ak:value="glosslist[1]/glossentry[glossterm='akara:type']/glossdef//ulink/@url"/>
<ak:rel name="'ak-type'" ak:value="section[title='akara:metadata']/glosslist/glossentry[glossterm='akara:type']/glossdef//ulink/@url"/>
<ak:rel name="'ak-updated'" ak:value="articleinfo/revhistory/revision[1]/date"/>
<articleinfo>
<title ak:rel="name()" ak:value=".">FrontPage</title>
<revhistory>
<revision eg:occurs="*">
<revnumber>15</revnumber>
<date>2009-02-22 07:45:22</date>
<authorinitials>localhost</authorinitials>
</revision>
</revhistory>
</articleinfo>
<section eg:occurs="*" ak:resource="">
<title ak:rel="name()" ak:value=".">A page</title>
<para>
Using: <ulink url="http://moinmo.in/DesktopEdition"/> set <code>interface = ''</code>)
</para>
<itemizedlist>
<listitem>
<para>
<ulink url="http://localhost:8080/Developer#">Developer</ulink> </para>
</listitem>
</itemizedlist>
</section>
</article>
'''
MOIN_DOCBOOK_MODEL = examplotron_model(MOIN_DOCBOOK_MODEL_XML)
#python akara/services/moincms.py -p "Site.*" http://localhost:8880/ ~/tmp/ http://localhost:8080/
#
#Detailed license and copyright information: http://4suite.org/COPYRIGHT
#aname = partial(property_sequence_getter, u"name")
#aemail = partial(property_sequence_getter, u"email")
#auri = partial(property_sequence_getter, u"uri")
AKARA_NS = u'http://purl.org/dc/org/xml3k/akara'
CMS_BASE = AKARA_NS + u'/cms'
CAMELCASE_PAT = re.compile(u'(\s+)(([A-Z]+)([a-z]+)([A-Z]+)(\w+))(\s+)')
def text_to_moin(text):
'''
Convert text into a form where it appears as one would expect in Moin:
* Normalize line endings
* Escape CamelCase
>>> from akara.util.moin import text_to_moin
>>> text_to_moin(u' a AxBxCx b\\r\\nMoreCamelCase foo') #Beware double-escaped chars for doctest
u' a !AxBxCx b\\n!MoreCamelCase foo'
>>> text_to_moin(u' a ABC b\\r\\nmoreCamelCase foo') #Beware double-escaped chars for doctest
u' a ABC b\\nmoreCamelCase foo'
'''
text = CAMELCASE_PAT.subn(lambda m: m.group(1) + u'!' + m.group(2) + m.groups()[-1], text)[0]
return u'\n'.join([line.rstrip() for line in text.splitlines() ])
def cleanup_text_blocks(text):
return u'\n'.join([line.strip() for line in text.splitlines() ])
class node(object):
'''
Akara Moin/CMS node, a Moin wiki page that follows a template to direct workflow
activity, including metadata extraction
'''
AKARA_TYPE = u'http://purl.org/xml3k/akara/cms/resource-type'
NODES = {}
#Processing priority
PRIORITY = 0
ENDPOINTS = None
@staticmethod
def factory(rest_uri, moin_link=None, opener=None):
opener = opener or urllib2.build_opener()
logger.debug('rest_uri: ' + rest_uri)
req = urllib2.Request(rest_uri, headers={'Accept': DOCBOOK_IMT})
resp = opener.open(req)
doc = bindery.parse(resp, standalone=True, model=MOIN_DOCBOOK_MODEL)
original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
#self.original_wiki_base = dict(resp.info())[ORIG_BASE_HEADER]
#amara.xml_print(self.content_cache)
metadata, first_id = metadata_dict(generate_metadata(doc))
metadata = metadata[first_id]
akara_type = U(metadata[u'ak-type'])
logger.debug('Type: ' + akara_type)
try:
#Older Moin CMS resource types are implemented by registration to the global node.NODES
cls = node.NODES[akara_type]
except KeyError:
#Newer Moin CMS resource types are implemented by discovery of a URL,
#to which a POST request executes the desired action
return node.ENDPOINTS and (rest_uri, akara_type, node.ENDPOINTS[akara_type], doc, metadata, original_wiki_base)
else:
instance = cls(rest_uri, moin_link, opener, cache=(doc, metadata, original_wiki_base))
return instance
#FIXME: This cache is to help eliminate unnecessary trips back to moin to get
#The page body. It should soon be replaced by the proposed comprehensive caching
def __init__(self, rest_uri, moin_link, opener=None, cache=None):
'''
rest_uri - the full URI to the Moin/REST wrapper for this page
relative - the URI of this page relative to the Wiki base
'''
self.rest_uri = rest_uri
self.opener = opener
self.moin_link = moin_link
#logger.debug('Moin link: ' + moin_link)
#logger.debug('REST URI: ' + rest_uri)
self.cache = cache #(doc, metadata, original_wiki_base)
return
def load(self):
raise NotImplementedError
def render(self):
raise NotImplementedError
def up_to_date(self, force_update=False):
'''
Checks whether there needs to be an update of the output
'''
#By default just always update
return False
def section_titled(self, title):
'''
Helper to extract content from a specific section within the page
'''
#FIXME: rethink this "caching" business
doc, metadata, original_wiki_base = self.cache
#logger.debug("section_titled: " + repr(title))
return doc.article.xml_select(u'section[title = "%s"]'%title)
#
def definition_list(self, list_path, contextnode=None, patterns=None):
'''
Helper to construct a dictionary from an indicated definition list on the page
'''
#FIXME: rethink this "caching" business
#Use defaultdict instead, for performance
patterns = patterns or {None: lambda x: U(x) if x else None}
doc, metadata, original_wiki_base = self.cache
contextnode = contextnode or doc.article
top = contextnode.xml_select(list_path)
if not top:
return None
#Go over the glossentries, and map from term to def, applying the matching
#Unit transform function from the patterns dict
result = dict((U(i.glossterm), patterns.get(U(i.glossterm), patterns[None])(i.glossdef))
for i in top[0].glossentry)
#logger.debug("definition_list: " + repr(result))
return result
node.NODES[node.AKARA_TYPE] = node
#XXX: do we really need this function indirection for simple global dict assignment?
def register_node_type(type_id, nclass):
node.NODES[type_id] = nclass
def wiki_uri(original_base, wrapped_base, link, relative_to=None, raw=False):
'''
Constructs absolute URLs to the original and REST-wrapper for a page, given a link from another page
original_base - The base URI of the actual Moin instance
wrapped_base - The base URI of the REST-wrapped proxy of the Moin instance
link - the relative link, generally from one wiki page to another
relative_to - the REST-wrapped version of the page from which the relative link came, defaults to same as wrapped_base
raw - the link is a full hierarchical path, rather than relative to the wiki base
Returns a tuple (wrapped_uri, abs_link)
wrapped_uri - the URI wrapped for REST ops
abs_link - the full, original wiki URL
>>> from akara.util.moin import wiki_uri
>>> wiki_uri('http://example.com/mywiki/', 'http://localhost:8880/moin/w/', '/spam')
('http://localhost:8880/moin/w/spam', 'http://example.com/mywiki/spam')
>>> wiki_uri('http://example.com/mywiki/', 'http://localhost:8880/moin/w/', 'http://google.com/spam')
(None, None)
>>> wiki_uri('http://example.com/mywiki/', 'http://localhost:8880/moin/w/', 'http://google.com/spam', raw=True)
(None, None)
>>> wiki_uri('http://example.com/mywiki/', 'http://localhost:8880/moin/w/', '/mywiki/spam', raw=True)
('http://localhost:8880/moin/w/spam', 'http://example.com/mywiki/spam')
>>> wiki_uri('http://example.com/mywiki/', 'http://localhost:8880/moin/w/', '/mywiki/spam')
('http://localhost:8880/moin/w/mywiki/spam', 'http://example.com/mywiki/mywiki/spam')
'''
#rel_link = relativize(abs_link, original_wiki_base)
#e.g. original wiki base is http://myhost:8080/mywiki/ and link is /a/b
#abs_link is http://myhost:8080/mywiki/a/b note the need to strip the leading / to get that
#from akara import logger; logger.debug('wiki_uri' + repr((original_base, wrapped_base, link, relative_to, absolutize(link, original_base.rstrip('/')+'/'))))
if raw and not is_absolute(link):
(scheme, authority, path, query, fragment) = split_uri_ref(original_base)
link = link[len(path):]
link = link.lstrip('/')
abs_link = absolutize(link, original_base.rstrip('/')+'/')
rel_to_wikibase = relativize(abs_link, original_base.rstrip('/')+'/')
if not rel_to_wikibase:
#It's not a relative wiki link
return None, None
rest_uri = absolutize(rel_to_wikibase, wrapped_base.rstrip('/')+'/')
return rest_uri, abs_link
#
def unwrap_uri(original_base, wrapped_base, rest_uri):
'''
Constructs an absolute URL to the original Moin page
original_base - The base URI of the actual Moin instance
wrapped_base - The base URI of the REST-wrapped proxy of the Moin instance
rest_uri - moinrest-wrapped URI
Returns a tuple unwrapped_link
>>> from akara.util.moin import unwrap_uri
>>> unwrap_uri('http://example.com/mywiki/', 'http://localhost:8880/moin/w/', 'http://localhost:8880/moin/w/spam')
'http://example.com/mywiki/spam'
>>> unwrap_uri('http://example.com/', 'http://localhost:8880/moin/w/', 'http://localhost:8880/moin/w/spam')
'http://example.com/spam'
'''
rel = relativize(rest_uri, wrapped_base.rstrip('/')+'/')
return absolutize(rel, original_base.rstrip('/')+'/')
RE_XML_WIKISPLIT = re.compile(u'\s+')
def wiki_normalize(s):
'''
A smarter variety of string normalization. Multiple runs of whitespace are replaced
with a space, except that " \n" goes unchanged, and runs of whitespace at the beginning
of a line go unchanged
>>> from akara.util.moin import wiki_normalize
    >>> wiki_normalize(u'= A =\\n * spam \\n * eggs\\n\\n') #Beware double-escaped chars for doctest
    u'= A =\\n * spam \\n * eggs\\n'
'''
#First of all normalize line endings
s = '\n'.join(s.splitlines())
def repl(m):
if '\n' in m.group(0):
return m.group(0)
else:
return ' '
return RE_XML_WIKISPLIT.subn(repl, s)[0] | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/util/moin.py | moin.py |
####
# http://peerit.blogspot.com/2007/07/multipartposthandler-doesnt-work-for.html
# http://code.activestate.com/recipes/146306/
#
# 02/2006 Will Holcomb <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 7/26/07 Slightly modified by Brian Schneider
# in order to support unicode files ( multipart_encode function )
"""
Usage:
Enables the use of multipart/form-data for posting forms
Inspirations:
Upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
urllib2_file:
Fabien Seisen: <[email protected]>
Example:
import multipart_post_handler, urllib2, cookielib
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
multipart_post_handler.MultipartPostHandler)
params = { "username" : "bob", "password" : "riviera",
"file" : open("filename", "rb") }
opener.open("http://wwww.bobsite.com/upload/", params)
Further Example:
The main function of this file is a sample which downloads a page and
then uploads it to the W3C validator.
"""
import urllib
import urllib2
import mimetools, mimetypes
import os, stat, sys
from cStringIO import StringIO
class Callable:
def __init__(self, anycallable):
self.__call__ = anycallable
# Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first
def http_request(self, request):
data = request.get_data()
if data is not None and type(data) != str:
v_files = []
v_vars = []
try:
for(key, value) in data.items():
if type(value) == file:
v_files.append((key, value))
else:
v_vars.append((key, value))
except TypeError:
systype, value, traceback = sys.exc_info()
raise TypeError, "not a valid non-string sequence or mapping object", traceback
if len(v_files) == 0:
data = urllib.urlencode(v_vars, doseq)
else:
boundary, data = self.multipart_encode(v_vars, v_files)
contenttype = 'multipart/form-data; boundary=%s' % boundary
if(request.has_header('Content-Type')
and request.get_header('Content-Type').find('multipart/form-data') != 0):
print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
request.add_unredirected_header('Content-Type', contenttype)
request.add_data(data)
return request
def multipart_encode(vars, files, boundary = None, buf = None):
if boundary is None:
boundary = mimetools.choose_boundary()
if buf is None:
buf = StringIO()
for(key, value) in vars:
buf.write('--%s\r\n' % boundary)
buf.write('Content-Disposition: form-data; name="%s"' % key)
buf.write('\r\n\r\n' + value + '\r\n')
for(key, fd) in files:
file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
filename = fd.name.split('/')[-1]
contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
buf.write('--%s\r\n' % boundary)
buf.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
buf.write('Content-Type: %s\r\n' % contenttype)
# buffer += 'Content-Length: %s\r\n' % file_size
fd.seek(0)
buf.write('\r\n' + fd.read() + '\r\n')
buf.write('--' + boundary + '--\r\n\r\n')
buf = buf.getvalue()
return boundary, buf
multipart_encode = Callable(multipart_encode)
https_request = http_request
def main():
import tempfile, sys
validatorURL = "http://validator.w3.org/check"
opener = urllib2.build_opener(MultipartPostHandler)
def validateFile(url):
temp = tempfile.mkstemp(suffix=".html")
os.write(temp[0], opener.open(url).read())
params = { "ss" : "0", # show source
"doctype" : "Inline",
"uploaded_file" : open(temp[1], "rb") }
print opener.open(validatorURL, params).read()
os.remove(temp[1])
if len(sys.argv[1:]) > 0:
for arg in sys.argv[1:]:
validateFile(arg)
else:
validateFile("http://www.google.com")
if __name__=="__main__":
main() | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/util/multipart_post_handler.py | multipart_post_handler.py |
import httplib
import sqlite3
from datetime import datetime
from wsgiref.util import shift_path_info, request_uri
from string import Template
from cStringIO import StringIO
from akara.resource import *
from akara.resource.repository import driver
from akara.resource.index import simple_xpath_index
# Templates
wrapper = Template("""\
<html><head><title>$title</title></head><body>
$body
</body></html>
""")
four_oh_four = Template("""\
<html><body>
<h1>404-ed!</h1>
The requested URL <i>$url</i> was not found.
</body></html>""")
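# NOTE: APPS (used below) is assumed to be provided by the hosting application:
# a mapping of path aliases to WSGI callables. It is not defined in this module.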
def alias(environ, start_response):
'''
GET - retrieve the resource with the specified alias
POST - create a resource with the specified alias
'''
key = environ['PATH_INFO']
print 'key', key
if not key in APPS:
#404 error
start_response('404 Not Found', [('content-type', 'text/html')])
response = four_oh_four.substitute(url=request_uri(environ))
return [response]
next = APPS[key]
return next(environ, start_response)
def response(code):
return '%i %s'%(code, httplib.responses[code])
def store(environ, start_response):
dbfile = environ['akara.DBFILE']
drv = driver(sqlite3.connect(dbfile))
def head_resource():
get_resource()
return ''
def get_resource():
key = shift_path_info(environ)
content1, metadata = drv.get_resource(key)
if content1 is None:
#404 error
start_response('404 Not Found', [('content-type', 'text/html')])
response = four_oh_four.substitute(url=request_uri(environ))
return response
start_response('200 OK', [('content-type', str(metadata[CONTENT_TYPE]))])
return content1.encode('utf-8')
def post_resource():
ctype = environ.get('CONTENT_TYPE', 'application/unknown')
        clen = int(environ.get('CONTENT_LENGTH') or 0)
if not clen:
start_response("411 Length Required", [('Content-Type','text/plain')])
return ["Length Required"]
key = shift_path_info(environ)
now = datetime.now().isoformat()
md = {
CREATED: now,
UPDATED: now,
CONTENT_LENGTH: clen,
CONTENT_TYPE: ctype,
}
#md = self.standard_index
content = environ['wsgi.input'].read(clen)
id = drv.create_resource(content, metadata=md)
msg = 'Adding %i' % id
new_uri = str(id)
headers = [('Content-Type', 'text/plain')]
headers.append(('Location', new_uri))
headers.append(('Content-Location', new_uri))
#environ['akara.etag'] = compute_etag(content)
headers.append(('Content-Length', str(len(msg))))
start_response("201 Created", headers)
return msg
dispatch = {
'GET': get_resource,
'HEAD': head_resource,
'POST': post_resource,
}
method = dispatch.get(environ['REQUEST_METHOD'])
if not method:
response_headers = [('Content-type','text/plain')]
start_response(response(httplib.METHOD_NOT_ALLOWED), response_headers)
return ['Method Not Allowed']
else:
return [method()] | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/resource/web.py | web.py |
#__all__ = ['manager', 'standard_index']
CREATED = 'akara:created'
UPDATED = 'akara:updated'
CONTENT_LENGTH = 'akara:size'
CONTENT_TYPE = 'akara:type'
class resource(object):
'''
An analogue of a Web resource
In effect it serves as a cache of the actual stored repository data
Standard repository metadata:
* Content type (internet media type)
* Size
* creation date
* last mod date
'''
def __init__(self, rid, manager):
self._manager = manager
self.rid = rid
self._metadata = None #Mixes/caches repository metadata and user metadata
self._content = None
return
def __getitem__(self, name):
return self._metadata[name]
def _get_content(self):
if self._content is None: self._sync()
return self._content
def _set_content(self, c):
if self._content is None: self._sync()
self._content = c
content = property(_get_content, _set_content)
@property
def metadata(self):
if self._metadata is None: self._sync()
return self._metadata
def _sync(self):
'''
Sync up this copy with the database
'''
drv = self._manager._driver
        #Assign the backing fields directly: 'metadata' is a read-only property and
        #the 'content' setter would recurse back into _sync(); the driver returns
        #the content text itself, not a stream
        content, self._metadata = drv.get_resource(self.rid)
        self._content = content
return
class manager(dict):
"""
Maps aliases to IDs
"""
#Manager itself is a very simple dict interface. You would generally use a more specialized
#object that includes the persistence layer
#def __init__(self, input_dict={}):
# self.update(input_dict)
def __init__(self, driver):
self._driver = driver
self.aliases = {}
#FIXME: replace with MRU
self._cache = {}
return
def lookup(self, name):
'''
Look up resource by ID
'''
rid = name
if rid in self.aliases:
rid = self.aliases[rid]
if rid in self._cache:
            return self._cache[rid]
if self._driver.has_resource(rid):
return resource(rid, self)
else:
raise RuntimeError('Resource not found: %s'%str(rid))
#raise ResourceError
return | Akara | /Akara-2.0.0a4.tar.gz/Akara-2.0.0a4/lib/resource/__init__.py | __init__.py |
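# Usage sketch (names illustrative):
#   from akara.resource.repository import driver
#   mgr = manager(driver(sqlite3.connect('repo.db')))
#   res = mgr.lookup(1)
#   print res.metadata, res.content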
import sqlite3
from cStringIO import StringIO
#__all__ = ['driver']
class driver(object):
"""
Typical usage:
driver = akara.resource.repository.driver(sqlite3.connect(conn_str))
"""
def __init__(self, conn):
self._conn = conn
def create_resource(self, content, metadata):
"""
content - the actual content of the resource
metadata - a dictionary
We will generate the id.
If e.g. they use hierarchical path aliases, that would be part of the metadata
"""
c = self._conn.cursor()
#http://www.sqlite.org/faq.html#q1
c.execute('INSERT into resources values(NULL, ?)', (content,))
new_id = c.lastrowid
#c.execute('SELECT max(id) FROM resources')
#rowid = c.next()[0]
#print 'past inserted', rowid
#Use executemany?
for key, value in metadata.iteritems():
c.execute('insert into metadata values(?, ?, ?)', (new_id, key, value))
self._conn.commit()
return new_id
def has_resource(self, id):
"""
id = ID of the resource to check
Return a boolean
"""
c = self._conn.cursor()
c.execute('select content from resources where id=?', (id,))
try:
c.next()
resource_exists = True
except:
resource_exists = False
c.close()
return resource_exists
def get_resource(self, id):
"""
id = ID of the resource to get
return a stream and an iterator over the metadata dict
"""
c = self._conn.cursor()
c.execute('select content from resources where id=?', (id,))
data = c.fetchone()
if not data:
c.close()
return None, None
data = data[0]
c.execute('select key, value from metadata where id=?', (id,))
metadata = dict(c)
#for row in c:
c.close()
#stream = StringIO(data)
return data, metadata
def update_resource(self, id, content=None, metadata=None):
"""
id - ID of the resource to update
content - text or stream with new resource content, or None to leave content untouched
metadata - dict of metadata to be added/updated
return a stream and an iterator over the metadata dict
"""
c = self._conn.cursor()
if content is not None:
c.execute('update resources set content=? where id=?', (content, id))
        #Use executemany?
        for key, value in (metadata or {}).iteritems():
            c.execute('update metadata set value=? where id=? and key=?', (value, id, key))
self._conn.commit()
return
def delete_resource(self, id):
"""
id = ID of the resource to delete
"""
c = self._conn.cursor()
c.execute('DELETE from resources where id=?', (id,))
c.execute('DELETE from metadata where id=?', (id,))
self._conn.commit()
return
def get_metadata(self, id):
"""
id = ID of the resource to get
return a stream and an iterator over the metadata dict
"""
c = self._conn.cursor()
#c.execute('select content from resources where id=?', (id,))
#data = c.next()[0]
        c.execute('select key, value from metadata where id=?', (id,))
metadata = dict(c)
#for row in c:
c.close()
return metadata
def lookup(self, key, value):
"""
key - metadata key
value - metadata value to match
"""
c = self._conn.cursor()
#c.execute('select content from resources where id=?', (id,))
#data = c.next()[0]
c.execute('select id from metadata where key=? and value=?', (key, value))
result = [ r[0] for r in c ]
#metadata = dict(c)
#for row in c:
c.close()
return result
def orderby(self, key, count=None, reverse=False):
"""
id = ID of the resource to get
return a stream and an iterator over the metadata dict
"""
direction = 'DESC' if reverse else 'ASC'
c = self._conn.cursor()
#c.execute('select content from resources where id=?', (id,))
#data = c.next()[0]
if count:
c.execute('select id, value from metadata where key=? order by value %s limit ?'%(direction), (key, count))
else:
c.execute('select id, value from metadata where key=? order by value %s'%(direction), (key,))
result = [ r[0] for r in c ]
#metadata = dict(c)
#for row in c:
c.close()
return result
#def create_alias(self, alias, id):
# """
# alias - the alias for the resource
# id - the resource's ID
# """
# c = self._conn.cursor()
# c.execute('INSERT into alias values(?, ?)', (alias, id))
# self._conn.commit()
# #def lookup_alias(self, alias):
# #c.execute('SELECT * from alias values(?, ?)', (alias, id))
# return new_id
@staticmethod
def init_db(conn):
c = conn.cursor()
# Create table
c.execute('''create table resources
(id INTEGER PRIMARY KEY, content TEXT)''')
c.execute('''create table metadata
(id INTEGER, key TEXT, value TEXT)''')
#c.execute('''create table aliases
#(alias TEXT, id INTEGER)''')
c.close()
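# Quick-start sketch (in-memory DB; values illustrative):
#   import sqlite3
#   conn = sqlite3.connect(':memory:')
#   driver.init_db(conn)
#   d = driver(conn)
#   rid = d.create_resource(u'hello', {'akara:type': 'text/plain'})
#   content, metadata = d.get_resource(rid)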
import sys
import requests
import html2text
import wikipedia
import dock
from rich.console import Console
from rich.markdown import Markdown
VERSION = '1.7.1'
console = Console()
# pylint settings:
# pylint: disable=E1101
# pylint: disable=E1102
@dock()
def get_request(url: str) -> str:
"""
This function receives a request from the site.
Args:
url (str): A variable that stores the URL that will open in the browser Akasia.
Returns:
site_content (str): The variable contains the content of the site in html format.
response (str): This variable stores the request from the site.
"""
try:
response = requests.get(url)
except requests.exceptions.MissingSchema:
choosing_the_right_url = input(
f"Invalid URL '{url}': No schema supplied. Perhaps you meant http://{url}? (y/n) ")
if choosing_the_right_url.lower() == 'y' or choosing_the_right_url.lower() == 'yes':
response = requests.get(f'http://{url}')
else:
sys.exit()
except requests.exceptions.ConnectionError:
print(f'Site server "{url}" not found.')
sys.exit()
    site_content = str(response.content, response.encoding or 'utf-8')
return site_content, response
@dock()
def print_site(site_content: str, response: str) -> str:
"""
    This function renders the site content as markdown text.
Args:
site_content (str): The variable contains the content of the site in html format.
response (str): This variable stores the request from the site.
Returns:
site (str): The variable stores the text of the site in markdown format.
"""
    if len(site_content) == 0:
        if response.status_code == 404:
            site = 'Error 404, Not Found!'
        elif response.status_code == 500:
            site = 'Error 500, Internal server error!'
        else:
            site = html2text.html2text(site_content)
    else:
        # If non-empty content is detected, convert it.
        # This is to allow customised html error messages.
        site = html2text.html2text(site_content)
    return site
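# Usage sketch (mirrors main() below):
#   content, resp = get_request('https://example.com')
#   console.print(Markdown(print_site(content, resp)))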
@dock()
def save_site_in_html(site_content: str, path: str) -> None:
"""
This function is needed to save the site in html format.
Args:
site_content (str): This variable stores the site in html format.
path (str): This variable stores path which will saved site in format html.
Returns:
None: The function returns nothing.
"""
with open(path, "w") as file:
file.write(site_content)
@dock()
def save_site_in_markdown(site_content: str, path: str) -> None:
"""
This function is needed to save the site in markdown format.
Args:
site_content (str): This variable stores the site in html format.
path (str): This variable stores path which will saved site in format markdown.
Returns:
None: The function returns nothing.
"""
with open(path, "w") as file:
file.write(html2text.html2text(site_content))
@dock()
def main() -> None:
""" This is main function, what initializing web browser Akasia. """
print('''
d8888 888 d8b
d88888 888 Y8P
d88P888 888
d88P 888 888 888 8888b. .d8888b 888 8888b.
d88P 888 888 .88P "88b 88K 888 "88b
d88P 888 888888K .d888888 "Y8888b. 888 .d888888
d8888888888 888 "88b 888 888 X88 888 888 888
d88P 888 888 888 "Y888888 88888P' 888 "Y888888\n\n\n''')
print(f'Version - {VERSION}\n'.center(58))
print('Akasia - A fork tiny python text-based web browser Asiakas.\n'.center(58))
print('Type "quit" or "q" to shut down the browser.'.center(58))
print('Type "google" or "g" to search information in Google.'.center(58))
print('Type "wikipedia" or "w" to search information in Wikipedia.'.center(58))
print('Type "save_html" or "sh" to save site in format html.'.center(58))
print('Type "save_markdown" or "smd" to save site in format markdown.'.center(58))
while True:
link = input('URL: ')
if link.lower() == 'quit' or link.lower() == 'q':
break
if link.lower() == 'google' or link.lower() == 'g':
request = input('Request: ')
link = ('https://google.com/search?q=' + request.replace(' ', '+'))
cont, req_get = get_request(link)
markdown_site = Markdown(print_site(cont, req_get))
console.print(markdown_site)
elif link.lower() == 'wikipedia' or link.lower() == 'w':
try:
request = input('Request: ')
language = input('Language on search in Wikipedia: ')
wikipedia.set_lang(language)
wiki_page = wikipedia.page(request)
type_text = input('Full text(y/n) ')
if type_text.lower() == 'y':
print(wiki_page.content)
elif type_text.lower() == 'n':
print(wikipedia.summary(request))
print('\nPage URL: ' + wiki_page.url)
except wikipedia.exceptions.PageError:
print('Request page not found')
except requests.exceptions.ConnectionError:
print('Please type language by first two letters in language name.')
elif link.lower() == 'save_html' or link.lower() == 'sh':
link = input('URL: ')
path = input('Path: ')
cont, req_get = get_request(link)
save_site_in_html(cont, path)
elif link.lower() == 'save_markdown' or link.lower() == 'smd':
link = input('URL: ')
path = input('Path: ')
cont, req_get = get_request(link)
save_site_in_markdown(cont, path)
else:
cont, req_get = get_request(link)
markdown_site = Markdown(print_site(cont, req_get))
console.print(markdown_site)
if __name__ == "__main__":
main() | Akasia | /Akasia-1.7.1-py3-none-any.whl/akasia.py | akasia.py |
# Akatosh
<p style="text-align: justify;">
<code>Akatosh</code> is a lightweight discrete event simulation library. Unlike the popular library <code>Simpy</code>, which is process-oriented and requires you to write generator functions for simulated events and event interactions, `Akatosh` is fully object-oriented: events are encapsulated as `InstantEvent`/`ContinousEvent` with states, priority and a life-cycle. The actual impact of an event is simply a regular python function. You can create events all at once, or create an event within an event (see the sketch after this paragraph). In addition, `Akatosh` is async, which means events that happen at the same simulated time are executed simultaneously for real, unless they have different priority.
</p>
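For instance, the "create an event within an event" pattern could look like the sketch below. It reuses only the `event` decorator and `Mundus` from the basic example further down; applying the decorator inside a running event is an assumption about the API, not a verbatim recipe:

```py
from Akatosh import event, Mundus

# an instant event at t=1 that, while running, schedules a follow-up at t=2
@event(at=1)
def parent():
    print(f"{Mundus.now}:\tparent event fires")

    @event(at=2)  # assumed: the decorator can also be applied at run time
    def child():
        print(f"{Mundus.now}:\tchild event fires")

Mundus.simulate(3)
```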
<p style="text-align: justify;">
<code>Akatosh</code> also supports <code>Resource</code>, providing the same functionality as <code>Simpy</code> with extra utilities for telemetry collection and interaction with <code>Entity</code>. The <code>Entity</code> is unique to <code>Akatosh</code> and represents an abstract entity with a life-cycle, for example a follower. An <code>Entity</code> supports utility functions to interact with `Resource` and automatically releases all its occupied resources upon termination.
</p>
<p style="text-align: justify;">
You probably already noticed that <code>Akatosh</code> is the name of the "Dragon God of Time" in the Elder Scrolls series; accordingly, the singleton class <code>Mundus</code> is the core of the simulation. <code>Mundus</code> schedules the events, moves time forward and engages async execution.
</p>
To use `Akatosh`:
```
pip install -U Akatosh
```
A basic example is shown below; for more information please look at *Examples* and *API Reference*. Full documentation is available at https://ulfaric.github.io/Akatosh/.
```py
import logging
from Akatosh import event, Mundus
# create two instant event at simulation time 1.0 and 5.0
@event(at=5)
def hellow_world_again():
print(f"{Mundus.now}:\tHello World! Again!")
@event(at=1)
def hellow_world():
print(f"{Mundus.now}:\tHello World!")
# enable debug message
Mundus.set_logger(logging.DEBUG)
# run simulation for 6s
Mundus.simulate(6)
```
| Akatosh | /Akatosh-2.2.6.tar.gz/Akatosh-2.2.6/README.md | README.md |
Akela
=====
Akela is a program that downloads web pages and saves them in
Markdown format.
Usage
-------------
### Downloading
`Akela uri`
- uri - the URI of the document to fetch (output is directed to
the terminal window)
The program will create the folder ~/.akela/library for
saving files.
### Searching in files
`Akela-find place query`
- place - a location (akela-zim://place), where place is
the name of a file entry in the configuration file bm.ini
- query - the document to search for
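For example (a hypothetical query; it assumes a `wikipedia` entry exists in the Zim section of bm.ini): `Akela-find wikipedia Linux`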
### Synchronization
`Akela-sync up` - upload a backup to the server

`Akela-sync down` - download a backup from the server
#### Getting help for the Akela-sync utility:
import akela.sync
help(akela.sync)
### Note
The code for searching and extracting files from ZIM archives
is taken from the [OFFICIAL EXAMPLES OF THE libzim LIBRARY](https://pypi.org/project/libzim/)
and is distributed under the **GNU GPLv3+** license!
News
-------
### Akela 0.3.1
+ Expanded the documentation for the Akela-sync utility
  (it can archive the files specified in the settings,
  not just ~/.akela/)
+ Akela-sync uses requests to upload archive backups
  to the server.
+ The Akela program now prints the document to the terminal;
  the output can be redirected to a file.
### Akela 0.3
+ Added handling of the HTTP and HTTPS protocols
+ The Akela program now "understands" the akela-zim
  pseudo-protocol.
+ Added code for extracting *non-text* data
  in base64 format
+ Added document search in zim files
+ Added the Akela-sync synchronization utility
### Akela 0.2.1
+ Added the geturi script, which allows extracting
  articles from a zim file
### Akela 0.2
+ Added a function for reading ZIM files
+ Added URI support
### Akela 0.1
The first, trial release.
#### URI description:
The string `akela-zim://wikipedia/A/URI` means: take the document
/A/URI from the zim file specified in the wikipedia option
of the Zim section of the configuration file ~/.akela/bm.ini.
##### Example:
`from akela.uri import URI`
`locate = URI("akela-zim://wikisource/A/Человеческое,_слишком_человеческое_(Ницше)")`
`locate.parse()`
`open("test", "w").write(locate.resource)`
| Akela | /Akela-0.3.1.tar.gz/Akela-0.3.1/README.md | README.md |
from os import system
from os.path import expanduser as homedir
from configparser import ConfigParser
from sys import argv, exit
from getpass import getpass
import requests
def upload(remote, user, filename):
"""Upload filename to remote directory using requests"""
# system(f"curl -v -T {filename} --user {user} {remote}")
    r = requests.put(f"{remote}/{filename}", data=open(homedir("~")+f"/{filename}", 'rb').read(), auth=(user, getpass("Server password? ")))
    if r:
        print("Success")
        exit(0)
    else:
        print(f"Error {r.status_code}")
        exit(1)
def download(remote, user, filename):
"""Download file filename from remote directory using requests"""
# system(f"curl -v --user {user} {remote}/{filename} > ~/{filename}")
    r = requests.get(f"{remote}/{filename}", auth=(user, getpass("Server password? ")))
    with open(homedir("~")+f"/{filename}", 'wb') as f:
        print(r.status_code)
        if r:
            f.write(r.content)
            print("Success.")
            exit(0)
        else:
            print(f"Error {r.status_code}")
            exit(1)
def syncup():
"""Create a backup and upload it"""
cfg = ConfigParser()
cfg.read(homedir('~')+'/.akela/bm.ini')
if 'Sync' not in cfg:
print("Нет настроек синхронизации.")
exit(2)
user = cfg['Sync']['User']
remote = cfg['Sync']['Remote']
    try:
        passon = cfg['Sync']['Encrypt']
    except KeyError:
        passon = '0'
    try:
        incldir = cfg['Sync']['Include']
    except KeyError:
        incldir = "~/.akela/*"
if passon.lower() in ['yes', 'y', '1', 'on', 'enable', 'enabled']:
        p = getpass('Password: ')
system(f'7z a -P{p} ~/akela.7z {incldir}')
filename = "akela.7z"
else:
filename = "akela.zip"
system(f'zip -9 ~/akela.zip {incldir}')
upload(remote, user, filename)
def syncdown():
"""Download a backup"""
cfg = ConfigParser()
cfg.read(homedir('~')+'/.akela/bm.ini')
if 'Sync' not in cfg:
print("Нет настроек синхронизации.")
exit(2)
user = cfg['Sync']['User']
remote = cfg['Sync']['Remote']
    try:
        passon = cfg['Sync']['Encrypt']
    except KeyError:
        passon = '0'
if passon.lower() in ['yes', 'y', '1', 'on', 'enable', 'enabled']:
filename = "akela.7z"
else:
filename = "akela.zip"
download(remote, user, filename)
def help():
    prog = argv[0]
    print(f'{prog} up - upload data to the server')
    print(f'{prog} down - download data from the server')
def sync():
try:
if argv[1] == 'down':
syncdown()
elif argv[1] == 'up':
syncup()
else:
help()
except IndexError:
help()
if __name__ == "__main__":
sync() | Akela | /Akela-0.3.1.tar.gz/Akela-0.3.1/akela/sync.py | sync.py |
import re, requests
from akela.config import Config
from akela.zimread import ZIMREAD
from markdownify import markdownify
from libzim.reader import Archive
from libzim.search import Query, Searcher
from libzim.suggestion import SuggestionSearcher
isimage = lambda path: path.endswith(
    ('.webp', '.png', '.gif', '.jpg', '.jpeg', '.bmp'))
class URI:
""" URI handler class """
config = Config()
resource = None
Encod = "default"
head = {"User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'}
    def __init__(self, uri):
        """ Parse URI """
        self.rawuri = uri
        # URI-splitting regex from RFC 3986, appendix B (raw string avoids
        # invalid-escape warnings).
        self.uri = re.match(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?", uri).groups()
    def seturi(self, uri):
        """ Parse new URI """
        self.rawuri = uri
        self.uri = re.match(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?", uri).groups()
def namespace(self):
""" Get protocol """
if type(self.uri) is tuple:
# print(f"Namespace is {self.uri[1]}")
return self.uri[1]
def place(self):
""" Get place (ZIM archive or web server) """
if type(self.uri) is tuple:
# print(f"Place is {self.uri[3]}")
return self.uri[3]
def path(self):
""" Get path to resource """
if type(self.uri) is tuple:
# print(f"Path is {self.uri[4]}")
return self.uri[4]
def load(self, url):
"""Load web page and try to convert it to markdown"""
enq = self.Encod
urli = url
page = requests.get(urli, headers=self.head)
if enq.lower() not in ['default', '']:
page.encoding = enq
page = page.text
b = markdownify(page, heading_style="ATX")
self.resource = b
def parse(self):
""" Handle URI """
if isimage(self.path()):
self.getbinary()
return
if self.namespace() == "akela-zim":
fname = self.config.getzimlocation(self.place())
zim = ZIMREAD(fname)
zim.getarticle(self.path())
self.resource = zim.page
elif self.namespace() in ['http', 'https']:
self.load(self.rawuri)
def getbinary(self):
""" Get binary resource """
if self.namespace() == "akela-zim":
fname = self.config.getzimlocation(self.place())
zim = ZIMREAD(fname)
self.resource = zim.getrawblob(self.path())
def datauri(self, mime):
""" Create data: URI for base64-encoded data """
return f'data:{mime};base64,{self.resource}'
def searchzimdoc(self, search):
""" Search document in ZIM archive """
if self.namespace() == "akela-zim":
fname = self.config.getzimlocation(self.place())
zim = ZIMREAD(fname)
return zim.searchzimdoc(search) | Akela | /Akela-0.3.1.tar.gz/Akela-0.3.1/akela/uri.py | uri.py |
from selenium import webdriver #Selenium powers the autobrowser
from selenium.webdriver.common.keys import Keys #Allows sending of keyboard presses to HTML elements
import time #Allows use of the sleep function
import openpyxl #Allows Python to read Excel files
from openpyxl import load_workbook #Allows Python to load Excel workbooks.
from datetime import date #Obtains system date
#A set is used instead of a list to remove duplicates, and order is unnecessary.
finallist = set()
#
#Takes the name of the airline, and shorts it to the IATA code.
def condenser(airline,flightnum):
if flightnum != 'none':
        #This is not a complete list of airlines; it will need amending
airline = airline.replace('American Airlines', 'AA')
airline = airline.replace('Alaska Airlines', 'AS')
airline = airline.replace('United Airlines', 'UA')
airline = airline.replace('Delta Air Lines', 'DL')
airline = airline.replace('Jetblue Airways Corporation', 'B6')
airline = airline.replace('Southwest Airlines', 'WN')
airline = airline.replace('Aer Lingus', 'EL')
airline = airline.replace('Frontier Airlines Inc.', 'F9')
airline = airline.replace('Virgin Atlantic Airways', 'VS')
airline = airline.replace('Hawaiian Airlines', 'HA')
airline = airline.replace('British Airways', 'BA')
#
#Condenses the airline and the flight number by adding them together into a string.
condensed = airline+str(flightnum)
#
    #TravelTracker exports airlines and trainlines as the same category. This will remove Amtrak entries from the set
if 'Amtrak' not in condensed:
finallist.add(condensed)
#
#
#Loads the Excel doc, sets up variables.
workbook = load_workbook(filename="Flights.xlsx")
workbook.sheetnames
sheet = workbook.active
num = 1
Inbounddate = ""
Outbounddate= ""
today = date.today()
day = today.day
maximum = sheet.max_row
#
for num in range(1, maximum + 1): #max_row is inclusive, so +1 keeps the last row
#Column 2 is the Inbound date, Column 4 is the inbound Airline, Column 1 is Inbound Flight Number.
Inbounddate = sheet.cell(row=num, column=2).value
Inboundtime = str(sheet.cell(row=num, column = 3).value)
#
#Column 6 is the Outbound Date, and Column 8 is the Outbound Airline, Column 5 is the Outbound Flight number
Outbounddate = sheet.cell(row=num, column =6).value
Outboundtime = str(sheet.cell(row=num, column =7).value)
#
    #Checks to see if the "day" in today's date is in the flight date and the
    #departure hour is 16 or earlier. (Note: the substring check is loose;
    #e.g. day "1" also matches the 12th or 21st.)
if str(day) in str(Inbounddate) and int(Inboundtime[0:2]) <=16:
condenser((sheet.cell(row=num, column = 4).value),(sheet.cell(row=num, column=1).value))
if str(day) in str(Outbounddate) and int(Outboundtime[0:2]) <=16:
condenser((sheet.cell(row=num, column = 8).value),(sheet.cell(row=num, column=5).value))
#
print(finallist)
# Start the autobrowser and then go to the login page of Flightaware
driver = webdriver.Chrome()
driver.get("https://flightaware.com/account/session")
#Targets the username and password boxes on the webpage and inputs the credentials for the account, then submits them.
username = driver.find_element_by_name('flightaware_username')
username.send_keys("")
password = driver.find_element_by_name('flightaware_password')
password.send_keys("")
password.send_keys(Keys.ENTER)
#
#Goes to the flight tracking management page, then targets the box to add flights
driver.get("https://flightaware.com/me/manage")
aircraft = driver.find_element_by_id('add_ident')
#
#Enters flight numbers into box, presses enter to submit them.
num = 0
for x in finallist:
aircraft.send_keys(x)
aircraft.send_keys(Keys.ENTER)
time.sleep(1)
num+=1
print('Completed ' + str(num) + ' flights') | AkerFlight | /AkerFlight-0.1-py3-none-any.whl/Flightaware.py | Flightaware.py
Akhet
%%%%%
:Version: 2.0, released 2012-02-12
:Docs-Updated: same
:PyPI: http://pypi.python.org/pypi/Akhet
:Docs: http://docs.pylonsproject.org/projects/akhet/dev/
:Source: https://github.com/Pylons/akhet
:Bugs: https://github.com/Pylons/akhet/issues
:Discuss: pylons-discuss_ list
:Author: `Mike Orr <mailto:[email protected]>`_
:Contributors: Michael Merickel, Marcin Lulek
Akhet is a Pyramid_ library and demo application with a Pylons-like feel.
**Main changes in version 2: (A)** The 'akhet' scaffold gone, replaced by a demo
application, which you can cut and paste from. **(B)** General Pyramid/Pylons
material has been moved out of the manual to the `Pyramid for Pylons Users`_
guide in the `Pyramid Cookbook`_. *(The guide is not yet online as of February
2012.)* **(C)** The include for static routes has changed to "akhet.static", but
"akhet" is still allowed for backward compatibility. **(D)** A new pony module.
**(E)** The repository is now on GitHub in the Pylons Project.
The demo is distributed separately from Akhet. Its repository URL is in the
Demo section.
Akhet runs on Python 2.5 - 2.7. Version 2 has been tested on Pyramid
1.3a6 and 1.2.4 using Python 2.7.2 on Ubuntu Linux 11.10. The next Akhet
version, 2.1, will focus on Python 3 and will drop Python 2.5.
The demo application currently has the same compatibility range as Akhet
itself.
The word "akhet" is the name of the hieroglyph that is Pylons' icon: a sun
shining over two pylons. It means "horizon" or "mountain of light".
Documentation Contents
======================
.. toctree::
:maxdepth: 2
library/index
demo/index
.. toctree::
:maxdepth: 1
changes
rant_scaffold
..
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. include:: links.rst
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/index.rst | index.rst |
Appendix: Rant about scaffolds and PasteScript
----------------------------------------------
The main reason the 'akhet' scaffold is gone is that maintaining it turned out
to be a significant burden. Testing a scaffold requires several manual steps --
change a line of code, generate an app, install it, test a URL, test some other
URLs, change the application, backport the change to the scaffold, generate
another app, install and test it, -OR- make changes directly to the scaffold
and generate an app to see whether it works. If it requires custom application
code to trigger the bug, you have to re-apply the code every time you create the
app. Beyond that, Pyramid evolves over time, so the scaffolds have to be
updated even if they were working OK. And the scaffold API is primitive and
limited; e.g., you can't inherit from a scaffold and specify just the changes
between yours and the parent.
The final barrier
was Python 3. Other packages descended from Paste have been ported to 3
(PasteDeploy, WebOb), but Paste and PasteScript haven't been. There doesn't
seem to be much point because the scaffold API needs to be overhauled anyway,
many of paster's subcommands are obsolete, and some people question the whole
concept of plugin subcommands: what exactly is its benefit over bin scripts?
Pyramid 1.3 drops the Paste and PasteScript
dependencies, and adds bin scripts for the essential utilities Pyramid needs:
'pcreate', 'pserve', 'pshell', 'proutes', 'ptweens', and 'pviews'. These were
derived from the Paste code, and the scaffold API is unchanged.
Two other factors led to the demise of the scaffold. One, users wanted to mix
and match Akhet features and non-Akhet features, and add databases to the
scaffold (e.g., MongoDB). That would lead to more questions in the scaffold, or
more scaffolds, and more testing burden (especially since I didn't use those
databases).
The other factor is, I began to doubt whether certain Akhet features are
necessarily better than their non-Akhet counterparts. For instance, Akhet 1 and
Pyramid have different ways of handling static files. Each way has its pluses
and minuses. Akhet's role is to make the Pylons way available, not to recommend
it beyond what it deserves.
So faced with the burden of maintaining the scaffold and keeping it updated, I
was about to retire Akhet completely, until I realized it could have a new life
without the scaffold. And as I work on my own applications and come up with new
pieces of advice or new convenience classes, I need a place to put them, and
Akhet 2 is an ideal place. So viva the new, scaffold-free Akhet 2.
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/rant_scaffold.rst | rant_scaffold.rst |
Usage and features
%%%%%%%%%%%%%%%%%%
The Akhet demo application shows the Akhet library's features in action, and
contains templates and code you can copy into your own application as a
starting point. The demo is based on the former Akhet application scaffold
from Akhet 1, and what users of that scaffold have later reported doing in
their more recent applications.
The demo is distributed separately from Akhet due to its larger number of
dependencies and more frequent changes. The Akhet library focuses on stability
and backward compatibility, while the demo is free to experiment more and make
backward-incompatible changes, and is in a permanent development mode.
Installation
============
You can install the demo from its source repository like any Pyramid
application:
.. code-block:: console
   $ virtualenv --no-site-packages ~/directory/myvenv
   $ source ~/directory/myvenv/bin/activate
   (myvenv)$ git clone git://github.com/mikeorr/akhet_demo
   (myvenv)$ cd akhet_demo
   (myvenv)$ pip install -e .
   (myvenv)$ pserve development.ini
Features
========
The demo has the following features which originated in the former 'akhet'
scaffold:
* Mako templates.
* Site template to provide a common look and feel to your pages.
* Automatically recognize filenames ending in .html as Mako templates.
* Default stylesheet and browser-neutral reset stylesheet.
* Pylons-like template globals including a helpers module 'h' and a URL
generator 'url', and instructions for adding additional ones.
* Serve static files at any URL, without being limited by URL prefixes.
* Listen on localhost:5000 by default.
* Beaker session and cache configuration.
* Demonstration of flash messages and logging.
The demo introduces the following new features:
* Class-based views using ``@view_config``.
* A pony and a unicorn.
The demo does *not* have these features that were in the former 'akhet'
scaffold:
* A SQLAlchemy model. The Pyramid 'alchemy' scaffold and the Models chapter in
the `Pyramid for Pylons Users`_ guide are sufficient to get started.
* View handlers using 'pyramid_handlers'. Many Akhet users have gone to
class-based views using Pyramid's standard ``@view_config``, so the demo is
doing that now too.
* Subpackages for views and models. These are easy enough to create yourself if
you need them.
.. include:: ../links.rst
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/demo/usage.rst | usage.rst |
Details
%%%%%%%
development.ini
===============
The config file contains the following settings which aren't in Pyramid's
built-in scaffolds:
* mako.directories: necessary when using Mako, to set the template search
path. (Theoretically you don't need this if you specify renderers by asset
spec rather than by relative path, but I couldn't get that to work.)
* cache.\*: Beaker cache settings. These are not actually necessary because
the demo never uses a cache, but they're here for demonstration.
* session.\*: Beaker session settings. These are necessary if you use sessions
or flash messages.
Beaker supports several kinds of session
persistence: in-memory, files, memcached, database, etc. The demo's
configuration uses memory mode, which holds the sessions in memory until the application
quits. It contains commented settings for file-based sessions, which is Pylons'
default. Experienced developers seem to be choosing memcached mode nowadays.
Memory sessions disappear when the server is restarted, and work only with
multithreaded servers, not multiprocess servers. File-based sessions are
persistent, but add the complications of a directory and permissions and
maintenance. Memcached avoids all these problems, and it also scales to
multiple parallel servers, which can all share a memcached session.
If you copy the session configuration to your application, do change
"session.secret" to a random string. This is used to help ensure the integrity
of the session, to prevent people from hijacking it.
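
For example (illustrative values; "memory" matches the demo's default
session type):

.. code-block:: ini

    session.type = memory
    session.key = akhet_demo
    session.secret = CHANGE-ME-to-a-long-random-string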
Init module and main function
=============================
The main function, in addition to the minimal Pyramid configuration, activates
Beaker sessions and caching, and sets up templates, subscribers, routes, and a
static route. The Beaker setup passes the ``settings`` dict to Beaker; that's
how your settings are read. Pyramid configures Mako the same way behind the
scenes, passing the settings to it. The "add_renderer" line tells Pyramid to
recognize filenames ending in ".html" as Mako templates. We'll look at the
subscribers include in a minute.
Activating static routes involves an include line and a "config.add_static_route"
call.
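
In code, that pair of lines looks like this (the package name "myapp" is
illustrative; see the static route chapter):

.. code-block:: python

    # In main().
    config.include("akhet.static")
    config.add_static_route("myapp", "static")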
Helpers
=======
The demo provides a Pylons-like helpers module,
*akhet_demo/lib/helpers.py*. You can put utility functions here for use in
your templates. The helpers module contains imports for WebHelpers' HTML tag helpers,
but they're commented out. (WebHelpers is a Python package containing generic
functions for use in web applications and other applications.) I'm tempted to
actually use the tag helpers in the site template but haven't done so yet.
Most of WebHelpers works with Pyramid, including the popular
``webhelpers.html`` subpackage, ``webhelpers.text``, and ``webhelpers.number``.
You'll have to add a WebHelpers dependency to your application if you want to
use it. The only part of WebHelpers that doesn't work with Pyramid is the
``webhelpers.pylonslib`` subpackage, which depends on Pylons' special globals.
Note that ``webhelpers.paginate`` requires a slightly different configuration
with Pyramid than with Pylons, because ``pylons.url`` is not available. You'll
have to supply a URL generator, perhaps using one of the convenience classes
included in WebHelpers 1.3. Paginate's URL generator is *not* Akhet's URL
generator: it's a different kind of class specific to the paginator's needs.
Subscribers
===========
*akhet_demo/subscribers.py* is unique to the demo. It sets up a URL generator
and configures several Pylons-like globals for the template namespace. The only
thing you need in here is the includeme function, which the application's main
function invokes via the ``config.include(".subscribers")`` line.
The ``add_renderer_globals`` subscriber configures the following variables for
the template namespace:
* ``h``: the helpers module.
* ``r``: an alias for ``request``.
* ``url``: the URL generator.
It has commented code to configure "settings", "session", and "c" variables if
you want those.
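
A minimal sketch of such a subscribers module (the demo's actual file may
differ in detail; the helpers import path is illustrative):

.. code-block:: python

    from pyramid.events import BeforeRender, ContextFound

    from akhet.urlgenerator import URLGenerator

    from akhet_demo.lib import helpers

    def create_url_generator(event):
        """Attach a URL generator to the request (fires on ContextFound)."""
        request = event.request
        request.url_generator = URLGenerator(request.context, request)

    def add_renderer_globals(event):
        """Inject 'h', 'r', and 'url' into the template namespace."""
        request = event["request"]
        event["h"] = helpers
        event["r"] = request
        event["url"] = request.url_generator

    def includeme(config):
        config.add_subscriber(create_url_generator, ContextFound)
        config.add_subscriber(add_renderer_globals, BeforeRender)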
For completeness, here are the system variables Pyramid 1.3 adds to the
template namespace:
* ``context``: the context.
* ``renderer_name``: the name of the renderer.
* ``renderer_info``: a ``RendererHelper`` object (defined in ``pyramid.renderers``).
* ``request``: the request.
* ``view``: the view. (A function or instance.)
As a reminder, everything here is local to the current request. The URL
generator is attached to the request object, and the renderer globals are set
just before the renderer is invoked. These variables are all discarded at the
end of the request.
Views
=====
The views module has a base class called ``Handler`` (but it's not related to
"pyramid_handlers"). The index view demonstrates logging, optionally sets a
flash message, and invokes a Mako template renderer.
The demo pushes a flash message by calling ``self.request.session.flash()``
with the message text. By default this puts the message on the "info" queue,
and it's displayed using an "info" CSS class. You can push the message onto a
different queue by specifying the queue name as a second argument. But that's
only useful if the template pops the messages from the other queue by name,
otherwise they'll never be displayed. It's customary to name the queues
according to the Python logging hierarchy: debug, info (notice), warn(ing),
error, critical. The default stylesheet defines CSS classes with distinct
styling for several of these levels.
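
For example, in a view:

.. code-block:: python

    # Goes on the default "info" queue, styled by the "info" CSS class.
    self.request.session.flash("Record saved.")
    # Goes on a named queue; shown only if a template pops that queue,
    # e.g. request.session.pop_flash("warn").
    self.request.session.flash("Disk is almost full.", "warn")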
.. include:: ../links.rst
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/demo/details.rst | details.rst |
Templates and stylesheets
=========================
The demo's templates and stylesheets are designed to function
in a variety of environments, so you can copy them to your application as a starting
point. The following files are included:
* A home page, *akhet_demo/templates/index.html*
* A site template, *akhet_demo/templates/site.html*
* A stylesheet, *akhet_demo/static/stylesheets/default.css*
* A "reset" stylesheet, *akhet_demo/static/stylesheets/reset.css*
The HTML files are Mako templates. The stylesheets are static files.
index.html
----------
This is a page template, so it contains only the unique parts of this page. The
first three lines are Mako constructs:
.. code-block:: mako
:linenos:
<%inherit file="/site.html" />
<%def name="title()">Hello, ${project}!</%def>
<%def name="ht_title()">${project}</%def>
Line 1 makes the template inherit from the site template, which will add the
site's header and footer. Lines 2 and 3 are Mako methods. They output the body
title (the <h1> at the top of the page) and the head title (the <title> tag)
respectively. Mako templates and methods are not literally Python classes and
methods -- they compile to modules and functions respectively -- but Mako
treats them in a way that's similar to classes and methods.
The "${varname}" syntax is a placeholder which will output the named variable.
Template variables can come from several sources: (1) keys in the view's return
dict, (2) template globals specified in *akhet_demo/subscribers.py*, (3) local
variables defined in the template, (4) built-in Mako variables like ``self``.
The rest of the file is a big chunk of HTML that will be plugged into the site
template. Mako implicitly puts this chunk in a method named "body", which can
be called from other templates as we'll see in a moment.
Site template
-------------
The site template contains the "complete" HTML document, with
placeholders to plug in content from the page template. The most important
placeholder here is "${self.body()}", which outputs the body of the
highest-level template in the inheritance chain.
Note the difference between calling "${body()}" and "${self.body()}". The
former calls a <%def> method defined in the same template. The latter calls the
highest-level <%def> method with that name in the inheritance chain, which may
be in a different template.
The site template also calls "self.title()" and "self.ht_title()", and defines
default implementations for these methods. The default body title outputs
nothing (resulting in an empty title); the default head title is whatever the
body title returns. So you can just define a "title" in your pages and forget about
"ht_title" if it's the same. But there are times when you'll want to make them
different:
* When the body title contains embedded HTML tags like <em>. The head title
can't contain these because it will display them literally rather than
changing the font.
* Sometimes the body title is too wordy for the head title.
* Many sites want the site's name in the head title. A general rule of thumb is
"Short Page Title &emdash; Site Name". Or if you're part of a large
organization: "Short Page Title | Site Name | Organization Name". Search
engines pay special attention to the head title, so it should contain all the
essential words that describe the page, and it should be less than sixty or
so characters so it can be displayed in a variety of contexts.
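
For example, a page might define the pair like this (the names are
illustrative):

.. code-block:: mako

    <%def name="title()">Welcome to <em>MyApp</em></%def>
    <%def name="ht_title()">Welcome | MyApp</%def>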
The other kind of placeholder in the site template is "${url.app}", which is
used to form static URLs like "${url.app}/stylesheets/default.css". "url" is
the URL generator, which the subscriber puts into the template namespace.
"url.app" is the application's URL prefix. This is normally empty for a
top-level application mounted at "/". But if the application is mounted at a
sub-URL like "/site1", that will be what "url.app" is set to.
Normally you'd generate URLs by route name, such as "${url('home')}" or its
full form "${url.route('home')}". But static URLs don't have a route name, and
the URL generator does not have a ``static`` method (although you can define
one in a subclass). So we're left with literal URLs relative to the application
prefix.
The template displays flash messages, which a view may have pushed into the
session before redirecting. The code for this is:
.. code-block:: mako
<div id="content">
<div id="flash-messages">
% for message in request.session.pop_flash():
<div class="info">${message}</div>
% endfor
</div>
The stylesheet displays it all pretty-like.
Reset stylesheet
----------------
This is an industry-standard reset stylesheet by Eric Meyer, which is in the
public domain. The original site is http://meyerweb.com/eric/tools/css/reset/ .
It resets all the tag styles to be consistent across browsers.
The top part of the page is Meyer's original stylesheet; the bottom contains
some overrides. Meyer does remove some attributes which have generally
been assumed to be intrinsic to the tag, such as margins around <p> and <h\*>.
His reasoning is that you should start with nothing and consciously re-add the
styles you want. Some people may find this attitude to be overkill. The reset
stylesheet is just provided as a service if you want to use it. In any case, I
have re-added some expected styles, and also set <dt> to boldface which is a
pet peeve of mine.
If you want something with more bells and whistles, some Pyramid developers
recommend `HTML5 Boilerplate`_.
It's also based on Meyer's stylesheet.
We're exploring stylesheet compilers like Less, but this version of the demo
does not include one.
.. _HTML5 Boilerplate: http://html5boilerplate.com/
Default stylesheet
------------------
This is the stylesheet referenced in the page template; it inherits the reset
stylesheet. It defines some styles the default home page needs. You'll probably
want to adjust them for your layout.
The bottom section has styles for flash messages. The ".info" stanza is used by
the demo. The ".warning" and ".error" styles are not used by
the demo but are provided as extras.
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/demo/content.rst | content.rst |
URL generator
%%%%%%%%%%%%%
A class that consolidates Pyramid's various URL-generating functions into one
concise API that's convenient for templates. It performs the same job as
``pylons.url`` in Pylons applications, but the API is different.
Pyramid has several URL-generation routines but they're scattered between
Pyramid request methods, WebOb request methods, Pyramid request attributes,
WebOb request attributes, and Pyramid functions. They're named inconsistently,
and the names are too long to put repeatedly in templates. The methods are
usually -- but not always -- paired: one method returning the URL path only
("/help"), the other returning the absolute URL ("http://example.com/help").
Pylons defaults to URL paths, while Pyramid tends to absolute URLs (because
that's what the methods with "url" in their names return). The Akhet author
prefers path URLs because the automatically adjust under reverse proxies, where
the application has the wrong notion of what its visible scheme/host/port is,
but the browser knows which scheme/host/port it requested the page on.
``URLGenerator`` unifies all these by giving short one-word names to the most
common methods, and having a switchable default between path URLs and absolute
URLs.
Usage
=====
Copy the "subscribers" module in the Akhet demo (*akhet_demo/subscribers.py*)
to your own application, and modify it if desired. Then, include it in your main
function::
# In main().
config.include(".subscribers")
The subscribers attach the URL generator to the request as
``request.url_generator``, and inject it into the template namespace as ``url``.
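
For example, in view code (the route names are illustrative; templates use
the same object as ``url``)::

    url = request.url_generator
    url("home")               # path for the route named "home"
    url.route("user", id=42)  # same as url("user", id=42)
    url.app                   # the application's URL prefix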
``URLGenerator`` was contributed by Michael Merickel and modified by Mike Orr.
API
===
.. autoclass:: akhet.urlgenerator.URLGenerator
:members: __init__, app, ctx, route, current, resource
:undoc-members:
.. method:: __call__(\*elements, \*\*kw)
Same as the ``.route`` method.
Subclassing
===========
The source code (*akhet/urlgenerator.py*) has some commented examples of things
you can do in a subclass. For instance, you can define a ``static`` method to
generate a URL to a static asset in your application, or a ``deform`` method to
serve static files from the Deform form library. The instance has ``request``
and ``context`` attributes, which you can use to calculate any URL you wish.
You can put a subclass in your application and then adjust the subscribers to
use it.
The reason the base class does not define a ``static`` method, is that we're
not sure yet what the best long-term API is. We want something concise enough
for everyday use but also supporting unusual cases, and something we can
guarantee is correct and we're comfortable supporting long-term. There's also
the issue of the static route helper vs Pyramid's static view, or multiple
Pyramid static views responding to different sub-URLs. In the
meantime, if you want a ``static`` method, you can decide on your own favorite
API and implement it.
.. include:: ../links.rst
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/library/urlgenerator.rst | urlgenerator.rst |
Pony
%%%%
``akhet.pony`` is a port of ``paste.pony`` in the Paste distribution,
originally written by Ian Bicking. Usage::
# In main().
config.include("akhet.pony")
This registers a route at URL "/pony", which displays an ASCII art pony. If
the user appends the query parameter "horn" with a non-blank value, as in
*/pony?horn=1*, it will display a unicorn instead.
The page does not show your application name or anything in your site template,
but it does include a "Home" hyperlink which returns to the application's
home page (normally "/" unless the application is mounted under a URL prefix).
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/library/pony.rst | pony.rst |
Static route
%%%%%%%%%%%%
The static route helper provides a more Pylons-like way to serve static files compared
to Pyramid's standard static view. In Pylons, the static app is an overlay on
"/", so that it looks first for a file under the static directory, and if no
file exists, it falls back to the dynamic application. This static route helper
works the same way: it works as an overlay on "/", so the route matches only if
the file exists, otherwise Pyramid falls through to the next route. The
difference is that Pylons' static app is a WSGI middleware, while the static
route helper registers an ordinary route and a view. By convention you put the static
route last in the route list, but it can go anywhere in the list.
Pyramid's standard `static view`_, in contrast, works only with a URL prefix like
"/static"; it can't serve top-level URLs like "/robots.txt" and "/favicon.ico".
If you want a separate disjunct prefix like "/w3c" (for "/w3c/p3p.xml", the
Internet standard for a machine-readable privacy policy), you'd have to
configure a separate view and static directory for that prefix. With the static route
helper you don't have to configure anything extra, just create a file
"myapp/static/w3c/p3p.xml" and you're done.
The static route helper does have some disadvantages compared to Pyramid's
static view. (1) There's no spiffy method to generate URLs to them. (2) You
can't switch to a static media server and configure the nonexistent spiffy
method to generate external URLs to it. (3) You can't override assets.
For completeness, we'll mention that if you're using Pyramid's static view,
there are a couple workarounds for serving top-level URLs or disjoint URLs.
(1) Use an ordinary route and view to serve a static file. (2) Use the
"pyramid_assetviews" package on PyPI to serve top-level files. So you can
weigh these alternatives against the static route helper. I (the static route
author) am now undecided, so I can't definitively say which way is better. The
demo app uses it mainly so that you can see it in action.
Usage
=====
::
# In main().
config.include("akhet.static")
config.add_static_route("myapp", "static")
API
===
.. function:: config.add_static_route(package, subdir, cache_max_age=3600, \*\*add_route_args)
Register a route and view to serve any URL if a corresponding file exists
under the static directory. If the file doesn't exist, the route will fail
and Pyramid will continue down the route list.
Arguments:
* ``package``: the name of the Python package containing the static files.
* ``subdir``: the subdirectory within the package that contains the files.
This should be a relative directory with "/" separators regardless of
platform.
* ``cache_max_age``: Influences the "Expires" and "Max-Age" HTTP headers in
the response. (Default is 3600 seconds = 1 hour.)
* ``add_route_args``: Additional arguments for ``config.add_route``.
``name`` defaults to "static" but can be overridden. (Every route in your
application must have a unique name.) ``pattern`` and ``view`` may not be
specified; it will raise TypeError if they are.
The API is from Pyramid's early days, so it makes an asset spec out of
``package`` and ``subdir`` for you and doesn't allow you to supply your own. It
also searches only a single directory rather than a search path. These
limitations may be relaxed in a future version.
Changes in version 2
====================
The include module is now "akhet.static"; in version 1 it was "akhet". A
backward compatibility shim is in place.
.. include:: ../links.rst
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/docs/library/static.rst | static.rst |
Breadcrumbs
-----------
The default application does not include this but several people have asked
about it so here is my thoughts. Breadcrumbs are the navigation bar at the top
of the page on many sites.
.. code-block:: text

    Home > Section > Subsection > My Page
The first three are links to ancestor pages, and the last one is just an
abbreviation for the current page. In some variations, the last one is omitted.
The bar gives users an intuitive way to see where they are in the site and to
navigate upward.
Here's one way to do this in the site template:
.. code-block:: mako

    <% crumbs = self.breadcrumbs() %>
    % if crumbs is not None:
        <a href="${url.app}">Home</a>
        % for link in crumbs:
            >
            ${link}
        % endfor
    % endif

    <%def name="breadcrumbs()">
        <% return [] %>
    </%def>
The breadcrumbs method has a Python "<% %>" escape which returns a Python
value. This is not the method's HTML output: the output and the return value
are different things. Mako methods don't return their HTML output, they write
it. The "output" of this method is a blank line, which we never see because we
don't call "${self.breadcrumbs()}".
The default breadcrumbs method returns no crumbs, so only the Home link is
shown. The Home link is always the same so we don't make every page define it.
As a special case, if the method returns ``None``, the entire breadcrumbs bar
is suppressed. This may be desirable on the home page or special pages.
Then each page can define its crumbs like this, omitting the first Home crumb
which is the same on every page.
| Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/unfinished/breadcrumbs.rst | breadcrumbs.rst |
from pyramid.decorator import reify
import pyramid.url as url
class URLGenerator(object):
def __init__(self, context, request, qualified=False):
"""Instantiate a URLGenerator based on the current request.
        * ``context``: a Pyramid Context.
        * ``request``: a Pyramid Request.
* ``qualified``: If true, return fully-qualified URLs
with the "scheme://host" prefix. If false (default), return only the
URL path if the underlying Pyramid function allows it.
"""
self.context = context
self.request = request
self.qualified = qualified
@reify
def ctx(self):
"""The URL of the default view for the current context.
I'm a "reified" attribute which means I start out as a property but
I turn into an ordinary string attribute on the first access.
This saves CPU cycles if I'm accessed often.
I am mainly used with traversal. I am different from ``.app`` when
using context factories. I always return a qualified URL regardless
of the constructor's 'qualified' argument.
"""
return url.resource_url(self.context, self.request)
@reify
def app(self):
"""The application URL or path.
I'm a "reified" attribute which means I start out as a property but
I turn into an ordinary string attribute on the first access.
This saves CPU cycles if I'm accessed often.
I return the application prefix of the URL. Append a slash to get the
home page URL, or additional path segments to get a sub-URL.
If the constructor arg 'qualified' is true, I return
``request.application_url``, otherwise I return ``request.script_name``.
"""
if self.qualified:
return self.request.application_url
else:
return self.request.script_name
def route(self, route_name, *elements, **kw):
"""Generate a route URL.
I return a URL based on a named route. Calling the URLGenerator
instance is the same as calling me.
If the constructor arg 'qualified' is true, I call
``pyramid.url.route_url``, otherwise I call ``pyramid.url.route_path``.
Arguments:
* ``route_name``: the name of a route.
* ``*elements``: additional segments to append to the URL path.
Keyword arguments are passed to the underlying function. The following
are recognized:
* ``_query``: the query parameters. May be a dict-like object with
an ``.items()`` method or a sequence of 2-tuples.
* ``_anchor``: the URL's "#ancor" fragment without the "#".
* ``_qualified``: override the constructor's "qualified" flag.
* ``_app_url``: override the "scheme://host" prefix. (This also causes
the result to be qualified if it wouldn't otherwise be.)
* Other keyword args override path variables defined in the route.
If the relevant route has a *pregenerator* defined, it may modify the
elements or keyword args.
"""
qualified = kw.get("_qualified", self.qualified)
if qualified or "_app_url" in kw:
return url.route_url(route_name, self.request, *elements, **kw)
else:
return url.route_path(route_name, self.request, *elements, **kw)
# sugar for calling url('home')
__call__ = route
def current(self, *elements, **kw):
"""Generate a URL based on the current request's route.
I call ``pyramid.url.current_route_url``. I'm the same as calling
``.route`` with the current route name. The result is always qualified
regardless of the constructor's 'qualified' argument.
"""
return url.current_route_url(self.request, *elements, **kw)
def resource(self, *elements, **kw):
"""Return a "resource URL" as used in traversal.
``*elements`` is the same as with ``.route``. Keyword args ``query``
and ``anchor`` are the same as the ``_query`` and ``_anchor`` args to
``.route``.
When called without arguments, I return the same as ``.ctx``.
"""
return url.resource_url(self.context, self.request, *elements, **kw)
## Commented because I'm unsure of the long-term API.
## If you want to use this, or a more particular one for your
## static package(s), define it in a subclass.
##
# A future version might make 'path' optional, defaulting to
# a value passed to the constructor ("myapp:static/").
#
#def static(self, path, **kw):
# return url.static_url(path, self.request, **kw)
## If you're using the Deform package you may find this useful.
#
#@reify
#def deform(self):
# return url.static_url("deform:static/", self.request) | Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/akhet/urlgenerator.py | urlgenerator.py |
import pkg_resources
from pyramid.static import static_view
def includeme(config):
"""Add static route support to the Configurator.
"""
config.add_directive('add_static_route', add_static_route)
def add_static_route(config, package, subdir, cache_max_age=3600,
**add_route_args):
"""Add a route and view to serve static files from a directory.
I create a catchall route that serves all URLs from a directory of static
files if the corresponding file exists. Subdirectories are also handled.
For example, the URL "/robots.txt" corresponds to file
"PACKAGE/SUBDIR/robots.txt", and "/images/header/logo.png"
corresponds to "PACKAGE/SUBDIR/images/header/logo.png". If the file
doesn't exist, the route won't match the URL, and Pyramid will continue to
the next route or traversal. The route name is 'static', which must not
conflict with your application's other routes.
    For example, ``add_static_route("myapp", "static")`` serves URLs from the
    "static" directory in package "myapp".
Arguments:
* ``config``: a ``pyramid.config.Configurator`` instance.
* ``package``: the name of the Python package containing the static files.
* ``subdir``: the subdirectory in the package that contains the files.
This should be a relative directory with '/' separators regardless of
platform.
* ``cache_max_age``: influences the ``Expires`` and ``Max-Age``
response headers returned by the view (default is 3600 seconds or five
minutes).
* ``**add_route_args``: additional arguments to ``config.add_route``.
'name' defaults to "static" but can be overridden. (Every route in your
application must have a unique name.) 'pattern' and 'view' may not be
specified and will raise TypeError if they are.
"""
for bad_arg in ["pattern", "view"]:
if bad_arg in add_route_args:
raise TypeError("keyword arg '%s' is not allowed")
name = add_route_args.pop("name", "static")
pattern = "/*subpath"
asset = "%s:%s" % (package, subdir)
view = static_view(asset, cache_max_age)
custom_preds = add_route_args.pop("custom_predicates", [])
preds = [StaticViewPredicate(package, subdir)]
preds.extend(custom_preds)
config.add_route(name, pattern, custom_predicates=preds, **add_route_args)
config.add_view(view, route_name=name)
#### Private stuff
class StaticViewPredicate(object):
def __init__(self, package, subdir):
self.package = package
self.subdir = subdir
def __call__(self, info, request):
subpath = info["match"]["subpath"]
#log.debug("subpath is %r", subpath)
if not subpath:
#log.debug("no subpath, returning false")
return False
parts = [self.subdir]
parts.extend(subpath)
resource_name = "/".join(parts)
#log.debug("package=%r, resource_name=%r", self.package, resource_name)
return pkg_resources.resource_exists(self.package, resource_name) | Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/akhet/static.py | static.py |
import base64
import zlib
from pyramid.response import Response
def includeme(config):
"""Add pony power to any Pyramid application.
Defines the "pony" route at URL "/pony".
"""
config.add_route("pony", "/pony")
config.add_view(view, route_name="pony")
PONY = """
eJyFkkFuxCAMRfdzCisbJxK2D5D2JpbMrlI3XXQZDt9PCG0ySgcWIMT79rcN0XClUJlZRB9jVmci
FmV19khjgRFl0RzrKmqzvY8lRUWFlXvCrD7UbAQR/17NUvGhypAF9og16vWtkC8DzUayS6pN3/dR
ki0OnpzKjUBFpmlC7zVFRNL1rwoq6PWXXQSnIm9WoTzlM2//ke21o5g/l1ckRhiPbkDZXsKIR7l1
36hF9uMhnRiVjI8UgYjlsIKCrXXpcA9iX5y7zMmtG0fUpW61Ssttipf6cp3WARfkMVoYFryi2a+w
o/2dhW0OXfcMTnmh53oR9egzPs+qkpY9IKxdUVRP5wHO7UDAuI6moA2N+/z4vtc2k8B+AIBimVU=
"""
UNICORN = """
eJyVVD1vhDAM3e9XeAtIxB5P6qlDx0OMXVBzSpZOHdsxP762E0JAnMgZ8Zn37OePAPC60eV1Dl5b
SS7fB6DmQNGhtegpNlPIQS8HmkYGdSqNqDF9wcMYus4TuBYGsZwIPqXfEoNir5K+R3mbzhlR4JMW
eGpikPpn9wHl2sDgEH1270guZwzKDRf3nTztMvfI5r3fJqEmNxdCyISBcWjNgjPG8Egg2hgT3mJi
KBwNvmPB1hbWJ3TwBfMlqdTzxNyDE2H8zOD5HA4KkqJGPVY/TwnxmPA82kdSJNj7zs+R0d1pB+JO
xn2DKgsdxAfFS2pfTSD0Fb6Uzv7dCQSvE5JmZQEQ90vNjBU1GPuGQpCPS8cGo+dQgjIKqxnJTXbw
ucFzPFVIJXtzk6BXKGPnYsKzvFmGx7A0j6Zqvlvk5rETXbMWTGWj0RFc8QNPYVfhJfMMniCPazWJ
lGtPZecIGJWW6oL2hpbWRZEkChe8eg5Wb7xx/MBZBFjxeZPEss+mRQ3Uhc8WQv684seSRO7i3nb4
7HlKUg8sraz47LmXyh8S0somADvoUpoHjGWl+rUkF0H+EIf/gbyyMg58BBk6L634/fkHUCodMw==
"""
TEMPLATE = """\
<!DOCTYPE html>
<html><head><title>Pony</title></head><body>
<pre>{animal}</pre>
<p><a href="{url}">{link}</a></p>
<p><a href="{home}">Home</a></p>
</body></html>
"""
def view(request):
"""A pony view.
Display a pony.
If query param 'horn' is non-empty, display a unicorn instead.
"""
req = request
home = req.script_name or "/"
    if request.params.get("horn"):
        data = UNICORN
        link = "remove horn!"
        url = req.path            # drop the query string to remove the horn
    else:
        data = PONY
        link = "add horn!"
        url = req.path + "?horn=1"
#animal = data.decode("base64").decode("zlib")
data = base64.b64decode(data)
animal = zlib.decompress(data)
html = TEMPLATE.format(animal=animal, url=url, link=link, home=home)
return Response(html) | Akhet | /Akhet-2.0.tar.gz/Akhet-2.0/akhet/pony.py | pony.py |
import os
import os.path
import mimetypes
import time
import random
import string
import datetime
import collections
import pytz
import requests
import dateutil
import dateutil.parser
import re
import copy
import sys
import six
from decorator import decorate
import hashlib
###
# Version check functions, including decorator and parser
###
def parse_version_string(version_string):
"""Parses a semver version string, stripping off "rc" stuff if present."""
string_parts = version_string.split(".")
version_parts = [
int(re.match("([0-9]*)", string_parts[0]).group(0)),
int(re.match("([0-9]*)", string_parts[1]).group(0)),
int(re.match("([0-9]*)", string_parts[2]).group(0))
]
return version_parts
def bigger_version(version_string_a, version_string_b):
"""Returns the bigger version of two version strings."""
major_a, minor_a, patch_a = parse_version_string(version_string_a)
major_b, minor_b, patch_b = parse_version_string(version_string_b)
if major_a > major_b:
return version_string_a
elif major_a == major_b and minor_a > minor_b:
return version_string_a
elif major_a == major_b and minor_a == minor_b and patch_a > patch_b:
return version_string_a
return version_string_b
def api_version(created_ver, last_changed_ver, return_value_ver):
"""Version check decorator. Currently only checks Bigger Than."""
def api_min_version_decorator(function):
def wrapper(function, self, *args, **kwargs):
if not self.version_check_mode == "none":
if self.version_check_mode == "created":
version = created_ver
else:
version = bigger_version(last_changed_ver, return_value_ver)
major, minor, patch = parse_version_string(version)
if major > self.akkoma_major:
raise AkkomaVersionError("Version check failed (Need version " + version + ")")
elif major == self.akkoma_major and minor > self.akkoma_minor:
raise AkkomaVersionError("Version check failed (Need version " + version + ")")
elif major == self.akkoma_major and minor == self.akkoma_minor and patch > self.akkoma_patch:
raise AkkomaVersionError("Version check failed (Need version " + version + ", patch is " + str(self.akkoma_patch) + ")")
return function(self, *args, **kwargs)
        if function.__doc__:
            function.__doc__ = function.__doc__ + "\n\n *Added: Akkoma v" + created_ver + ", last changed: Akkoma v" + last_changed_ver + "*"
return decorate(function, wrapper)
return api_min_version_decorator
###
# Dict helper class.
# Defined at top level so it can be pickled.
###
class AttribAccessDict(dict):
def __getattr__(self, attr):
if attr in self:
return self[attr]
else:
raise AttributeError("Attribute not found: " + str(attr))
def __setattr__(self, attr, val):
if attr in self:
raise AttributeError("Attribute-style access is read only")
super(AttribAccessDict, self).__setattr__(attr, val)
class Akkoma:
"""
Easy to use Akkoma API wrapper.
Main repository at https://git.mastodont.cat/spla/Akkoma.py
"""
__DEFAULT_BASE_URL = 'https://akkoma.mastodont.cat'
__DEFAULT_TIMEOUT = 300
__DEFAULT_SCOPES = ['read', 'write', 'follow', 'push', 'admin']
__SCOPE_SETS = {
'read': [
'read:accounts',
'read:blocks',
'read:favourites',
'read:filters',
'read:follows',
'read:lists',
'read:mutes',
'read:notifications',
'read:search',
'read:statuses',
'read:bookmarks'
],
'write': [
'write:accounts',
'write:blocks',
'write:favourites',
'write:filters',
'write:follows',
'write:lists',
'write:media',
'write:mutes',
'write:notifications',
'write:reports',
'write:statuses',
'write:bookmarks'
],
'follow': [
'read:blocks',
'read:follows',
'read:mutes',
'write:blocks',
'write:follows',
'write:mutes',
],
'admin:read': [
'admin:read:accounts',
'admin:read:reports',
],
'admin:write': [
'admin:write:accounts',
'admin:write:reports',
],
}
# Dict versions
__DICT_VERSION_APPLICATION = "2.7.2"
__DICT_VERSION_MENTION = "1.0.0"
__DICT_VERSION_MEDIA = "2.8.2"
__DICT_VERSION_ACCOUNT = "3.1.0"
__DICT_VERSION_POLL = "2.8.0"
__DICT_VERSION_STATUS = bigger_version(bigger_version(bigger_version(bigger_version(bigger_version("3.1.0",
__DICT_VERSION_MEDIA), __DICT_VERSION_ACCOUNT), __DICT_VERSION_APPLICATION), __DICT_VERSION_MENTION), __DICT_VERSION_POLL)
__DICT_VERSION_NOTIFICATION = bigger_version(bigger_version("1.0.0", __DICT_VERSION_ACCOUNT), __DICT_VERSION_STATUS)
@staticmethod
def create_app(app_name, scopes=__DEFAULT_SCOPES, redirect_uris=None, website=None, to_file=None, api_base_url=__DEFAULT_BASE_URL,
request_timeout=__DEFAULT_TIMEOUT, session=None):
"""
Create a new app with given app_name, redirect_uris and website.
Specify `api_base_url` if you want to register an app on an different instance.
Specify `website` if you want to give a website for your app.
Returns `client_id` and `client_secret`, both as strings.
"""
api_base_url = Akkoma.__protocolize(api_base_url)
request_data = {
'client_name': app_name,
#'redirect_uris': redirect_uris,
#'website': website
'scopes': " ".join(scopes)
}
try:
if redirect_uris is not None:
if isinstance(redirect_uris, (list, tuple)):
redirect_uris = "\n".join(list(redirect_uris))
request_data['redirect_uris'] = redirect_uris
else:
request_data['redirect_uris'] = 'urn:ietf:wg:oauth:2.0:oob'
if website is not None:
request_data['website'] = website
if session:
ret = session.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout)
response = ret.json()
else:
response = requests.post(api_base_url + '/api/v1/apps', data=request_data, timeout=request_timeout)
response = response.json()
except Exception as e:
raise AkkomaNetworkError("Could not complete request: %s" % e)
if to_file is not None:
with open(to_file, 'w') as secret_file:
secret_file.write(response['client_id'] + "\n")
secret_file.write(response['client_secret'] + "\n")
secret_file.write(api_base_url + "\n")
return (response['client_id'], response['client_secret'])
###
# Authentication, including constructor
###
def __init__(self, client_id=None, client_secret=None, access_token=None,
api_base_url=None, debug_requests=False,
ratelimit_method="wait", ratelimit_pacefactor=1.1,
request_timeout=__DEFAULT_TIMEOUT, akkoma_version=None,
version_check_mode = "created", session=None, feature_set="mainline"):
"""
Create a new API wrapper instance based on the given `client_secret` and `client_id`. If you
give a `client_id` and it is not a file, you must also give a secret. If you specify an
`access_token` then you don't need to specify a `client_id`. It is allowed to specify
neither - in this case, you will be restricted to only using endpoints that do not
require authentication. If a file is given as `client_id`, client ID, secret and
base url are read from that file.
You can also specify an `access_token`, directly or as a file (as written by `log_in()`_). If
a file is given, Akkoma.py also tries to load the base URL from this file, if present. A
client id and secret are not required in this case.
Akkoma.py can try to respect rate limits in several ways, controlled by `ratelimit_method`.
"throw" makes functions throw a `AkkomaRatelimitError` when the rate
limit is hit. "wait" mode will, once the limit is hit, wait and retry the request as soon
as the rate limit resets, until it succeeds. "pace" works like throw, but tries to wait in
between calls so that the limit is generally not hit (How hard it tries to not hit the rate
limit can be controlled by ratelimit_pacefactor). The default setting is "wait". Note that
even in "wait" and "pace" mode, requests can still fail due to network or other problems! Also
note that "pace" and "wait" are NOT thread safe.
Specify `api_base_url` if you wish to talk to an instance other than the flagship one. When
reading from client id or access token files as written by Akkoma.py 1.5.0 or larger,
this can be omitted.
By default, a timeout of 300 seconds is used for all requests. If you wish to change this,
pass the desired timeout (in seconds) as `request_timeout`.
For fine-tuned control over the requests object use `session` with a requests.Session.
The `akkoma_version` parameter can be used to specify the version of Akkoma that Akkoma.py will
expect to be installed on the server. The function will throw an error if an unparseable
Version is specified. If no version is specified, Akkoma.py will set `akkoma_version` to the
detected version.
The version check mode can be set to "created" (the default behaviour), "changed" or "none". If set to
"created", Akkoma.py will throw an error if the version of Akkoma it is connected to is too old
to have an endpoint. If it is set to "changed", it will throw an error if the endpoints behaviour has
changed after the version of Akkoma that is connected has been released. If it is set to "none",
version checking is disabled.
`feature_set` can be used to enable behaviour specific to non-mainline Akkoma API implementations.
Details are documented in the functions that provide such functionality. Currently supported feature
sets are `mainline`, `fedibird` and `pleroma`.
"""
self.api_base_url = None
if not api_base_url is None:
self.api_base_url = Akkoma.__protocolize(api_base_url)
self.client_id = client_id
self.client_secret = client_secret
self.access_token = access_token
self.debug_requests = debug_requests
self.ratelimit_method = ratelimit_method
self._token_expired = datetime.datetime.now()
self._refresh_token = None
self.__logged_in_id = None
self.ratelimit_limit = 300
self.ratelimit_reset = time.time()
self.ratelimit_remaining = 300
self.ratelimit_lastcall = time.time()
self.ratelimit_pacefactor = ratelimit_pacefactor
self.request_timeout = request_timeout
if session:
self.session = session
else:
self.session = requests.Session()
self.feature_set = feature_set
if not self.feature_set in ["mainline", "fedibird", "pleroma"]:
raise AkkomaIllegalArgumentError('Requested invalid feature set')
# Token loading
if self.client_id is not None:
if os.path.isfile(self.client_id):
with open(self.client_id, 'r') as secret_file:
self.client_id = secret_file.readline().rstrip()
self.client_secret = secret_file.readline().rstrip()
try_base_url = secret_file.readline().rstrip()
if (not try_base_url is None) and len(try_base_url) != 0:
try_base_url = Akkoma.__protocolize(try_base_url)
if not (self.api_base_url is None or try_base_url == self.api_base_url):
raise AkkomaIllegalArgumentError('Mismatch in base URLs between files and/or specified')
self.api_base_url = try_base_url
else:
if self.client_secret is None:
raise AkkomaIllegalArgumentError('Specified client id directly, but did not supply secret')
if self.access_token is not None and os.path.isfile(self.access_token):
with open(self.access_token, 'r') as token_file:
self.access_token = token_file.readline().rstrip()
try_base_url = token_file.readline().rstrip()
if (not try_base_url is None) and len(try_base_url) != 0:
try_base_url = Akkoma.__protocolize(try_base_url)
if not (self.api_base_url is None or try_base_url == self.api_base_url):
raise AkkomaIllegalArgumentError('Mismatch in base URLs between files and/or specified')
self.api_base_url = try_base_url
# Versioning
if akkoma_version == None:
self.retrieve_akkoma_version()
else:
try:
self.akkoma_major, self.akkoma_minor, self.akkoma_patch = parse_version_string(akkoma_version)
except:
raise AkkomaVersionError("Bad version specified")
if not version_check_mode in ["created", "changed", "none"]:
raise AkkomaIllegalArgumentError("Invalid version check method.")
self.version_check_mode = version_check_mode
# Ratelimiting parameter check
if ratelimit_method not in ["throw", "wait", "pace"]:
raise AkkomaIllegalArgumentError("Invalid ratelimit method.")
def retrieve_akkoma_version(self):
"""
Determine installed akkoma version and set major, minor and patch (not including RC info) accordingly.
Returns the version string, possibly including rc info.
"""
try:
version_str = self.__instance()["version"]
except:
# instance() was added in 1.1.0, so our best guess is 1.0.0.
version_str = "1.0.0"
self.akkoma_major, self.akkoma_minor, self.akkoma_patch = parse_version_string(version_str)
return version_str
def verify_minimum_version(self, version_str, cached=False):
"""
Update version info from server and verify that at least the specified version is present.
If you specify "cached", the version info update part is skipped.
Returns True if version requirement is satisfied, False if not.
"""
if not cached:
self.retrieve_akkoma_version()
major, minor, patch = parse_version_string(version_str)
if major > self.akkoma_major:
return False
elif major == self.akkoma_major and minor > self.akkoma_minor:
return False
elif major == self.akkoma_major and minor == self.akkoma_minor and patch > self.akkoma_patch:
return False
return True
def log_in(self, client_id=None, client_secret=None, grant_type=None, username=None, password=None, code=None, redirect_uri="urn:ietf:wg:oauth:2.0:oob", refresh_token=None, scopes=__DEFAULT_SCOPES, to_file=None):
"""
Get the access token for a user.
The username is the e-mail address used to log in to Akkoma.
Can persist the access token to file `to_file`, to be used in the constructor.
Handles password and OAuth-based authorization.
Will throw an `AkkomaIllegalArgumentError` if the OAuth or the
username / password credentials given are incorrect, and an
`AkkomaAPIError` if not all of the requested scopes were granted.
For OAuth2, obtain a code by having your user go to the URL returned by
`auth_request_url()`_ and pass it as the code parameter. In this case,
make sure to also pass the same redirect_uri parameter as you used when
generating the auth request URL.
Returns the access token as a string.
"""
if username is not None and password is not None:
params = self.__generate_params(locals(), ['scopes', 'to_file', 'code', 'refresh_token'])
params['grant_type'] = 'password'
elif code is not None:
params = self.__generate_params(locals(), ['scopes', 'to_file', 'username', 'password', 'refresh_token'])
params['grant_type'] = 'authorization_code'
elif refresh_token is not None:
params = self.__generate_params(locals(), ['scopes', 'to_file', 'username', 'password', 'code'])
params['grant_type'] = 'refresh_token'
else:
raise AkkomaIllegalArgumentError('Invalid arguments given. username and password or code are required.')
params['client_id'] = self.client_id
params['client_secret'] = self.client_secret
params['username'] = username
params['password'] = password
#params['scope'] = " ".join(scopes)
try:
response = self.__api_request('POST', '/oauth/token', params, do_ratelimiting=False)
self.access_token = response['access_token']
self.__set_refresh_token(response.get('refresh_token'))
self.__set_token_expired(int(response.get('expires_in', 0)))
except Exception as e:
if username is not None or password is not None:
raise AkkomaIllegalArgumentError('Invalid user name, password, or redirect_uris: %s' % e)
elif code is not None:
raise AkkomaIllegalArgumentError('Invalid access token or redirect_uris: %s' % e)
else:
raise AkkomaIllegalArgumentError('Invalid request: %s' % e)
received_scopes = response["scope"].split(" ")
for scope_set in self.__SCOPE_SETS.keys():
if scope_set in received_scopes:
received_scopes += self.__SCOPE_SETS[scope_set]
if not set(scopes) <= set(received_scopes):
raise AkkomaAPIError(
'Granted scopes "' + " ".join(received_scopes) + '" do not contain all of the requested scopes "' + " ".join(scopes) + '".')
if to_file is not None:
with open(to_file, 'w') as token_file:
token_file.write(response['access_token'] + "\n")
token_file.write(self.api_base_url + "\n")
self.__logged_in_id = None
return response['access_token']
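# Illustrative login flow (credentials and file names are placeholders):
#
#     api = Akkoma(client_id="app_clientcred.secret", api_base_url="https://example.social")
#     api.log_in(username="user@example.com", password="correct horse battery staple",
#                to_file="app_usercred.secret")
#
# A later run can then pass access_token="app_usercred.secret" to the
# constructor, which also restores the base URL stored on the file's
# second line (see the token-loading code in __init__ above).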
###
# Reading data: Notifications
###
#@api_version("1.0.0", "2.9.0", __DICT_VERSION_NOTIFICATION)
def notifications(self, id=None, account_id=None, max_id=None, min_id=None, since_id=None, limit=None, mentions_only=None):
"""
Fetch notifications (mentions, favourites, reblogs, follows) for the logged-in
user. Pass `account_id` to get only notifications originating from the given account.
Can be passed an `id` to fetch a single notification.
Returns a list of `notification dicts`_.
"""
if max_id is not None:
max_id = self.__unpack_id(max_id)
if min_id is not None:
min_id = self.__unpack_id(min_id)
if since_id is not None:
since_id = self.__unpack_id(since_id)
if account_id is not None:
account_id = self.__unpack_id(account_id)
if id is None:
params = self.__generate_params(locals(), ['id'])
return self.__api_request('GET', '/api/v1/notifications', params)
else:
id = self.__unpack_id(id)
url = '/api/v1/notifications/{0}'.format(str(id))
return self.__api_request('GET', url)
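# Example calls (sketch): notifications(limit=10) returns a list of
# notification dicts, while notifications(id=some_id) fetches a single
# notification; ids may be raw values or previously fetched dicts.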
###
# Reading data: Accounts
###
@api_version("1.0.0", "1.0.0", __DICT_VERSION_ACCOUNT)
def account(self, id):
"""
Fetch account information by user `id`.
Does not require authentication for publicly visible accounts.
Returns a `user dict`_.
"""
id = self.__unpack_id(id)
url = '/api/v1/accounts/{0}'.format(str(id))
return self.__api_request('GET', url)
@api_version("1.0.0", "2.1.0", __DICT_VERSION_ACCOUNT)
def account_verify_credentials(self):
"""
Fetch logged-in user's account information.
Returns a `user dict`_ (Starting from 2.1.0, with an additional "source" field).
"""
return self.__api_request('GET', '/api/v1/accounts/verify_credentials')
@api_version("1.0.0", "2.1.0", __DICT_VERSION_ACCOUNT)
def me(self):
"""
Get this user's account. Synonym for `account_verify_credentials()`; does exactly
the same thing, just exists because `account_verify_credentials()` has a confusing
name.
"""
return self.account_verify_credentials()
###
# Internal helpers, dragons probably
###
@staticmethod
def __json_allow_dict_attrs(json_object):
"""
Makes it possible to use attribute notation to access a dict's
elements, while still allowing the dict to act as a dict.
"""
if isinstance(json_object, dict):
return AttribAccessDict(json_object)
return json_object
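# With this hook installed, a parsed response supports both access styles:
# e.g. status["id"] and status.id refer to the same value.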
@staticmethod
def __json_date_parse(json_object):
"""
Parse dates in certain known json fields, if possible.
"""
known_date_fields = ["created_at", "week", "day", "expires_at", "scheduled_at", "updated_at", "last_status_at", "starts_at", "ends_at", "published_at"]
for k, v in json_object.items():
if k in known_date_fields:
if v is not None:
try:
if isinstance(v, int):
json_object[k] = datetime.datetime.fromtimestamp(v, pytz.utc)
else:
json_object[k] = dateutil.parser.parse(v)
except Exception:
raise AkkomaAPIError('Encountered invalid date.')
return json_object
@staticmethod
def __json_truefalse_parse(json_object):
"""
Parse 'True' / 'False' strings in certain known fields
"""
for key in ('follow', 'favourite', 'reblog', 'mention'):
if (key in json_object and isinstance(json_object[key], six.text_type)):
if json_object[key].lower() == 'true':
json_object[key] = True
if json_object[key].lower() == 'false':
json_object[key] = False
return json_object
@staticmethod
def __json_strnum_to_bignum(json_object):
"""
Converts json string numerals to native python bignums.
"""
for key in ('id', 'week', 'in_reply_to_id', 'in_reply_to_account_id', 'logins', 'registrations', 'statuses', 'day', 'last_read_id'):
if (key in json_object and isinstance(json_object[key], six.text_type)):
try:
json_object[key] = int(json_object[key])
except ValueError:
pass
return json_object
@staticmethod
def __json_hooks(json_object):
"""
All the json hooks. Used in request parsing.
"""
json_object = Akkoma.__json_strnum_to_bignum(json_object)
json_object = Akkoma.__json_date_parse(json_object)
json_object = Akkoma.__json_truefalse_parse(json_object)
json_object = Akkoma.__json_allow_dict_attrs(json_object)
return json_object
def __api_request(self, method, endpoint, params={}, files={}, headers={}, access_token_override=None, base_url_override=None, do_ratelimiting=True, use_json=False, parse=True):
"""
Internal API request helper.
"""
response = None
remaining_wait = 0
# "pace" mode ratelimiting: Assume constant rate of requests, sleep a little less long than it
# would take to not hit the rate limit at that request rate.
if do_ratelimiting and self.ratelimit_method == "pace":
if self.ratelimit_remaining == 0:
to_next = self.ratelimit_reset - time.time()
if to_next > 0:
# As a precaution, never sleep longer than 5 minutes
to_next = min(to_next, 5 * 60)
time.sleep(to_next)
else:
time_waited = time.time() - self.ratelimit_lastcall
time_wait = float(self.ratelimit_reset - time.time()) / float(self.ratelimit_remaining)
remaining_wait = time_wait - time_waited
if remaining_wait > 0:
to_next = remaining_wait / self.ratelimit_pacefactor
to_next = min(to_next, 5 * 60)
time.sleep(to_next)
# Generate request headers
headers = copy.deepcopy(headers)
if self.access_token is not None:
headers['Authorization'] = 'Bearer ' + self.access_token
if access_token_override is not None:
headers['Authorization'] = 'Bearer ' + access_token_override
# Determine base URL
base_url = self.api_base_url
if base_url_override is not None:
base_url = base_url_override
if self.debug_requests:
print('Akkoma: Request to endpoint "' + base_url + endpoint + '" using method "' + method + '".')
print('Parameters: ' + str(params))
print('Headers: ' + str(headers))
print('Files: ' + str(files))
# Make request
request_complete = False
while not request_complete:
request_complete = True
response_object = None
try:
kwargs = dict(headers=headers, files=files,
timeout=self.request_timeout)
if not use_json:
if method == 'GET':
kwargs['params'] = params
else:
kwargs['data'] = params
else:
kwargs['json'] = params
# Block list with exactly three entries, matching on hashes of the instance API domain
# For more information, have a look at the docs
if hashlib.sha256(",".join(base_url.split("//")[-1].split("/")[0].split(".")[-2:]).encode("utf-8")).hexdigest() in \
[
"f3b50af8594eaa91dc440357a92691ff65dbfc9555226e9545b8e083dc10d2e1",
"b96d2de9784efb5af0af56965b8616afe5469c06e7188ad0ccaee5c7cb8a56b6",
"2dc0cbc89fad4873f665b78cc2f8b6b80fae4af9ac43c0d693edfda27275f517"
]:
raise Exception("Access denied.")
response_object = self.session.request(method, base_url + endpoint, **kwargs)
except Exception as e:
raise AkkomaNetworkError("Could not complete request: %s" % e)
if response_object is None:
raise AkkomaIllegalArgumentError("Illegal request.")
# Parse rate limiting headers
if 'X-RateLimit-Remaining' in response_object.headers and do_ratelimiting:
self.ratelimit_remaining = int(response_object.headers['X-RateLimit-Remaining'])
self.ratelimit_limit = int(response_object.headers['X-RateLimit-Limit'])
try:
ratelimit_reset_datetime = dateutil.parser.parse(response_object.headers['X-RateLimit-Reset'])
self.ratelimit_reset = self.__datetime_to_epoch(ratelimit_reset_datetime)
# Adjust server time to local clock
if 'Date' in response_object.headers:
server_time_datetime = dateutil.parser.parse(response_object.headers['Date'])
server_time = self.__datetime_to_epoch(server_time_datetime)
server_time_diff = time.time() - server_time
self.ratelimit_reset += server_time_diff
self.ratelimit_lastcall = time.time()
except Exception as e:
raise AkkomaRatelimitError("Rate limit time calculations failed: %s" % e)
# Handle response
if self.debug_requests:
print('Akkoma: Response received with code ' + str(response_object.status_code) + '.')
print('response headers: ' + str(response_object.headers))
print('Response text content: ' + str(response_object.text))
if not response_object.ok:
try:
response = response_object.json(object_hook=self.__json_hooks)
if isinstance(response, dict) and 'error' in response:
error_msg = response['error']
elif isinstance(response, str):
error_msg = response
else:
error_msg = None
except ValueError:
error_msg = None
# Handle rate limiting
if response_object.status_code == 429:
if self.ratelimit_method == 'throw' or not do_ratelimiting:
raise AkkomaRatelimitError('Hit rate limit.')
elif self.ratelimit_method in ('wait', 'pace'):
to_next = self.ratelimit_reset - time.time()
if to_next > 0:
# As a precaution, never sleep longer than 5 minutes
to_next = min(to_next, 5 * 60)
time.sleep(to_next)
request_complete = False
continue
if response_object.status_code == 404:
ex_type = AkkomaNotFoundError
if not error_msg:
error_msg = 'Endpoint not found.'
# this is for compatibility with older versions
# which raised AkkomaAPIError('Endpoint not found.')
# on any 404
elif response_object.status_code == 401:
ex_type = AkkomaUnauthorizedError
elif response_object.status_code == 500:
ex_type = AkkomaInternalServerError
elif response_object.status_code == 502:
ex_type = AkkomaBadGatewayError
elif response_object.status_code == 503:
ex_type = AkkomaServiceUnavailableError
elif response_object.status_code == 504:
ex_type = AkkomaGatewayTimeoutError
elif 500 <= response_object.status_code <= 511:
ex_type = AkkomaServerError
else:
ex_type = AkkomaAPIError
raise ex_type(
'Akkoma API returned error',
response_object.status_code,
response_object.reason,
error_msg)
if parse:
try:
response = response_object.json(object_hook=self.__json_hooks)
except Exception:
raise AkkomaAPIError(
"Could not parse response as JSON, response code was %s, "
"bad json content was '%s'" % (response_object.status_code,
response_object.content))
else:
response = response_object.content
# Parse link headers
if isinstance(response, list) and \
'Link' in response_object.headers and \
response_object.headers['Link'] != "":
tmp_urls = requests.utils.parse_header_links(
response_object.headers['Link'].rstrip('>').replace('>,<', ',<'))
for url in tmp_urls:
if 'rel' not in url:
continue
if url['rel'] == 'next':
# Be paranoid and extract max_id specifically
next_url = url['url']
matchgroups = re.search(r"[?&]max_id=([^&]+)", next_url)
if matchgroups:
next_params = copy.deepcopy(params)
next_params['_pagination_method'] = method
next_params['_pagination_endpoint'] = endpoint
max_id = matchgroups.group(1)
if max_id.isdigit():
next_params['max_id'] = int(max_id)
else:
next_params['max_id'] = max_id
if "since_id" in next_params:
del next_params['since_id']
if "min_id" in next_params:
del next_params['min_id']
response[-1]._pagination_next = next_params
if url['rel'] == 'prev':
# Be paranoid and extract since_id or min_id specifically
prev_url = url['url']
# Old and busted (pre-2.6.0): since_id pagination
matchgroups = re.search(r"[?&]since_id=([^&]+)", prev_url)
if matchgroups:
prev_params = copy.deepcopy(params)
prev_params['_pagination_method'] = method
prev_params['_pagination_endpoint'] = endpoint
since_id = matchgroups.group(1)
if since_id.isdigit():
prev_params['since_id'] = int(since_id)
else:
prev_params['since_id'] = since_id
if "max_id" in prev_params:
del prev_params['max_id']
response[0]._pagination_prev = prev_params
# New and fantastico (post-2.6.0): min_id pagination
matchgroups = re.search(r"[?&]min_id=([^&]+)", prev_url)
if matchgroups:
prev_params = copy.deepcopy(params)
prev_params['_pagination_method'] = method
prev_params['_pagination_endpoint'] = endpoint
min_id = matchgroups.group(1)
if min_id.isdigit():
prev_params['min_id'] = int(min_id)
else:
prev_params['min_id'] = min_id
if "max_id" in prev_params:
del prev_params['max_id']
response[0]._pagination_prev = prev_params
return response
###
# Reading data: Apps
###
@api_version("2.0.0", "2.7.2", __DICT_VERSION_APPLICATION)
def app_verify_credentials(self):
"""
Fetch information about the current application.
Returns an `application dict`_.
"""
return self.__api_request('GET', '/api/v1/apps/verify_credentials')
def __generate_params(self, params, exclude=[]):
"""
Internal named-parameters-to-dict helper.
Note for developers: If called with locals() as params,
as is the usual practice in this code, the __generate_params call
(or at least the locals() call) should generally be the first thing
in your function.
"""
params = collections.OrderedDict(params)
if 'self' in params:
del params['self']
param_keys = list(params.keys())
for key in param_keys:
if isinstance(params[key], bool):
params[key] = '1' if params[key] else '0'
for key in param_keys:
if params[key] is None or key in exclude:
del params[key]
param_keys = list(params.keys())
for key in param_keys:
if isinstance(params[key], list):
params[key + "[]"] = params[key]
del params[key]
return params
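# Worked example (hypothetical input, for illustration): with
#     params == {'self': ..., 'limit': 5, 'local': True, 'max_id': None}
# and the default exclude list, this returns
#     OrderedDict([('limit', 5), ('local', '1')])
# 'self' and None-valued keys are dropped, booleans become '0'/'1', and any
# list value would be re-keyed as 'key[]' for the query string.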
###
# Writing data: Statuses
###
@api_version("1.0.0", "2.8.0", __DICT_VERSION_STATUS)
def status_post(self, status, in_reply_to_id=None, media_ids=None,
sensitive=False, visibility=None, spoiler_text=None,
language=None, idempotency_key=None, content_type=None,
scheduled_at=None, poll=None, quote_id=None):
"""
Post a status. Can optionally be in reply to another status and contain
media.
`media_ids` should be a list. (If it's not, the function will turn it
into one.) It can contain up to four pieces of media (uploaded via
`media_post()`_). `media_ids` can also be the `media dicts`_ returned
by `media_post()`_ - they are unpacked automatically.
The `sensitive` boolean decides whether or not media attached to the post
should be marked as sensitive, which hides it by default on the Mastodon
web front-end.
The visibility parameter is a string value and accepts any of:
'direct' - post will be visible only to mentioned users
'private' - post will be visible only to followers
'unlisted' - post will be public but not appear on the public timeline
'public' - post will be public
If not passed in, visibility defaults to match the current account's
default-privacy setting (starting with Mastodon version 1.6) or its
locked setting - private if the account is locked, public otherwise
(for Mastodon versions lower than 1.6).
The `spoiler_text` parameter is a string to be shown as a warning before
the text of the status. If no text is passed in, no warning will be
displayed.
Specify `language` to override automatic language detection. The parameter
accepts all valid ISO 639-2 language codes.
You can set `idempotency_key` to a value to uniquely identify an attempt
at posting a status. Even if you call this function more than once,
if you call it with the same `idempotency_key`, only one status will
be created.
Pass a datetime as `scheduled_at` to schedule the toot for a specific time
(the time must be at least 5 minutes into the future). If this is passed,
status_post returns a `scheduled toot dict`_ instead.
Pass `poll` to attach a poll to the status. An appropriate object can be
constructed using `make_poll()`_ . Note that as of Mastodon version
2.8.2, you can only have either media or a poll attached, not both at
the same time.
**Specific to `pleroma` feature set:** Specify `content_type` to set
the content type of your post on Pleroma. It accepts 'text/plain' (default),
'text/markdown', 'text/html' and 'text/bbcode'. This parameter is not
supported on Mastodon servers, but will be safely ignored if set.
**Specific to `fedibird` feature set:** The `quote_id` parameter is
a non-standard extension that specifies the id of a quoted status.
Returns a `toot dict`_ with the new status.
"""
if quote_id is not None:
if self.feature_set != "fedibird":
raise AkkomaIllegalArgumentError('quote_id is only available with feature set fedibird')
quote_id = self.__unpack_id(quote_id)
if content_type is not None:
if self.feature_set != "pleroma":
raise AkkomaIllegalArgumentError('content_type is only available with feature set pleroma')
# It would be better to read this from nodeinfo and cache, but this is easier
if content_type not in ["text/plain", "text/html", "text/markdown", "text/bbcode"]:
raise AkkomaIllegalArgumentError('Invalid content type specified')
if in_reply_to_id is not None:
in_reply_to_id = self.__unpack_id(in_reply_to_id)
if scheduled_at is not None:
scheduled_at = self.__consistent_isoformat_utc(scheduled_at)
params_initial = locals()
# Validate poll/media exclusivity
if poll is not None:
if media_ids is not None and len(media_ids) != 0:
raise ValueError('Status can have media or poll attached - not both.')
# Validate visibility parameter
valid_visibilities = ['private', 'public', 'unlisted', 'direct']
if params_initial['visibility'] is None:
del params_initial['visibility']
else:
params_initial['visibility'] = params_initial['visibility'].lower()
if params_initial['visibility'] not in valid_visibilities:
raise ValueError('Invalid visibility value! Acceptable '
'values are %s' % valid_visibilities)
if params_initial['language'] is None:
del params_initial['language']
if params_initial['sensitive'] is False:
del params_initial['sensitive']
headers = {}
if idempotency_key is not None:
headers['Idempotency-Key'] = idempotency_key
if media_ids is not None:
try:
media_ids_proper = []
if not isinstance(media_ids, (list, tuple)):
media_ids = [media_ids]
for media_id in media_ids:
if isinstance(media_id, dict):
media_ids_proper.append(media_id["id"])
else:
media_ids_proper.append(media_id)
except Exception as e:
raise AkkomaIllegalArgumentError("Invalid media "
"dict: %s" % e)
params_initial["media_ids"] = media_ids_proper
if params_initial['content_type'] is None:
del params_initial['content_type']
use_json = poll is not None
params = self.__generate_params(params_initial, ['idempotency_key'])
return self.__api_request('POST', '/api/v1/statuses', params, headers = headers, use_json = use_json)
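# Illustrative posting sketch (file name and text are placeholders):
#
#     media = api.media_post("cat.png", description="A cat")
#     api.status_post("Look at this cat!", media_ids=[media], visibility="unlisted")
#
# Passing the media dict directly works because the loop above unpacks
# dicts to their "id" field.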
###
# Writing data: Notifications
###
#@api_version("1.0.0", "1.0.0", "1.0.0")
def notifications_clear(self):
"""
Clear out a user's notifications
"""
self.__api_request('POST', '/api/v1/notifications/clear')
#@api_version("1.3.0", "2.9.2", "2.9.2")
def notifications_dismiss(self, id):
"""
Deletes a single notification
"""
id = self.__unpack_id(id)
url = '/api/v1/notifications/{0}/dismiss'.format(str(id))
self.__api_request('POST', url)
###
# Writing data: Media
###
@api_version("1.0.0", "2.9.1", __DICT_VERSION_MEDIA)
def media_post(self, media_file, mime_type=None, description=None, focus=None):
"""
Post an image, video or audio file. `media_file` can either be image data or
a file name. If image data is passed directly, the mime
type has to be specified manually, otherwise, it is
determined from the file name. `focus` should be a tuple
of floats between -1 and 1, giving the x and y coordinates
of the image's focus point for cropping (with the origin being the image's
center).
Throws an `AkkomaIllegalArgumentError` if the mime type of the
passed data or file cannot be determined properly.
Returns a `media dict`_. This contains the id that can be used in
status_post to attach the media file to a toot.
"""
if isinstance(media_file, str) and os.path.isfile(media_file):
if mime_type is None:
mime_type = guess_type(media_file)
media_file = open(media_file, 'rb')
if mime_type is None:
raise AkkomaIllegalArgumentError('Could not determine mime type'
' or data passed directly '
'without mime type.')
random_suffix = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
file_name = "akkomapy_upload_" + str(time.time()) + "_" + str(random_suffix) + mimetypes.guess_extension(mime_type)
if focus is not None:
focus = str(focus[0]) + "," + str(focus[1])
media_file_description = (file_name, media_file, mime_type)
return self.__api_request('POST', '/api/v1/media',
files={'file': media_file_description},
params={'description': description, 'focus': focus})
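# Focus examples (sketch, following the usual Mastodon focal-point
# convention): focus=(0.0, 0.0) keeps the crop centered, while
# focus=(-1.0, 1.0) anchors it toward the image's top-left corner.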
def __unpack_id(self, id):
"""
Internal object-to-id converter
Checks if id is a dict that contains id and
returns the id inside, otherwise just returns
the id straight.
"""
if isinstance(id, dict) and "id" in id:
return id["id"]
else:
return id
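# e.g. __unpack_id({"id": 42}) -> 42 and __unpack_id(42) -> 42, which is why
# public methods accept either raw ids or previously fetched dicts.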
def __set_token_expired(self, value):
"""Internal helper for oauth code"""
self._token_expired = datetime.datetime.now() + datetime.timedelta(seconds=value)
return
def __set_refresh_token(self, value):
"""Internal helper for oauth code"""
self._refresh_token = value
return
@staticmethod
def __protocolize(base_url):
"""Internal add-protocol-to-url helper"""
if not base_url.startswith("http://") and not base_url.startswith("https://"):
base_url = "https://" + base_url
# Some API endpoints can't handle extra /'s in path requests
base_url = base_url.rstrip("/")
return base_url
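# e.g. __protocolize("example.social/") -> "https://example.social"; URLs that
# already carry a protocol are only stripped of trailing slashes.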
##
# Exceptions
##
class AkkomaError(Exception):
"""Base class for Akkoma.py exceptions"""
class AkkomaVersionError(AkkomaError):
"""Raised when a function is called that the version of Akkoma for which
Akkoma.py was instantiated does not support"""
class AkkomaIllegalArgumentError(ValueError, AkkomaError):
"""Raised when an incorrect parameter is passed to a function"""
pass
class AkkomaIOError(IOError, AkkomaError):
"""Base class for Akkoma.py I/O errors"""
class AkkomaNetworkError(AkkomaIOError):
"""Raised when network communication with the server fails"""
pass
class AkkomaReadTimeout(AkkomaNetworkError):
"""Raised when a stream times out"""
pass
class AkkomaAPIError(AkkomaError):
"""Raised when the akkoma API generates a response that cannot be handled"""
pass
class AkkomaNotFoundError(AkkomaAPIError):
"""Raised when the akkoma API returns a 404 Not Found error"""
pass
class AkkomaMalformedEventError(AkkomaError):
"""Raised when the server-sent event stream is malformed"""
pass | Akkoma.py | /Akkoma.py-0.1.6-py3-none-any.whl/akkoma/Akkoma.py | Akkoma.py |
import Decision_Tree.file.loader as loader
import time as t
import random as r
class Settings:
class console_output:
Train_Data_loadtime = True
class Data:
training_data = []
def load_from_file(path):
past = t.time()
Data.training_data = loader.load_td_file(path)
if Settings.console_output.Train_Data_loadtime:
print(f'training data loaded in {round((t.time()-past)*1000)}ms')
def load_example(code):
if code == 1:
Data.training_data = [
['Sunny', 'High', 'Weak', False],
['Sunny', 'High', 'Strong', False],
['Overcast', 'High', 'Weak', True],
['Rain', 'High', 'Weak', True],
['Rain', 'Normal', 'Weak', True],
['Rain', 'Normal', 'Strong', False],
['Overcast', 'Normal', 'Strong', True],
['Sunny', 'High', 'Weak', False],
['Sunny', 'Normal', 'Weak', True],
['Rain', 'Normal', 'Weak', True],
['Sunny', 'Normal', 'Strong', True],
['Overcast', 'High', 'Strong', True],
['Overcast', 'Normal', 'Weak', True],
['Rain', 'High', 'Strong', False]
]
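# Illustrative usage (a sketch; assumes the package is importable so that
# Data and Tree are in scope):
#
#     Data.load_example(1)
#     Tree.predict(['Sunny', 'Normal', 'Weak'])   # -> True for this sample set
#
# The query list must follow the training-row column order
# (outlook, humidity, wind), without the trailing label.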
class Tree:
def predict(data):
if Data.training_data:
index = 0
pure_found = False
list_of_all = Data.training_data
while not pure_found:
counter = 0
new_list = []
for each in list_of_all:
if each[index] == data[index]:
new_list.append(each)
counter += int(each[len(each) - 1])
if new_list:
list_of_all = new_list
pure_found = (counter == len(list_of_all))
if counter == 0:
break
index += 1
else:
return bool(r.randint(0,1))
return pure_found
else:
print('No training data loaded.') | Akram-Hssaini | /Akram%20Hssaini-0.0.1.tar.gz/Akram Hssaini-0.0.1/__init__.py | __init__.py |
VowelMap = {
"halfu": {
"Malayalam": "ഉ\u0D4D",
"Devanagari": "ॶ",
"Sharada": "𑆃𑇋𑆶",
"Tamil": "உ\u0BCD",
"IAST": "ŭ",
"ISO": "ŭ",
"IPA": "ʉ",
"OthersRev": [4, "ʼ"],
"OthersNonRev": [4],
},
"halfuu": {
"Malayalam": "ഊ\u0D4D",
"Devanagari": "ॷ",
"Sharada": "𑆃𑇋𑆷",
"Tamil": "ஊ\u0BCD",
"IAST": "ŭ\u0304",
"ISO": "ŭ\u0304",
"IPA": "ʉː",
"OthersRev": [5, "ʼ"],
"OthersNonRev": [5],
},
"oe": {
"Devanagari": "ॳ",
"Sharada": "𑆃𑇋",
"IAST": "ö",
"ISO": "ö",
"IPA": "ʉː",
"OthersRev": [0, "ʼ"],
"OthersNonRev": [0],
},
"oee": {
"Devanagari": "ॴ",
"Sharada": "𑆃𑇋𑆳",
"IAST": "ȫ",
"ISO": "ȫ",
"IPA": "ʉː",
"OthersRev": [0, "ʼʼ"],
"OthersNonRev": [0],
},
}
VowelSignMap = {
"halfu": {
"Malayalam": "\u0D41\u0D4D",
"Devanagari": "\u0956",
"Sharada": "\U000111CB\U000111B6",
"Tamil": "\u0BC1\u0BCD",
"IAST": "ŭ",
"ISO": "ŭ",
"IPA": "ʉː",
"OthersRev": [3, "ʼ"],
"OthersNonRev": [3],
},
"halfuu": {
"Malayalam": "\u0D42\u0D4D",
"Devanagari": "\u0957",
"Sharada": "\U000111CB\U000111B7",
"Tamil": "\u0BC2\u0BCD",
"IAST": "ŭ\u0304",
"ISO": "ŭ\u0304",
"IPA": "ʉ",
"OthersRev": [4, "ʼ"],
"OthersNonRev": [4],
},
"oe": {
"Devanagari": "\u093A",
"Sharada": "\U000111CB",
"IAST": "ö",
"ISO": "ö",
"IPA": "ʉː",
"OthersRev": [-1, "ʼ\u02BD"],
"OthersNonRev": [-1],
},
"oee": {
"Devanagari": "\u093B",
"Sharada": "\U000111CB\U000111B3",
"IAST": "ȫ",
"ISO": "ȫ",
"IPA": "ʉː",
"OthersRev": [-1, "ʼʼ"],
"OthersNonRev": [-1],
},
}
ConsonantMap = {}
SignMap = {}
AyogavahaMap = {
"Jihvamuliya": {
"IAST": "ẖ",
"ISO": "ẖ",
"IASTPali": "ẖ",
"Titus": "ẖ",
"HK": "X",
"Itrans": "X",
"Devanagari": "\u1CF5",
"Bengali": "\u1CF5",
"Assamese": "\u1CF5",
"Kannada": "\u0CF1",
"Tibetan": "\u0F88",
"Brahmi": "\U00011003",
"Sharada": "\U000111C2",
"Newa": "\U00011460",
"Soyombo": "\U00011A84",
"OthersRev": [2, "\u02BD"],
"OthersNonRev": [2],
},
"Upadhmaniya": {
"IAST": "ḫ",
"ISO": "ḫ",
"IASTPali": "ḫ",
"Titus": "ḫ",
"HK": "F",
"Itrans": "F",
"Devanagari": "\u1CF6",
"Bengali": "\u1CF6",
"Assamese": "\u1CF6",
"Kannada": "\u0CF2",
"Tibetan": "\u0F89",
"Brahmi": "\U00011004",
"Sharada": "\U000111C3",
"Newa": "\U00011461",
"Soyombo": "\U00011A85",
"OthersRev": [2, "\u02BD"],
"OthersNonRev": [2],
},
} | Aksarantara | /Aksarantara-1.1.0-py3-none-any.whl/aksarantara/data.py | data.py |
import convert as cn
import xml.etree.ElementTree as ET
import Map as GM
import re
def GetSyllables(Strng, Script):
if Script not in GM.IndicScripts:
ListV = "|".join(
sorted(GM.CrunchSymbols(GM.Vowels, Script), key=len, reverse=True)
)
ListC = "|".join(
sorted(GM.CrunchSymbols(GM.Consonants, Script), key=len, reverse=True)
)
ListA = "|".join(
sorted(GM.CrunchSymbols(GM.CombiningSigns, Script), key=len, reverse=True)
)
Sylbl = re.compile("((" + ListC + ")*" + "((" + ListV + ")(" + ListA + ")?)?)")
ListSyl = [Mtch[0] for Mtch in Sylbl.findall(Strng) if Mtch[0] != ""]
return ListSyl
def GetSidRanCh(Char, Script, reverse=False):
if Script is "Siddham":
tree = ET.parse(
"C:\Personal Projects\\aksharamukha\ScriptMap\EastIndic\siddham.xml"
)
tbl = "tbl_siddham"
defChar = "袎"
elif Script is "Ranjana":
tree = ET.parse(
"C:\Personal Projects\\aksharamukha\ScriptMap\EastIndic\\ranjana.xml"
)
tbl = "tbl_lantsa"
defChar = "跬"
root = tree.getroot()
fnd = False
for mapng in root.findall(tbl):
for ch in mapng:
if ch.text == Char:
fndmap = mapng
fnd = True
if fnd:
break
if fnd:
if not reverse:
return fndmap.find("chars").text
else:
return fndmap.find("ime").text
else:
return defChar
def SiddhRanjConv(Strng, Script, reverse=False):
if not reverse:
Sylbl = sorted(GetSyllables(Strng, "HK"), key=len, reverse=True)
SylblCon = [GetSidRanCh(x, Script) for x in Sylbl if x != ""]
for x, y in zip(Sylbl, SylblCon):
Strng = Strng.replace(x, y)
Strng = Strng.replace("_", "")
else:
CharList = list(set(Strng.replace(" ", "")))
if Script == "Siddham":
Strng = Strng.replace("袎", "(.)")
else:
Strng = Strng.replace("跬", "(.)")
for char in CharList:
Strng = Strng.replace(char, GetSidRanCh(char, Script, reverse=True))
return Strng
if __name__ == "__main__":
print(cn.convertScript("namo", "HK", "Siddham"))
print(cn.convertScript("巧伕", "Siddham", "Telugu"))
print(cn.convertScript("巧伕", "Siddham", "Ranjana"))
print(cn.convertScript("呣呺", "Ranjana", "Kannada")) | Aksarantara | /Aksarantara-1.1.0-py3-none-any.whl/aksarantara/Ranjana.py | Ranjana.py |
import Map as GM, fixer as CF, pre_processing as PrP
import post_processing as PP
import East.SiddhamRanjana as SR
import data as FB
import string
import re
from functools import cmp_to_key
import json
import trans
def TamilSort(x, y):
if "\u0B83" in x[0] and len(x[0]) != 1:
return -1
elif x[0] < y[0]:
return 1
else:
return 0
def lenSort(x, y):
if len(x[0]) > len(y[0]):
return -1
else:
return 0
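# lenSort orders mapping pairs so longer source strings come first, ensuring
# multi-character sequences are replaced before their prefixes; e.g.
#     sorted([("a", "x"), ("ai", "y")], key=cmp_to_key(lenSort))
# yields [("ai", "y"), ("a", "x")]. Note the comparator never returns 1 and
# relies on sort stability for equal-length keys.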
def convertInter(Strng, Source):
ScriptAll = (
GM.Vowels
+ GM.Consonants
+ GM.CombiningSigns
+ GM.Numerals
+ GM.Signs
+ GM.Aytham
)
SourceScript = GM.CrunchSymbols(ScriptAll, Source)
TargetScript = GM.CrunchSymbols(ScriptAll, GM.Inter)
ScriptMapAll = sorted(zip(SourceScript, TargetScript), key=cmp_to_key(lenSort))
for x, y in ScriptMapAll:
Strng = Strng.replace(x, y)
return Strng
def convertScript(Strng, Source, Target):
charPairs = []
Schwa = "\uF000"
DepV = "\u1E7F"
if Source in GM.LatinScripts and Target in GM.IndicScripts:
try:
Strng = getattr(CF, "Fix" + Source)(Strng, reverse=True)
except AttributeError:
pass
if Source in ["IAST", "ISO", "ISOPali", "Titus"]:
Strng = Strng.replace("ŭ", "u\u00D7")
Strng = Strng.replace("{}", "\u200C")
Strng = Strng.replace("()", "\u200D")
Strng = CF.VedicSvarasLatinIndic(Strng, Source)
punc = (
"("
+ "|".join(
["\u005C" + x for x in list(string.punctuation)]
+ ["\s"]
+ [
x.replace(".", "\.")
for x in GM.CrunchSymbols(GM.Signs, Source)[1:3]
]
)
+ ")"
)
sOm, tOm = GM.CrunchList("OmMap", Source)[0], GM.CrunchList("OmMap", Target)[0]
Strng = re.sub(punc + sOm + punc, r"\1" + tOm + r"\2", Strng)
Strng = re.sub("^" + sOm + punc, tOm + r"\1", Strng)
Strng = re.sub(punc + sOm + "$", r"\1" + tOm, Strng)
Strng = re.sub("^" + sOm + "$", tOm, Strng)
punc = "(\s)"
Strng = re.sub(punc + sOm + punc, r"\1" + tOm + r"\2", Strng)
Strng = re.sub("^" + sOm + punc, tOm + r"\1", Strng)
Strng = re.sub(punc + sOm + "$", r"\1" + tOm, Strng)
Strng = re.sub("^" + sOm + "$", tOm, Strng)
SourceOld = Source
Strng = convertInter(Strng, Source)
Source = GM.Inter
Strng = PrP.RomanPreFix(Strng, Source)
Strng = Strng.replace("ṿ×_", "ṿ")
Strng = Strng.replace("ṿ×_", "ṿ")
ha = GM.CrunchSymbols(GM.Consonants, Source)[32]
charPairs = []
for charList in GM.ScriptAll:
TargetScript = GM.CrunchSymbols(GM.retCharList(charList), Target)
if charList == "VowelSigns":
SourceScript = [
DepV + x for x in GM.CrunchSymbols(GM.VowelSigns, Source)
]
else:
SourceScript = GM.CrunchSymbols(GM.retCharList(charList), Source)
ScriptMap = list(zip(SourceScript, TargetScript))
ScriptMap.sort(reverse=True)
charPairs = charPairs + ScriptMap
charPairs = sorted(charPairs, key=cmp_to_key(lenSort))
for x, y in charPairs:
Strng = Strng.replace(x, y)
Strng = Strng.replace(
"_" + GM.CrunchSymbols(GM.Vowels, Target)[2],
GM.CrunchSymbols(GM.Vowels, Target)[2],
)
Strng = Strng.replace(
"_" + GM.CrunchSymbols(GM.Vowels, Target)[4],
GM.CrunchSymbols(GM.Vowels, Target)[4],
)
vir = GM.CrunchList("ViramaMap", Target)[0]
Strng = Strng.replace(vir + "[]", "\u200D" + vir)
if Source in ["Inter"]:
Strng = Strng.replace("\u00D7", vir)
Strng = CF.FixIndicOutput(Strng, Source, Target)
elif Source in GM.LatinScripts and Target in GM.LatinScripts:
try:
Strng = getattr(CF, "Fix" + Source)(Strng, reverse=True)
except AttributeError:
pass
ScriptAll = (
GM.Vowels
+ GM.Consonants
+ GM.CombiningSigns
+ GM.Numerals
+ GM.Signs
+ GM.Aytham
)
Strng = convertInter(Strng, Source)
SourceScript = GM.CrunchSymbols(ScriptAll, GM.Inter)
TargetScript = GM.CrunchSymbols(ScriptAll, Target)
ScriptMapAll = list(zip(SourceScript, TargetScript))
for x, y in ScriptMapAll:
Strng = Strng.replace(x, y)
Strng = CF.PostFixRomanOutput(Strng, Source, Target)
elif Source in GM.IndicScripts and Target in GM.IndicScripts:
Strng = PrP.RemoveJoiners(Strng)
Strng = CF.ShiftDiacritics(Strng, Source, reverse=True)
try:
Strng = getattr(CF, "Fix" + Source)(Strng, reverse=True)
except AttributeError:
pass
punc = (
"("
+ "|".join(
["\u005C" + x for x in list(string.punctuation)]
+ ["\s"]
+ [
x.replace(".", "\.")
for x in GM.CrunchSymbols(GM.Signs, Source)[1:3]
]
)
+ ")"
)
sOm, tOm = GM.CrunchList("OmMap", Source)[0], GM.CrunchList("OmMap", Target)[0]
if len(sOm) != 1:
Strng = re.sub(punc + sOm + punc, r"\1" + tOm + r"\2", Strng)
Strng = re.sub("^" + sOm + punc, tOm + r"\1", Strng)
Strng = re.sub(punc + sOm + "$", r"\1" + tOm, Strng)
Strng = re.sub("^" + sOm + "$", tOm, Strng)
if len(sOm) == 1:
Strng = Strng.replace(sOm, tOm)
for charList in GM.ScriptAll:
SourceScript = GM.CrunchSymbols(GM.retCharList(charList), Source)
TargetScript = GM.CrunchSymbols(GM.retCharList(charList), Target)
ScriptMap = list(zip(SourceScript, TargetScript))
ScriptMap.sort(reverse=True)
charPairs = charPairs + ScriptMap
charPairs = sorted(charPairs, key=cmp_to_key(lenSort))
for x, y in charPairs:
Strng = Strng.replace(x, y)
Strng = CF.FixIndicOutput(Strng, Source, Target)
elif Source in GM.IndicScripts and Target in GM.LatinScripts:
Strng = PrP.RemoveJoiners(Strng)
Strng = CF.ShiftDiacritics(Strng, Source, reverse=True)
try:
Strng = getattr(CF, "Fix" + Source)(Strng, reverse=True)
except AttributeError:
pass
sOm, tOm = GM.CrunchList("OmMap", Source)[0], GM.CrunchList("OmMap", Target)[0]
Strng = Strng.replace(sOm, tOm)
for charList in GM.ScriptAll:
SourceScript = GM.CrunchSymbols(GM.retCharList(charList), Source)
if charList == "Consonants":
TargetScript = [
x + Schwa for x in GM.CrunchSymbols(GM.Consonants, Target)
]
elif charList == "Vowels":
TargetScript = [DepV + x for x in GM.CrunchSymbols(GM.Vowels, Target)]
else:
TargetScript = GM.CrunchSymbols(GM.retCharList(charList), Target)
ScriptMap = list(zip(SourceScript, TargetScript))
ScriptMap.sort(reverse=True)
charPairs = charPairs + ScriptMap
charPairs = sorted(charPairs, key=cmp_to_key(lenSort))
if Source == "RomanSemitic":
unasp = ["k", "g", "c", "j", "t", "d", "p", "b", "ɽ", "ʈ", "ɖ", "r"]
charPairsH = [(x, y) for x, y in charPairs if "ʰ" in x]
charPairsNotH = [(x, y) for x, y in charPairs if "ʰ" not in x]
charPairs = charPairsNotH + charPairsH
for x, y in charPairs:
if x in unasp:
Strng = re.sub(x + "(?!(ʰ|\u0324))", y, Strng)
else:
Strng = Strng.replace(x, y)
else:
for x, y in charPairs:
Strng = Strng.replace(x, y)
Strng = CF.FixRomanOutput(Strng, Target)
Strng = CF.VedicSvarsIndicLatin(Strng)
Strng = CF.PostFixRomanOutput(Strng, Source, Target)
elif Source in GM.SemiticScripts and Target in GM.SemiticScripts:
try:
Strng = getattr(CF, "Fix" + Source.replace("-", "_"))(
Strng, Source, reverse=True
)
except AttributeError:
pass
tr = trans.Transliterator()
if Source == "Ugar":
Strng = Strng.replace("𐎟", " ")
Strng = tr.tr(Strng, sc=Source, to_sc=Target)
Strng = CF.FixSemiticOutput(Strng, Source, Target)
elif Source in (GM.IndicScripts + GM.LatinScripts) and Target in GM.SemiticScripts:
tr = trans.Transliterator()
Strng = convertScript(Strng, Source, "RomanSemitic")
Strng = Strng.replace("QQ", "").replace("mQ", "")
if "Arab" not in Target and "Hebr" not in Target and "Latn" not in Target:
Strng = re.sub("(.)" + "\u033D" + r"\1", r"\1", Strng)
Strng = PP.FixSemiticRoman(Strng, Target)
if "Arab" in Target or Target in ["Hebr", "Syre", "Syrj", "Syrn", "Thaa"]:
Strng = PP.insertARomanSemitic(Strng)
Strng = tr.tr(Strng, sc="Latn", to_sc=Target)
Strng = CF.FixSemiticOutput(Strng, Source, Target)
elif Source in GM.SemiticScripts and Target in (GM.IndicScripts + GM.LatinScripts):
try:
Strng = getattr(CF, "Fix" + Source.replace("-", "_"))(
Strng, Source, reverse=True
)
except AttributeError:
pass
tr = trans.Transliterator()
Strng = tr.tr(Strng, sc=Source, to_sc="Latn")
Strng = CF.FixSemiticOutput(Strng, Source, Target)
Strng = PrP.FixSemiticRoman(Strng, Source)
Strng = convertScript(Strng, "RomanSemitic", Target)
if Source == "Ugar":
Strng = Strng.replace("𐎟", " ")
Strng = PP.default(Strng)
return Strng | Aksarantara | /Aksarantara-1.1.0-py3-none-any.whl/aksarantara/convert.py | convert.py |
import Map
from trans import Transliterator
import convert, post_options, post_processing, pre_processing
import fixer
import json
import requests
import html
import itertools
from collections import Counter
import unicodedata
import collections
import yaml
import warnings
import langcodes
from inspect import getmembers, isfunction
def removeA(a):
if a.count("a") == 1:
return a.replace("a", "")
def unique_everseen(iterable, key=None):
seen = set()
seen_add = seen.add
if key is None:
for element in itertools.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def auto_detect(text, plugin=False):
scripts = []
for uchar in text:
try:
script_name = unicodedata.name(uchar).split(" ")[0].lower()
if script_name != "old":
scripts.append(script_name)
else:
scripts.append(unicodedata.name(uchar).split(" ")[1].lower())
except ValueError:
pass
counts = Counter(scripts)
script_percent = []
for script, count in counts.items():
percent = count / len(scripts) * 100
script_percent.append((percent, script))
if not plugin:
if len(script_percent) > 0:
script = sorted(script_percent)[-1][1]
else:
script = ""
else:
if len(script_percent) > 0:
if sorted(script_percent)[-1][1] == "latin":
script = sorted(script_percent)[-2][1]
else:
script = sorted(script_percent)[-1][1]
else:
script = ""
inputScript = script[0].upper() + script[1:]
laoPali = [
"ຆ",
"ຉ",
"ຌ",
"ຎ",
"ຏ",
"ຐ",
"ຑ",
"ຒ",
"ຓ",
"ຘ",
"ຠ",
"ຨ",
"ຩ",
"ຬ",
"຺",
]
if inputScript == "Bengali":
if "ৰ" in text or "ৱ" in text:
inputScript = "Assamese"
elif inputScript == "Lao":
if any(char in text for char in laoPali):
inputScript = "LaoPali"
elif inputScript == "Batak":
inputScript = "BatakKaro"
elif inputScript == "Myanmar":
inputScript = "Burmese"
mon = ["ၚ", "ၛ", "္ည", "ၞ", "ၟ", "ၠ", "ဳ", "ဨ"]
if any([char in text for char in mon]):
inputScript = "Mon"
countSub = {"Shan": 0, "TaiLaing": 0, "KhamtiShan": 0}
text = text.replace("ႃ", "")
for uchar in text:
try:
char = unicodedata.name(uchar).lower()
except:
pass
if "shan" in char:
countSub["Shan"] += 1
elif "tai laing" in char:
countSub["TaiLaing"] += 1
elif "khamti" in char:
countSub["KhamtiShan"] += 1
import operator
sorted_x = sorted(countSub.items(), key=operator.itemgetter(1))
if (
countSub["Shan"] > 0
or countSub["TaiLaing"] > 0
or countSub["KhamtiShan"] > 0
):
inputScript = sorted_x[-1][0]
elif inputScript == "Meetei":
inputScript = "MeeteiMayek"
elif inputScript == "Persian":
inputScript = "OldPersian"
elif inputScript == "Phags-pa":
inputScript = "PhagsPa"
elif inputScript == "Ol":
inputScript = "Santali"
elif inputScript == "Sora":
inputScript = "SoraSompeng"
elif inputScript == "Syloti":
inputScript = "SylotiNagri"
elif inputScript == "Tai":
inputScript = "TaiTham"
elif inputScript == "Warang":
inputScript = "WarangCiti"
elif inputScript == "Siddham":
preOptions = "siddhamUnicode"
elif inputScript == "Cyrillic":
inputScript = "RussianCyrillic"
elif inputScript == "Zanabazar":
inputScript = "ZanabazarSquare"
elif inputScript == "Syriac":
inputScript = "Syre"
eastern_dia = "ܲ ܵ ܝܼ ܘܼ ܸ ܹ ܘܿ".split(" ")
if any(char in text for char in eastern_dia):
inputScript = "Syrn"
western_dia = "ܰ ܺ ܶ ّ ܽ".split(" ")
if any(char in text for char in western_dia):
inputScript = "Syrj"
elif inputScript == "Arabic":
inputScript = "Arab"
persian_char = "چ گ ژ پ هٔ".split(" ")
if any(char in text for char in persian_char):
inputScript = "Arab-Fa"
urdu_char = "ڈ ٹ ڑ ھ".split(" ")
if any(char in text for char in urdu_char):
inputScript = "Urdu"
shahmukh_char = "ݨ لؕ مھ نھ یھ رھ لھ وھ".split(" ")
if any(char in text for char in shahmukh_char):
inputScript = "Shahmukhi"
elif inputScript == "Latin":
diacritics = [
"ā",
"ī",
"ū",
"ṃ",
"ḥ",
"ś",
"ṣ",
"ṇ",
"ṛ",
"ṝ",
"ḷ",
"ḹ",
"ḻ",
"ṉ",
"ṟ",
"ṭ",
"ḍ",
"ṅ",
"ñ",
]
Itrans = ["R^i", "R^I", "L^i", "L^I", ".N", "~N", "~n", "Ch", "sh", "Sh"]
semitic = ["ʾ", "ʿ", "š", "w"]
BurmeseLOC = ["´", "˝", "ʻ"]
if "ʰ" in text:
inputScript = "Titus"
elif any(char in text for char in semitic):
inputScript = "Latn"
elif any(char in text for char in BurmeseLOC):
inputScript = "IASTLOC"
elif any(char in text for char in diacritics):
if "ē" in text or "ō" in text or "r̥" in text:
inputScript = "ISO"
else:
inputScript = "IAST"
elif any(char in text for char in Itrans):
inputScript = "Itrans"
else:
inputScript = "HK"
elif (
inputScript in Map.IndicScripts
or inputScript in Map.LatinScripts
or inputScript in ["Hiragana", "Katakana"]
):
pass
else:
import trans
tr = trans.Transliterator()
inputScript = tr.auto_script(text)
return inputScript
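# Illustrative detections (sketch):
#     auto_detect("नमस्ते")    -> "Devanagari"
#     auto_detect("ਸਤਿਗੁਰ")   -> "Gurmukhi"
#     auto_detect("saṃskṛta") -> "IAST"  (Latin input classified by diacritics)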
def detect_preoptions(text, inputScript):
preoptions = []
if inputScript == "Thai":
textNew = text.replace("ห์", "")
if "\u035C" in text or "\u0325" in text or "งํ" in text or "\u0E47" in text:
preoptions = ["ThaiPhonetic"]
elif "์" in textNew and ("ะ" in text):
preoptions = ["ThaiSajjhayawithA"]
elif "์" in textNew:
preoptions = ["ThaiSajjhayaOrthography"]
elif "ะ" in text or "ั" in text:
preoptions = ["ThaiOrthography"]
elif inputScript == "Lao" or inputScript == "LaoPali":
textNew = text.replace("ຫ໌", "")
if "໌" in textNew and ("ະ" in text):
preoptions = ["LaoSajhayaOrthographywithA"]
elif "໌" in textNew:
preoptions = ["LaoSajhayaOrthography"]
elif "ະ" in text or "ັ" in text:
preoptions = ["LaoTranscription"]
elif inputScript == "Urdu":
preoptions = ["UrduShortNotShown"]
return preoptions
def Convert(src, tgt, txt, nativize, preoptions, postoptions):
tgtOld = ""
if tgt == "IASTLOC" and src != "Burmese":
txt = Convert(src, "Burmese", txt, nativize, preoptions, postoptions)
src = "Burmese"
if tgt == "IASTLOC" and src == "Burmese":
preoptions = preoptions + [tgt + src + "Target"]
postoptions = [tgt + src + "Target"] + postoptions
nativize = False
if src == "IASTLOC" and tgt == "Burmese":
preoptions = [src + tgt + "Source"] + preoptions
postoptions = [src + tgt + "Source"] + postoptions
nativize = False
if src == "IASTLOC" and tgt != "Burmese":
txt = Convert(src, "Burmese", txt, nativize, preoptions, postoptions)
src = "Burmese"
if tgt in Map.semiticISO.keys():
if Map.semiticISO[tgt] != src:
txt = Convert(
src, Map.semiticISO[tgt], txt, nativize, preoptions, postoptions
)
src = Map.semiticISO[tgt]
if Map.semiticISO[tgt] == src:
preoptions = [tgt + "Target"] + preoptions
postoptions = [tgt + "Target"] + postoptions
nativize = False
tgt = "Latn"
if src in Map.semiticISO.keys():
if Map.semiticISO[src] == tgt:
preoptions = [src + "Source"] + preoptions
postoptions = [src + "Source"] + postoptions
src = "Latn"
else:
txt = Convert(
src, Map.semiticISO[src], txt, nativize, preoptions, postoptions
)
src = Map.semiticISO[src]
if tgt == "" or tgt == "Ignore":
return txt
if preoptions == [] and postoptions == [] and nativize == False and src == tgt:
return txt
IndicSemiticMapping = {
"Hebrew": "Hebr",
"Thaana": "Thaa",
"Urdu": "Arab-Ur",
"Shahmukhi": "Arab-Pa",
}
if (
tgt in Map.SemiticScripts or tgt in Map.semiticISO.keys()
) and src in IndicSemiticMapping.keys():
src = IndicSemiticMapping[src]
if (
src in Map.SemiticScripts or src in Map.semiticISO.keys()
) and tgt in IndicSemiticMapping.keys():
tgt = IndicSemiticMapping[tgt]
if src in IndicSemiticMapping.keys() and tgt in IndicSemiticMapping.keys():
src = IndicSemiticMapping[src]
tgt = IndicSemiticMapping[tgt]
if not nativize and src == "Hebrew":
src = "Hebr"
if not nativize and src == "Urdu":
src = "Arab-Ur"
if not nativize and src == "Shahmukhi":
src = "Arab-Pa"
if not nativize and src == "Thaana":
src = "Thaa"
if src in ["Arab-Ur", "Arab-Pa"] and tgt in Map.IndicScripts:
txt += "\u05CD"
if nativize:
if src in Map.SemiticScripts and tgt in Map.IndicScripts:
txt += "\u05CC"
if (
src == tgt
and (src != "Hiragana" and src != "Katakana")
and src not in Map.SemiticScripts
):
tgtOld = tgt
tgt = "Devanagari"
txt = pre_processing.PreProcess(txt, src, tgt)
if "siddhammukta" in postoptions and tgt == "Siddham":
tgt = "SiddhamDevanagari"
if "siddhamap" in postoptions and tgt == "Siddham":
tgt = "SiddhamDevanagari"
if "siddhammukta" in preoptions and src == "Siddham":
src = "SiddhamDevanagari"
if "LaoNative" in postoptions and tgt == "Lao":
tgt = "Lao2"
if "egrantamil" in preoptions and src == "Grantha":
src = "GranthaGrantamil"
if "egrantamil" in postoptions and tgt == "Grantha":
tgt = "GranthaGrantamil"
if "nepaldevafont" in postoptions and tgt == "Newa":
tgt = "Devanagari"
if "ranjanalantsa" in postoptions and tgt == "Ranjana":
tgt = "Tibetan"
nativize = False
if "ranjanawartu" in postoptions and tgt == "Ranjana":
tgt = "Tibetan"
nativize = False
if "SoyomboFinals" in postoptions and tgt == "Soyombo":
txt = "\u02BE" + txt
for options in preoptions:
txt = getattr(pre_processing, options)(txt)
if "novowelshebrew" in preoptions and src == "Hebr":
txt = txt.replace("\u05B7", "")
srcOld = ""
if (
(src != "Latn" and src != "Type" and src in Map.SemiticScripts)
or (
src in Map.IndicScripts
and tgt in Map.SemiticScripts
and src not in Map.LatinScripts
)
or (src in ["Hiragana", "Katakana"])
):
txt = pre_processing.retainLatin(txt)
if src == "Hiragana" or src == "Katakana":
txt = pre_processing.JapanesePreProcess(src, txt, preoptions)
srcOld = "Japanese"
src = "ISO"
if tgt == "Hiragana" or tgt == "Katakana":
txt = post_processing.JapanesePostProcess(src, tgt, txt, nativize, postoptions)
if src == "Oriya" and tgt == "IPA":
txt = fixer.OriyaIPAFixPre(txt)
if src == "Itrans" and "##" in txt:
transliteration = ""
for i, word in enumerate(txt.split("##")):
if i % 2 == 0:
transliteration += convert.convertScript(word, src, tgt)
else:
transliteration += word
else:
transliteration = convert.convertScript(txt, src, tgt)
if (
srcOld == "Japanese"
and tgt != "Devanagari"
and "siddhammukta" not in postoptions
):
transliteration = convert.convertScript(transliteration, "Devanagari", "ISO")
if src == tgtOld:
tgt = tgtOld
transliteration = convert.convertScript(transliteration, "Devanagari", tgt)
if (
src not in Map.SemiticScripts and tgt == "Arab" and nativize
) or "arabicRemoveAdditionsPhonetic" in postoptions:
transliteration = getattr(post_processing, "arabicRemoveAdditionsPhonetic")(
transliteration
)
if nativize:
transliteration = post_options.ApplyScriptDefaults(
transliteration, src, tgt, postoptions
)
if tgt != "Latn":
if tgt != "Tamil":
transliteration = post_processing.RemoveDiacritics(transliteration)
else:
transliteration = post_processing.RemoveDiacriticsTamil(transliteration)
if "RemoveDiacritics" in postoptions:
if tgt == "Tamil":
postoptions = map(
lambda x: "RemoveDiacriticsTamil" if x == "RemoveDiacritics" else x,
postoptions,
)
for options in postoptions:
transliteration = getattr(post_processing, options)(transliteration)
if src == "Tamil" and tgt == "IPA":
r = requests.get("http://anunaadam.appspot.com/api?text=" + txt + "&method=2")
r.encoding = r.apparent_encoding
transliteration = r.text
if src == "Oriya" and tgt == "IPA":
transliteration = fixer.OriyaIPAFix(transliteration)
transliteration = pre_processing.retainLatin(transliteration, reverse=True)
transliteration = post_processing.defaultPost(transliteration)
return transliteration
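# Minimal end-to-end sketch:
#
#     Convert("Devanagari", "Tamil", "नमस्ते", True, [], [])
#
# Pre/post option strings must name functions in pre_processing /
# post_processing; the process*() wrappers below resolve such names
# case-insensitively before calling Convert.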
def process(
src, tgt, txt, nativize=True, post_options=[], pre_options=[], param="default"
):
if param == "default":
return process_default(src, tgt, txt, nativize, post_options, pre_options)
if param == "script_code":
return process_script_tag(src, tgt, txt, nativize, post_options, pre_options)
if param == "lang_code":
return process_lang_tag(src, tgt, txt, nativize, post_options, pre_options)
if param == "lang_name":
return process_lang_name(src, tgt, txt, nativize, post_options, pre_options)
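# Dispatch examples (sketch):
#     process("autodetect", "Tamil", "नमस्ते")                 # script names (default)
#     process("hi", "ta", "नमस्ते", param="lang_code")         # language tags
#     process("Hindi", "Tamil", "नमस्ते", param="lang_name")   # names via langcodes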
import functools
@functools.cache
def _load_data(file_path):
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, file_path), "r", encoding="utf8") as stream:
data_loaded = yaml.safe_load(stream)
return data_loaded
def convert_default(src, tgt, txt, nativize=True, post_options=[], pre_options=[]):
data_loaded = _load_data("scripts.yaml")
scriptList = Map.IndicScripts + Map.LatinScripts + Map.SemiticScripts
preOptionList = list(map(lambda x: x[0], getmembers(pre_processing, isfunction)))
preOptionListLower = list(map(lambda x: x.lower(), preOptionList))
postOptionList = list(map(lambda x: x[0], getmembers(post_processing, isfunction)))
postOptionListLower = list(map(lambda x: x.lower(), postOptionList))
post_options = [
option_id
for option in post_options
for option_id in postOptionList
if option.lower() == option_id.lower()
]
pre_options = [
option_id
for option in pre_options
for option_id in preOptionList
if option.lower() == option_id.lower()
]
font_hack_warning = (
tgt
+ " uses an hacked font to display the script. In the absence of this font, you text may appear different. \n See: https://aksharamukha.appspot.com/describe/"
+ tgt
+ " for the font used"
)
if tgt in data_loaded and "font_hack" in data_loaded[tgt]:
warnings.warn(font_hack_warning)
if src not in scriptList:
script_not_found = (
"Source script: "
+ src
+ " not found in the list of scripts supported. The text will not be transliterated."
)
warnings.warn(script_not_found)
if tgt not in scriptList:
script_not_found = (
"Target script: "
+ tgt
+ " not found in the list of scripts supported. The text will not be transliterated."
)
warnings.warn(script_not_found)
return Convert(src, tgt, txt, nativize, pre_options, post_options)
def process_default(src, tgt, txt, nativize, post_options, pre_options):
scriptList = Map.IndicScripts + Map.LatinScripts
scriptListLower = list(map(lambda x: x.lower(), scriptList))
if src == "autodetect":
src = auto_detect(txt)
pre_options = detect_preoptions(txt, src)
elif src.lower() in scriptListLower:
src = [
script_id for script_id in scriptList if src.lower() == script_id.lower()
][0]
if tgt.lower() in scriptListLower:
tgt = [
script_id for script_id in scriptList if tgt.lower() == script_id.lower()
][0]
return convert_default(src, tgt, txt, nativize, post_options, pre_options)
def process_script_tag(src_tag, tgt_tag, txt, nativize, post_options, pre_options):
data_loaded = _load_data("scripts.yaml")
data_loaded_wiki = _load_data("data.yaml")
src = []
tgt = []
if src_tag == "Syrc":
src_tag = "Syre"
warnings.warn(
"Please specify the variety of Syriac script for the source: Estrangelo (Syre), Eastern (Syrn) or Wester (Syrj). Defaulting to Syre"
)
if tgt_tag == "Syrc":
tgt_tag = "Syre"
warnings.warn(
"Please specify the variety of Syriac script for the target: Estrangelo (Syre), Eastern (Syrn) or Wester (Syrj). Defaulting to Syre"
)
for scrpt in data_loaded.keys():
scrpt_tag = data_loaded[scrpt]["script"]
if "lang" in data_loaded[scrpt].keys():
lang_tag = data_loaded[scrpt]["lang"].split(",")[0]
lang = list(map(lambda x: x.lower(), data_loaded[scrpt]["lang"].split(",")))
else:
population = 0
lang_tag = ""
lang = ""
if (
scrpt_tag in data_loaded_wiki.keys()
and lang_tag in data_loaded_wiki[scrpt_tag].keys()
):
population = data_loaded_wiki[scrpt_tag][lang_tag]["population"]
else:
population = 0
if (
"-" not in src_tag
and data_loaded[scrpt]["script"].lower() == src_tag.lower()
):
src.append((population, scrpt))
if (
"-" not in tgt_tag
and data_loaded[scrpt]["script"].lower() == tgt_tag.lower()
):
tgt.append((population, scrpt))
if "-" in tgt_tag:
lang_part = tgt_tag.split("-")[0].lower()
script_part = tgt_tag.split("-")[1].lower()
if (
scrpt_tag.lower() == script_part.lower()
and "lang" in data_loaded[scrpt].keys()
and lang_part.lower() in lang
):
tgt.append((0, scrpt))
if "-" in src_tag:
lang_part = src_tag.split("-")[0].lower()
script_part = src_tag.split("-")[1].lower()
if (
scrpt_tag.lower() == script_part.lower()
and "lang" in data_loaded[scrpt].keys()
and lang_part.lower() in lang
):
src.append((0, scrpt))
tgt = [(0, "ISO")]
if "-" in src_tag and src_tag.split("-")[0].lower() in ["latn", "en", "eng"]:
src = [(0, src_tag.split("-")[1])]
if "-" in tgt_tag and tgt_tag.split("-")[0].lower() in ["latn", "en", "eng"]:
tgt = [(0, tgt_tag.split("-")[1])]
if src_tag == "autodetect":
src = [(0, auto_detect(txt))]
pre_options = detect_preoptions(txt, src)
if len(src) > 0:
src_pop = sorted(src, reverse=True)[0][1]
elif src_tag in Map.SemiticScripts:
src_pop = src_tag
else:
raise Exception("Source script code: " + src_tag + " not found")
if len(tgt) > 0:
tgt_pop = sorted(tgt, reverse=True)[0][1]
elif tgt_tag in Map.SemiticScripts:
tgt_pop = tgt_tag
else:
raise Exception("Target script code: " + tgt_tag + " not found")
return process_default(src_pop, tgt_pop, txt, nativize, post_options, pre_options)
def process_lang_tag(src_tag, tgt_tag, txt, nativize, post_options, pre_options):
data_loaded = _load_data("scripts.yaml")
data_loaded_wiki = _load_data("data.yaml")
src = []
tgt = []
for scrpt in data_loaded.keys():
if "lang" in data_loaded[scrpt].keys():
lang = list(map(lambda x: x.lower(), data_loaded[scrpt]["lang"].split(",")))
else:
lang = ""
scrpt_tag = data_loaded[scrpt]["script"]
if scrpt_tag in data_loaded_wiki.keys():
script_count = len(data_loaded_wiki[scrpt_tag])
else:
script_count = 1
if src_tag.lower() in lang:
src.append((script_count, scrpt))
if tgt_tag.lower() in lang:
tgt.append((script_count, scrpt))
if "-" in tgt_tag:
lang_part = tgt_tag.split("-")[0].lower()
script_part = tgt_tag.split("-")[1].lower()
if scrpt_tag.lower() == script_part and lang_part in lang:
tgt.append((0, scrpt))
if "-" in src_tag:
lang_part = src_tag.split("-")[0].lower()
script_part = src_tag.split("-")[1].lower()
if scrpt_tag.lower() == script_part and lang_part in lang:
src.append((0, scrpt))
if tgt_tag.lower() in ["sa", "san", "pi", "pli"]:
# bare Sanskrit/Pali language tags default to Devanagari
tgt = [(0, "Devanagari")]
if "-" in src_tag and src_tag.split("-")[0].lower() in ["sa", "san", "pi", "pli"]:
for scrpt in data_loaded.keys():
scrpt_tag = data_loaded[scrpt]["script"]
if scrpt_tag.lower() == src_tag.split("-")[1].lower():
src = [(0, scrpt)]
if "-" in tgt_tag and tgt_tag.split("-")[0].lower() in ["sa", "san", "pi", "pli"]:
for scrpt in data_loaded.keys():
scrpt_tag = data_loaded[scrpt]["script"]
if scrpt_tag.lower() == tgt_tag.split("-")[1].lower():
tgt = [(0, scrpt)]
if "-" in src_tag and src_tag.split("-")[0].lower() in ["la", "en", "eng"]:
src = [(0, src_tag.split("-")[1])]
if "-" in tgt_tag and tgt_tag.split("-")[0].lower() in ["la", "en", "eng"]:
tgt = [(0, tgt_tag.split("-")[1])]
if src_tag == "autodetect":
src = [(0, auto_detect(txt))]
pre_options = detect_preoptions(txt, src)
if len(src) > 0:
src_pop = sorted(src, reverse=True)[0][1]
else:
raise Exception("Source language code: " + src_tag + " not found")
if len(tgt) > 0:
tgt_pop = sorted(tgt, reverse=True)[0][1]
else:
raise Exception("Target language code: " + tgt_tag + " not found")
return process_default(src_pop, tgt_pop, txt, nativize, post_options, pre_options)
def process_lang_name(src_name, tgt_name, txt, nativize, post_options, pre_options):
if src_name == "autodetect":
src = auto_detect(txt)
pre_options = detect_preoptions(txt, src)
else:
src = str(langcodes.find(src_name))
tgt = str(langcodes.find(tgt_name))
return process_lang_tag(src, tgt, txt, nativize, post_options, pre_options)
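# e.g. process_lang_name("Hindi", "Tamil", "नमस्ते") resolves both names
# through langcodes.find() to tags like "hi"/"ta" and defers to
# process_lang_tag() above. (Sketch; exact tags depend on langcodes.)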
@functools.cache
def get_semitic_json():
from pathlib import Path
cwd = Path(Path(__file__).parent)
with open(Path(cwd, "data.json"), "r", encoding="utf-8") as f:
data = json.load(f)
return data | Aksarantara | /Aksarantara-1.1.0-py3-none-any.whl/aksarantara/transliterate.py | transliterate.py |
import Map as GM, post_options
from Roman import Avestan
from Core import (
Ahom,
Tamil,
Malayalam,
Gurmukhi,
Oriya,
Saurashtra,
Sinhala,
Urdu,
Devanagari,
Chakma,
Limbu,
Takri,
TamilExtended,
)
from East import Tibetan, Thai, PhagsPa, ZanabazarSquare, Burmese, KhamtiShan
import fixer as CF
import re
import functools
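# Post-processing hooks. Each function below takes the transliterated string
# (Strng) and returns a transformed copy. They appear to be dispatched by
# name from the user-selected post-options, so the function names double as
# option identifiers and should not be renamed.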
def default(Strng, language=""):
Strng = (
Strng.replace("\uF001", "")
.replace("\u05CC", "")
.replace("ʻʻ", "")
.replace("\u05CD", "")
)
return Strng
def defaultPost(Strng):
Strng = Strng.replace("\u034F", "")
return Strng
def AnusvaraAsN(Strng):
Strng = Strng.replace("m\u034F", "n")
return Strng
def ShowSchwaHindi(Strng):
import pre_processing as PreP
Strng = PreP.RemoveSchwaHindi(Strng, True)
return Strng
def KannadaSpacingCandrabindu(Strng):
Strng = Strng.replace("\u0C81", "\u0C80")
return Strng
def KannadaNotRepha(Strng):
ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Kannada")) + ")"
Strng = re.sub("ರ್(?=" + ListC + ")", "ರ್", Strng)
return Strng
def KannadaNakaraPollu(Strng):
ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Kannada")) + ")"
Strng = re.sub("ನ್(?!" + ListC + ")", "\u0CDD", Strng)
return Strng
def TeluguRemoveNukta(Strng):
Strng = Strng.replace("\u0C3C", "")
return Strng
def TeluguRemoveAeAo(Strng):
Strng = Strng.replace("\u0952\u200B", "")
return Strng
def TeluguNakaraPollu(Strng):
ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Telugu")) + ")"
Strng = re.sub("న్(?!" + ListC + ")", "\u0C5D", Strng)
return Strng
def syriacVowelsBelow(Strng):
return Strng
def syriacWesternOToU(Strng):
return Strng
def olddogra(Strng):
return Strng
def ISO259Target(Strng):
replacements = [
("b", "ḃ"),
("p", "ṗ"),
("k", "k̇"),
("ḵ", "k"),
("v", "b"),
("f", "p"),
("꞉", "\u0307"),
("š̪", "ś"),
("š̮", "š"),
("š", "s̀"),
("ā", "å"),
("e", "ȩ"),
("ō", "ŵ"),
("ū", "ẇ"),
("\u033D", "°"),
("ĕ", "ḝ"),
]
for x, y in replacements:
Strng = Strng.replace(x, y)
Strng = Strng.replace("\u00B0\u0307", "\u0307\u00B0")
return Strng
def HebrewSBLTarget(Strng):
replacements = [
("v", "ḇ"),
("f", "p̄"),
("d", "ḏ"),
("ḏ꞉", "d"),
("g", "ḡ"),
("ḡ꞉", "g"),
("t", "ṯ"),
("ṯ꞉", "t"),
("š̪", "ś"),
("š̮", "š"),
("ō", "ô"),
("o", "ō"),
("ū", "û"),
("\u033D", "ĕ"),
]
for x, y in replacements:
Strng = Strng.replace(x, y)
Strng = Strng.replace("ĕ꞉", "꞉ĕ")
if "\u05CE" in Strng:
Strng = (
Strng.replace("ḏ", "d")
.replace("ṯ", "t")
.replace("ḡ", "g")
.replace("\u05CE", "")
)
Strng = Strng.replace("\u00B0\u0307", "\u0307\u00B0")
return Strng
def removetddash(Strng):
Strng += "\u05CE"
Strng = Strng.replace("d", "d꞉").replace("t", "t꞉").replace("g", "g꞉")
Strng = (
Strng.replace("ḏ", "d")
.replace("ṯ", "t")
.replace("ḡ", "g")
.replace("\u05CE", "")
)
return Strng
def ISO259Source(Strng):
return Strng
def ISO233Source(Strng):
return Strng
def HebrewSBLSource(Strng):
return Strng
def PersianDMGSBLSource(Strng):
return Strng
def ISO233Target(Strng):
replacements = [
("j", "ǧ"),
("g", "ǧ"),
("ḧ", "ẗ"),
("ḫ", "ẖ"),
("a̮", "ỳ"),
("ˀ", "ˈ"),
("aⁿ", "á"),
("iⁿ", "í"),
("uⁿ", "ú"),
("ā̂", "ʾâ"),
("\u033D", ""),
]
for x, y in replacements:
Strng = Strng.replace(x, y)
return Strng
def inherentAO(Strng):
Strng = Strng.replace("a", "ô")
return Strng
def BengaliOldRA(Strng):
Strng = Strng.replace("র", "ৰ")
return Strng
def PersianDMGTarget(Strng):
replacements = [
("ḏ", "ẕ"),
("ḍ", "ż"),
("ṯ", "s̱"),
("j", "ǧ"),
("ˀ", "ʼ"),
("ʔ", "ʼ"),
("ȳ", "ye"),
("ā̂", "ā"),
("\u033D", ""),
]
for x, y in replacements:
Strng = Strng.replace(x, y)
return Strng
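# Adjust the shared Semitic romanization toward Arabic conventions: an aleph
# after a consonant is read as the mater lectionis for ā, bare hamza/aleph
# carriers are resolved to vowels or apostrophes, and a stray ā before the
# tanwin vowels (iⁿ/aⁿ/uⁿ) is dropped. The "indic" target keeps circumflexed
# carriers for the Indic-side mapping.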
def arabizeLatn(Strng, target="semitic"):
cons = "(" + "|".join(GM.SemiticConsonants) + ")"
Strng = re.sub(cons + "(ʾ)", r"\1" + "ā", Strng)
if target == "indic":
Strng = Strng.replace("ʾā", "ā̂")
if target != "indic":
Strng = re.sub("ʾ", "a", Strng)
else:
Strng = re.sub("ʾ", "â", Strng)
Strng = re.sub("(a̮|ā̮)", "ā", Strng)
Strng = re.sub("ˀ?ā̮̂", "ʼā", Strng)
if target != "indic":
Strng = re.sub("[ˀʔ]", "ʼ", Strng)
else:
Strng = re.sub("[ˀ]", "", Strng)
if target != "indic":
Strng = LatnInitialVowels(Strng, "ʾ")
if target != "indic":
Strng = re.sub("ʼʾ", "ʼ", Strng)
if target != "indic":
Strng = re.sub("\u033d", "", Strng)
Strng = re.sub("(ā)([iau])(ⁿ)", r"\2\3", Strng)
return Strng
def BengaliSwitchYaYYa(Strng):
Strng = re.sub("(?<!\u09CD)য", "@#$", Strng)
Strng = re.sub("য়", "য", Strng)
Strng = Strng.replace("@#$", "য়")
return Strng
def AlephMaterLectionis(Strng, target="semitic"):
cons = "(" + "|".join(GM.SemiticConsonants) + ")"
Strng = re.sub(cons + "(ʾ)", r"\1" + "ā", Strng)
return Strng
def urduizeLatn(Strng, target="semitic"):
cons = "(" + "|".join(GM.SemiticConsonants) + ")"
Strng = re.sub(cons + "(ʾ)", r"\1" + "ā", Strng)
if target == "indic":
Strng = Strng.replace("ʾā", "ā̂")
Strng = re.sub("ʾ", "â", Strng)
Strng = re.sub("[ˀʔ]", "ʾ", Strng)
Strng = re.sub("(a̮|ā̮)", "ā", Strng)
Strng = re.sub("ˀ?ā̮̂", "ʼā", Strng)
if target != "indic":
Strng = re.sub("\u033d", "", Strng)
if target != "indic":
Strng = LatnInitialVowels(Strng)
Strng = re.sub("(ā)([iau])(ⁿ)", r"\2\3", Strng)
return Strng
def syricizeLatn(Strng, target="semitic"):
cons = "(" + "|".join(GM.SemiticConsonants) + ")"
if target != "indic":
Strng = re.sub("â", "ʾa", Strng)
Strng = re.sub("ā̂", "ʾā", Strng)
Strng = re.sub("ê", "ʾe", Strng)
Strng = re.sub("ē̂", "ʾē", Strng)
if target != "indic":
Strng = LatnInitialVowels(Strng)
return Strng
def hebraizeLatn(Strng, target="semitic"):
if target != "indic":
Strng = LatnInitialVowels(Strng, "ʾ")
return Strng
def syriacRoman(Strng):
Strng = (
Strng.replace("v", "ḇ").replace("ġ", "ḡ").replace("ḫ", "ḵ").replace("f", "p̄")
)
return Strng
def alephAyinLatnAlternate(Strng):
Strng = Strng.replace("ʾ", "ʼ").replace("ʿ", "ʽ")
return Strng
def alephAyinLatnAlternate2(Strng):
Strng = Strng.replace("ʾ", "ʔ").replace("ʿ", "ʕ")
return Strng
def ArabRemoveAdditions(Strng):
Strng = Strng.replace("ڨ", "ج").replace("ڤ", "ف").replace("پ", "ف")
return Strng
def arabicRemoveAdditionsPhonetic(Strng):
Strng = Strng.replace("ڨ", "غ").replace("ڤ", "ف").replace("پ", "ب")
return Strng
def removeSemiticLetters(Strng):
Strng = (
Strng.replace("ṭ", "t")
.replace("ḥ", "h")
.replace("ḍ", "z")
.replace("ḏ", "z")
.replace("ẓ", "z")
.replace("w", "v")
.replace("ʿ", "ʾ")
.replace("ṣ", "s")
)
return Strng
def removeNikkud(Strng):
nikkuds = [
"\u05B7",
"\u05B8",
"\u05B4",
"\u05B4י",
"\u05BB",
"\u05C2",
"\u05C1",
"\u05B6",
"\u05B5",
"\u05B9",
"וֹ",
"\u05B1",
"\u05B2",
"\u05B3",
"\u05BC",
"\u05B0",
"\u05C7",
]
for nikkud in nikkuds:
Strng = Strng.replace(nikkud, "")
return Strng
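# Word-initial vowels ride on a circumflexed carrier in the intermediate
# romanization; strip the circumflex (optionally prefixing an explicit
# aleph via initLetter) and drop the leftover virama mark U+033D.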
def LatnInitialVowels(Strng, initLetter=""):
initVow = "â ā̂ î ī̂ û ū̂ ê ē̂ âŷ ô ō̂ âŵ".split(" ")
nonInitVow = "a ā i ī u ū e ē aŷ o ō aŵ".split(" ")
for x, y in zip(initVow, nonInitVow):
Strng = Strng.replace(x, initLetter + y)
Strng = Strng.replace("\u0302", "")
Strng = re.sub("\u033d", "", Strng)
return Strng
def removeMajliyana(Strng):
Strng = Strng.replace("\u0330", "")
return Strng
def removeRukkaka(Strng):
Strng = Strng.replace("\u0741", "")
return Strng
def removeQussaya(Strng):
Strng = Strng.replace("\u0742", "")
return Strng
def removeVowelsSyriac(Strng):
Strng = re.sub("[\u0732\u0735\u073C\u0738\u0739\u073F]", "", Strng)
Strng = re.sub("[ّܰܶܺܽܳ]", "", Strng)
return Strng
def removeDiacriticsArabic(Strng):
    diacritics = ["\u0652", "\u064E", "\u0650", "\u064F"]
    for diacritic in diacritics:
Strng = Strng.replace(diacritic, "")
return Strng
def removeSukunEnd(Strng):
Strng = re.sub("(\u0652)(\W|$)", r"\2", Strng)
return Strng
def persianPaGaFaJa(Strng):
Strng = Strng.replace("پ", "ف").replace("گ", "ج")
return Strng
def removeDiacriticsPersian(Strng):
return Strng
def removeDiacriticsSyriac(Strng):
return Strng
def useKtivMale(Strng):
return Strng
def PhoneticMapping(Strng):
return Strng
def ArabicGimelGaGha(Strng):
Strng = Strng.replace("ج", "غ")
return Strng
def ArabicGimelPaBa(Strng):
Strng = Strng.replace("ف", "ب")
return Strng
def IASTLOCBurmeseSource(Strng):
Strng = Strng.replace("ʻ", "")
yrvh = (
Burmese.ConsonantMap[25:27]
+ Burmese.ConsonantMap[28:29]
+ Burmese.ConsonantMap[32:33]
)
yrvhPat = "".join(yrvh)
Strng = re.sub(f"(\u103A)(\u1039)([{yrvhPat}])", r"\2\3", Strng)
virsub = "\u1039"
yrvhsub = ["\u103B", "\u103C", "\u103D", "\u103E"]
for x, y in zip(yrvh, yrvhsub):
Strng = Strng.replace(virsub + x, y)
vowDep = "အော် အော အိ အီ အု အူ အေ".split(" ")
vowIndep = "ဪ ဩ ဣ ဤ ဥ ဦ ဧ".split(" ")
Strng = Strng.replace("ʼ", "’")
for x, y in zip(vowDep, vowIndep):
Strng = Strng.replace("’" + y, x)
Strng = Strng.replace("\u102Fဣ", "\u102D\u102F")
Strng = Strng.replace("’အ", "အ")
    # The original line replaced two visually identical strings; the intended
    # effect is evidently to normalize the order of dot-below (U+1037) and
    # asat (U+103A). Canonical (NFC) order, 1037 first, is assumed here.
    Strng = Strng.replace("\u103A\u1037", "\u1037\u103A")
return Strng
def removeSegmentSpacesBurmese(Strng):
import regex
Strng = regex.sub("(\p{L}\p{M}*) (\p{L})", r"\1\2", Strng)
Strng = regex.sub("(\p{L}\p{M}*) (\p{L})", r"\1\2", Strng)
return Strng
def IASTLOCBurmeseTarget(Strng):
Strng = Strng.replace("˳", "ʹ")
Strng = Strng.replace("auʻ", "oʻ")
Strng = Strng.replace("ḥ", "ʺ")
chars_misc = {"e*": "၏", "n*": "၌", "r*": "၍", "l*": "၎"}
for lat, bur in chars_misc.items():
Strng = Strng.replace(bur, lat)
return Strng
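# Insert the inherent "a" after any Semitic consonant (or gemination colon)
# that is not already followed by a vowel or an aspiration mark.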
def insertARomanSemitic(Strng):
Strng = Strng.replace("\u02BD", "")
consonantsAll = (
"(" + "|".join(sorted(GM.SemiticConsonants, key=len, reverse=True)) + ")"
)
vowelsAll = "(" + "|".join(GM.SemiticVowels) + ")"
Strng = re.sub(consonantsAll + "(?![꞉ʰ])(?!" + vowelsAll + ")", r"\1" + "a", Strng)
Strng = re.sub("(꞉)(?!ʰ)(?!" + vowelsAll + ")", r"\1" + "a", Strng)
return Strng
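# The Indic-side romanization tags emphatic/Semitic letters as a base letter
# plus "Q" (e.g. tQ for ṭ); rewrite those digraphs into proper Semitic
# letters, and for Arabic targets collapse C + virama + C into the
# gemination colon.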
def FixSemiticRoman(Strng, Target):
vir = "\u033D"
Strng = re.sub("ō̂̄̂", "ō̂", Strng)
if "Arab" in Target:
consonantsAll = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.Consonants, "RomanSemitic"),
key=len,
reverse=True,
)
)
+ ")"
)
Strng = re.sub(consonantsAll + vir + r"\1", r"\1" + "꞉", Strng)
Strng = re.sub("âQ", "ʿ", Strng)
Strng = re.sub("aQ", "Qa", Strng)
SemiticIndic = [
("ʾQā", "ā̂Q"),
("ʾQi", "îQ"),
("ʾQī", "ī̂Q"),
("ʾQu", "ûQ"),
("ʾQū", "ū̂Q"),
("ʾQe", "êQ"),
("ʾQē", "ē̂Q"),
("ʾQo", "ôQ"),
("ʾQō", "ō̂Q"),
("ṣ", "sQ"),
("ʿ", "ʾQ"),
("ṭ", "tQ"),
("ḥ", "hQ"),
("ḍ", "dQ"),
("p̣", "pQ"),
("ž", "šQ"),
("ž", "zQ"),
("ẓ", "jʰQ"),
("ḏ", "dʰQ"),
("ṯ", "tʰQ"),
("w", "vQ"),
]
for s, i in SemiticIndic:
Strng = Strng.replace(i, s)
Strng = Strng.replace("\u033d\u033d", "\u033d")
return Strng
def ArabAtoAleph(Strng):
Strng = Strng.replace("أ", "ا")
return Strng
def estrangelasyriac(Strng):
return Strng
def easternsyriac(Strng):
return Strng
def westernsyriac(Strng):
return Strng
def kawitan(Strng):
return Strng
def sundapura(Strng):
return Strng
def readableItrans(Strng):
pairsReadable = [
("R^i", "RRi"),
("R^I", "RRii"),
("", ""),
("", ""),
("A", "aa"),
("I", "ii"),
("U", "uu"),
("Ch", "chh"),
("kSh", "x"),
("M", ".m"),
]
for x, y in pairsReadable:
Strng = Strng.replace(x, y)
return Strng
def NasalTilde(Strng):
Strng = re.sub("(m̐|ṃ|ṁ)", "\u0303", Strng)
return Strng
def verticalKana(Strng):
return Strng
def verticalSiddham(Strng):
return Strng
def vtobJapanese(txt):
return txt
def SogdReshAyin(Strng):
Strng = Strng.replace("𐼽", "𐽀")
return Strng
def SogoReshAyinDaleth(Strng):
Strng = Strng.replace("𐼓", "𐼘")
return Strng
def arabPaFa(Strng):
return Strng.replace("پ", "ف")
def arabChaSa(Strng):
return Strng.replace("چ", "س")
def gainGimel(Strng):
return Strng.replace("עׄ", "ג")
def tavTwodot(Strng):
return Strng.replace("ת", "ת̈")
def tavThreedot(Strng):
return Strng.replace("תׄ", "ת֒")
# NOTE: this redefines gainGimel above, so the earlier עׄ -> ג mapping is
# shadowed; the qoph mapping below likely belongs under its own option name.
def gainGimel(Strng):
    return Strng.replace("ק", "ק̈")
def tokushuon(txt):
txt = txt.replace("si", "suxi").replace("zi", "zuxi")
txt = txt.replace("yi", "ixi")
txt = txt.replace("fy", "fux")
txt = txt.replace("nye", "nixe")
txt = re.sub("(?<![sc])hu", "hoxu", txt)
txt = re.sub("(?<![sc])hye", "hixe", txt)
return txt
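# Kana conversion pipeline: the text is routed through a Telugu intermediate,
# mapped to a romanized-kana form, patched for Japanese phonotactics (sokuon,
# yōon and the tokushuon small-kana combinations), and finally rendered as
# Hiragana or Katakana via kana2roman; in Katakana, long vowels are marked
# for the chōonpu.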
def JapanesePostProcess(src, tgt, txt, nativize, postoptions):
from Other import kana2roman
import pykakasi
import convert
txt = convert.convertScript(txt, src, "Telugu")
txt = txt.replace("ˆ", "")
txt = convert.convertScript(txt.lower(), "ISO", "Inter")
txt = convert.convertScript(txt, "Telugu", "RomanKana")
txt = re.sub("([aiueo])" + r"\1" + "H", r"\1" + r"\1" + "h" + r"\1", txt)
txt = re.sub("([aiueo])H", r"\1" + "h" + r"\1", txt)
txt = (
txt.replace("Gk", "nk")
.replace("Gg", "ng")
.replace("Jc", "nc")
.replace("Jj", "nj")
.replace("mb", "nb")
.replace("mp", "np")
)
txt = (
txt.replace("nn", "nnn")
.replace("c", "ch")
.replace("chch", "cch")
.replace("shsh", "ssh")
.replace("mm", "nm")
)
txt = txt.replace(",", "、").replace("\uEA01", "。").replace("\uEA02", "。。")
txt = txt.replace("JJ", "nnny")
txt = txt.replace("J", "ny")
if "vtobJapanese" in postoptions:
txt = txt.replace("v", "b")
txt = (
txt.replace("tr", "tor")
.replace("dr", "dor")
.replace("Dr", "dor")
.replace("Tr", "tor")
)
txt = (
txt.replace("tya", "tiya")
.replace("dya", "diya")
.replace("sya", "suya")
.replace("shya", "shuya")
.replace("chya", "chuya")
)
txt = txt.replace("di", "dexi").replace("du", "doxu")
txt = txt.replace("ti", "texi").replace("tu", "toxu")
txt = txt.replace("mye", "mixe").replace("pye", "pixe").replace("bye", "bixe")
txt = txt.replace("ye", "ixe")
txt = txt.replace("vye", "vuxixe").replace("vy", "vuxy")
txt = txt.replace("she", "shixe")
if not nativize:
txt = re.sub("(r)(r\u309A)", "rur\u309A", txt)
txt = re.sub("(r\u309A)(r\u309A)", "rr" + "\u309A", txt)
txt = re.sub("(k\u309A)(k\u309A)", "kk" + "\u309A", txt)
txt = re.sub("([rk])(\u309A)([aieou])", r"\1\3\2", txt)
txt = tokushuon(txt)
else:
txt = (
txt.replace("r\u309A", "r")
.replace("k\u309Ak" + "\u309A", "ng")
.replace("k\u309A", "ng")
)
txt = txt.replace("yi", "i").replace("ye", "e").replace("wu", "u")
txt = txt.replace("wo", "uxo")
txt = txt.replace("she", "shie")
if tgt == "Hiragana":
txt = kana2roman.to_hiragana(txt)
txt = re.sub(
"(k|g|ch|j|p|b|m|y|r|w|sh|s|h|z|f)" + "(" + r"\1" + ")", r"\1" + "u", txt
)
txt = re.sub("(d|t)" + "(" + r"\1" + ")", r"\1" + "o", txt)
if not nativize:
txt = tokushuon(txt)
txt = kana2roman.to_hiragana(txt)
txt = re.sub("(k|g|ch|j|p|b|m|y|r|sh|s|h|z|f|v)", r"\1" + "u", txt)
txt = re.sub("(d|t)", r"\1" + "o", txt)
if not nativize:
txt = tokushuon(txt)
txt = kana2roman.to_hiragana(txt)
txt = txt.replace("う゛", "ゔ")
if tgt == "Katakana":
txt = (
txt.replace("aa", "a-")
.replace("ii", "i-")
.replace("ee", "e-")
.replace("oo", "o-")
.replace("uu", "u-")
)
txt = (
txt.replace("a\u309Aa", "a\u309A-")
.replace("i\u309Ai", "i\u309A-")
.replace("e\u309Ae", "e\u309A-")
.replace("o\u309Ao", "o\u309A-")
.replace("u\u309Au", "u\u309A-")
)
txt = kana2roman.to_katakana(txt)
txt = re.sub(
"(k|g|ch|j|p|b|m|y|r|sh|s|h|z|f|v)" + "(" + r"\1" + ")", r"\1" + "u", txt
)
txt = re.sub("(d|t)" + "(" + r"\1" + ")", r"\1" + "o", txt)
if not nativize:
txt = tokushuon(txt)
txt = kana2roman.to_katakana(txt)
txt = re.sub("(k|g|ch|j|p|b|m|y|r|sh|s|h|z|f|v)", r"\1" + "u", txt)
txt = re.sub("(d|t)", r"\1" + "o", txt)
if not nativize:
txt = tokushuon(txt)
txt = kana2roman.to_katakana(txt)
txt = convert.convertScript(txt, "Inter", "ISO")
return txt
def urduRemoveInherent(Strng):
Strng = re.sub("\Ba", "", Strng)
return Strng
def HebrewVetVav(Strng):
shortVowels = (
"("
+ "|".join(
[
"\u05B7",
"\u05B8",
"\u05B4",
"\u05BB",
"\u05B5",
"\u05B6",
"\u05B9",
"\u05B0",
]
)
+ ")"
)
Strng = re.sub(shortVowels + "(" + "ו" + ")" + "(?!\u05BC)", r"\1" + "ב", Strng)
Strng = Strng.replace("בֺ", "בֹ")
return Strng
def devanagariuttara(Strng):
return Strng
def devanagarinepali(Strng):
return Strng
def devanagaribalbodh(Strng):
return Strng
def devanagarijain(Strng):
return Strng
def HiraganaaunotDipthong(Strng):
return Strng
def IASTISONasalTilde(Strng):
return Strng
def HeberewQoph(Strng):
Strng = Strng.replace("כּ", "ק").replace("ךּ", "ק")
return Strng
def HebewShortO(Strng):
Strng = re.sub("(?<!ו)\u05B9", "\u05C7", Strng)
return Strng
def HebrewKatevMalei(Strng):
Strng = Strng.replace("ָ", "א")
Strng = Strng.replace("ַ", "א")
return Strng
def HebrewnonFinalShort(Strng):
finals = ["ך", "ם", "ן", "ף", "ץ", "ףּ", "ךּ"]
finalCons = ["כ", "מ", "נ", "פ", "צ", "פּ", "כּ"]
otherCons = "ב,ח,ע,צ,ש,ת".split(",")
consonantsAll = (
"("
+ "|".join(
GM.CrunchSymbols(GM.Consonants, "Hebrew")
+ finals
+ ["׳", "י", "ו"]
+ otherCons
)
+ ")"
)
shortVowels = [
"\u05B7",
"\u05B8",
"\u05B4",
"\u05BB",
"\u05B5",
"\u05B6",
"\u05B9",
"\u05C7",
]
shortVowelsR = (
"("
+ "|".join(
[
"\u05B7",
"\u05B8",
"\u05B4",
"\u05BB",
"\u05B5",
"\u05B6",
"\u05B9",
"\u05C7",
]
+ ["׳"]
)
+ ")"
)
for s in shortVowels:
Strng = re.sub(
"(" + s + ")" + "(׳?)" + "(?!" + consonantsAll + ")",
r"\1\2" + "ה" + "\u02BE",
Strng,
)
for f, c in zip(finals, finalCons):
Strng = re.sub(
"(" + f + ")" + shortVowelsR + "(׳?)" + "ה" + "\u02BE",
c + r"\2\3" + "ה",
Strng,
)
for f in finals:
Strng = Strng.replace(f + "\u05B0", f)
Strng = Strng.replace("\u05B0" + "׳" + "ה" + "\u02BE", "\u05B0" + "׳")
Strng = Strng.replace("וֹה" + "\u02BE", "וֹ")
Strng = Strng.replace("\u02BE", "")
uVowels = ["וֹ", "וּ"]
return Strng
def DevanagariAnusvara(Strng):
return NasalToAnusvara(Strng, "Devanagari")
def jainomDevangari(Strng):
Strng = Strng.replace("ॐ", "ꣽ")
return Strng
def GurmukhiCandrabindu(Strng):
Strng = Strng.replace("ਁ", "ਂ")
return Strng
def mDotAboveToBelow(Strng):
Strng = Strng.replace("ṃ", "ṁ")
return Strng
def noLongEO(Strng):
Strng = Strng.replace("ē", "e").replace("ō", "o")
return Strng
def TamilStyleUUCore(Strng):
Strng = re.sub("([ഖഗഘഛഝഠഡഢഥദധഫബഭ])" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng)
return Strng
def TamilStyleUUOther(Strng):
Strng = re.sub("([ജശഷസഹ])" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng)
Strng = re.sub("(ശ്ര)" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng)
Strng = re.sub("(ശ്ര)" + "([ുൂ])", r"\1" + "\u200D" + r"\2", Strng)
return Strng
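# Heuristically switch Tamil la to retroflex ḷa in Sanskrit loanwords whose
# etymological spelling requires it (e.g. காலி -> காளி); the contexts below
# are a hand-curated list rather than a general rule.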
def ContextualLLa(Strng):
ListVS = "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tamil"))
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Tamil"))
Strng = re.sub("(ஆவ|ாவ)" + "ல", r"\1" + "ள", Strng)
Strng = re.sub("(்ரவா|்ரவ|ர|பவ|வி|ரா|ஷ்க|த⁴வ)" + "ல", r"\1" + "ள", Strng)
Strng = re.sub("(யா|யாம|கோம)" + "ல", r"\1" + "ள", Strng)
Strng = re.sub("(மௌ)" + "ல", r"\1" + "ள", Strng)
Strng = re.sub("([\s^])(ந)" + "ல", r"\1" + "ள", Strng)
Strng = Strng.replace("கலத்ர", "களத்ர")
Strng = Strng.replace("ஶீதல", "ஶீதள")
Strng = Strng.replace("ஸுதல", "ஸுதள")
Strng = Strng.replace("காலி", "காளி")
Strng = Strng.replace("காலீ", "காளீ")
Strng = Strng.replace("கலேவர", "களேவர")
Strng = Strng.replace("கலேவர", "களேவர")
Strng = Strng.replace("ப³ஹுல", "ப³ஹுள")
Strng = Strng.replace("கஶ்மல", "கஶ்மள")
Strng = re.sub(
"([கத])" + "(" + ListVS + ")?" + "([³⁴])" + "ல", r"\1\2\3" + "ள", Strng
)
Strng = re.sub("(ஜு)" + "ல", r"\1" + "ள", Strng)
Strng = re.sub("(து)" + "லசி", r"\1" + "ளசி", Strng)
Strng = re.sub("(ரிம)" + "ல", r"\1" + "ள", Strng)
Strng = Strng.replace("ள்ய", "ல்ய")
return Strng
def FinalNNa(Strng):
Strng = re.sub("ன", "ந", Strng)
Strng = re.sub("ந்" + "([\.।॥,!-])", "ன்" + r"\1", Strng)
Strng = re.sub("ந்" + "(\s)", "ன்" + r"\1", Strng)
Strng = re.sub("ந்$", "ன்", Strng)
return Strng
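# The list below enumerates stems that begin with dental na in Sanskrit;
# occurrences are rewritten from the alveolar to the dental letter. The
# trailing ** / *** marks appear to flag stems needing a context check.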
def TamilpredictDentaNaExtended(Strng):
listDentalNa = """ഩഖ
ഩഗര
ഩകുല
ഩഗ്ഩ
ഩക്ഷത്ര
ഩടരാജ
ഩടീ
ഩദീ
ഩന്ദഩ
ഩപുംസക
ഩഭ**
ഩമ**
ഩമശ്
ഩമസ്
ഩമാമ
ഩമാമി
ഩമാമോ
ഩമുചി
ഩമോ
ഩമോനമ
ഩമോനമോ
ഩമോസ്തു
ഩമോസ്തുതേ
ഩമഃ
ഩയഩ
ഩര**
ഩരക
ഩര്തക
ഩര്തഩ
ഩര്മദ
ഩല**
ഩലിഩ
ഩവ**
ഩവീഩ
ഩവ്യ
ഩശ്**
ഩഷ്ട
ഩാരായണ
ഩാഗ
ഩാടക
ഩാഡീ
ഩാട്യ
ഩാഡ്യ
ഩാഥ
ഩാദ
ഩാരത
ഩാഩാ***
ഩാഩ്യ**
ഩാഩൃത
ഩാഭ
ഩാമ
ഩായക
ഩായികാ
ഩാരദ
ഩാരസിംഹ
ഩാരി
ഩാരീ
ഩാവ***
ഩാശ
ഩാസിക
ഩിഗമ
ഩികട
ഩികര
ഩികാമ
ഩികായ
ഩിഖില
ഩികുഞ്ജ
ഩിഘൂഩ
ഩികേത
ഩിഗ്രഹ
ഩിഗൃഹ
ഩികൃന്ത
ഩിഗ്രന്ത
ഩിക്ഷിപ
ഩിക്ഷേപ
ഩിഘ്ഩ
ഩിജ
ഩിദര്ശ
ഩിതമ്ബ
ഩിതര
ഩിദാഘ
ഩിദാഩ
ഩിതാന്ത
ഩിധാഩ
ഩിധായ
ഩിധ
ഩിധേഹി
ഩിദ്ര
ഩിത്യ
ഩിന്ദാ
ഩിബദ്ധ
ഩിബധ്
ഩിബന്ധഩ
ഩിപട
ഩിപതിത
ഩിപത്യ
ഩിപപാത
ഩിപാതിത
ഩിപാത്യ
ഩിപുണ
ഩിബോധ
ഩിഭൃത
ഩിമഗ്ഩ
ഩിമിത്ത
ഩിമിഷ
ഩിയത
ഩിയന്ത
ഩിയന്ത്ര
ഩിയമ
ഩിയുക്ത
ഩിയുജ്യ
ഩിയോ
ഩിര
ഩിര്
ഩിലയ
ഩിവര്
ഩിവസ
ഩിവാര
ഩിവാസ
ഩിവിഷ്ട
ഩിവേദ
ഩിവേശ
ഩിവൃ
ഩിശ
ഩിശ്
ഩിഷ
ഩിഷ്
ഩിസ
ഩിസ്
ഩിഹിത
ഩിഃശ
ഩിഃഷ
ഩിഃസ
ഩീച
ഩീതി
ഩീര
ഩീല
ഩൂതഩ
ഩൂപുര
ഩേത്ര
ഩേയ**
ഩൈമിത്ത
ഩൈമിഷ
ഩൈരാശ്യ
ഩൈരൃത
ഩൈവേദ്യ
ഩൈഷ്
ഩ്യായ
ഩ്യാസ
ഩ്യൂഩ
ഩൃ""".split(
"\n"
)
vir = Tamil.ViramaMap[0]
for wordNna in listDentalNa:
wordNa = re.sub("^ഩ", "ന", wordNna)
if "²" in wordNna[-1] or "³" in wordNna[-1] or "⁴" in wordNna[-1]:
number = wordNna[-1]
wordNnaN = wordNna[:-1]
wordNaN = wordNa[:-1]
for vow in GM.CrunchSymbols(GM.VowelSigns, "Tamil"):
Strng = Strng.replace(wordNnaN + vow + number, wordNaN + vow + number)
Strng = Strng.replace(wordNna, wordNa)
for wordNna in ["ഩാമ", "ഩര"]:
wordNa = re.sub("^ഩ", "ന", wordNna)
Strng = Strng.replace(wordNa + vir, wordNna + vir)
Strng = Strng.replace("ഩ്ന", "ന്ന")
return Strng
def TamilpredictDentaNa(Strng):
listDentalNa = """னக²
னக³ர
னகுல
னக்³ன
னக்ஷத்ர
னடராஜ
னடீ
னதீ³
னந்த³ன
னபும்ʼஸக
னப⁴**
னம**
னமஶ்
னமஸ்
னமாம
னமாமி
னமாமோ
னமுசி
னமோ
னமோநம
னமோநமோ
னமோஸ்து
னமோஸ்துதே
னம꞉
னயன
னர**
னரக
னர்தக
னர்தன
னர்மத³
னல**
னலின
னவ**
னவீன
னவ்ய
னஶ்**
னஷ்ட
னாராயண
னாக³
னாடக
னாடீ³
னாட்ய
னாட்³ய
னாத²
னாத³
னாரத
னானா***
னான்ய**
னான்ருʼத
னாப⁴
னாம
னாயக
னாயிகா
னாரத³
னாரஸிம்ʼஹ
னாரி
னாரீ
னாவ***
னாஶ
னாஸிக
னிக³ம
னிகட
னிகர
னிகாம
னிகாய
னிகி²ல
னிகுஞ்ஜ
னிகூ⁴ன
னிகேத
னிக்³ரஹ
னிக்³ருʼஹ
னிக்ருʼந்த
னிக்³ரந்த
னிக்ஷிப
னிக்ஷேப
னிக்⁴ன
னிஜ
னித³ர்ஶ
னிதம்ப³
னிதர
னிதா³க⁴
னிதா³ன
னிதாந்த
னிதா⁴ன
னிதா⁴ய
னித⁴
னிதே⁴ஹி
னித்³ர
னித்ய
னிந்தா³
னிப³த்³த⁴
னிப³த்⁴
னிப³ந்த⁴ன
னிபட
னிபதித
னிபத்ய
னிபபாத
னிபாதித
னிபாத்ய
னிபுண
னிபோ³த⁴
னிப்⁴ருʼத
னிமக்³ன
னிமித்த
னிமிஷ
னியத
னியந்த
னியந்த்ர
னியம
னியுக்த
னியுஜ்ய
னியோ
னிர
னிர்
னிலய
னிவர்
னிவஸ
னிவார
னிவாஸ
னிவிஷ்ட
னிவேத³
னிவேஶ
னிவ்ருʼ
னிஶ
னிஶ்
னிஷ
னிஷ்
னிஸ
னிஸ்
னிஹித
னி꞉ஶ
னி꞉ஷ
னி꞉ஸ
னீச
னீதி
னீர
னீல
னூதன
னூபுர
னேத்ர
னேய**
னைமித்த
னைமிஷ
னைராஶ்ய
னைர்ருʼத
னைவேத்³ய
னைஷ்
ன்யாய
ன்யாஸ
ன்யூன
ன்ருʼ""".split(
"\n"
)
vir = Tamil.ViramaMap[0]
Tamillist = "²³⁴ஃஅஆஇஈஉஊஎஏஐஒஓஔகஙசஜஞடணதநனபமயரறலளழவஷஸஹாிீுூெேைொோௌ்ௗ"
for wordNna in listDentalNa:
wordNa = re.sub("^ன", "ந", wordNna)
if "²" in wordNna[-1] or "³" in wordNna[-1] or "⁴" in wordNna[-1]:
number = wordNna[-1]
wordNnaN = wordNna[:-1]
wordNaN = wordNa[:-1]
for vow in GM.CrunchSymbols(GM.VowelSigns, "Tamil"):
Strng = Strng.replace(wordNnaN + vow + number, wordNaN + vow + number)
Strng = Strng.replace(wordNna, wordNa)
for wordNna in ["னாம", "னர"]:
wordNa = re.sub("^ன", "ந", wordNna)
Strng = re.sub(
"([" + Tamillist + "])(" + wordNa + vir + ")",
r"\1" + wordNna + vir,
Strng,
)
Strng = Strng.replace("ன்ந", "ந்ந")
Strng = Strng.replace("னாம்ன", "நாம்ன")
return Strng
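# Ahom distinguishes open- and closed-syllable vowel forms; swap in the
# closed form of a vowel sign when a virama-bearing final consonant or an
# anusvara adjoins the syllable.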
def AhomClosed(Strng):
vir = Ahom.ViramaMap[0]
anu = Ahom.AyogavahaMap[1]
Strng = Strng.replace("\U00011722", "\U00011723")
Strng = re.sub("(\U00011723)(.)(" + vir + ")", "\U00011722" + r"\2\3", Strng)
Strng = Strng.replace(anu + "\U00011723", anu + "\U00011722")
Strng = Strng.replace("\U00011724", "\U00011725")
Strng = re.sub("(\U00011725)(.)(" + vir + ")", "\U00011724" + r"\2\3", Strng)
Strng = Strng.replace(anu + "\U00011725", anu + "\U00011724")
Strng = re.sub(
"(\U00011726\U00011727)(.)(" + vir + ")", "\U00011726" + r"\2\3", Strng
)
Strng = Strng.replace("\U00011726\U0001172A\U00011727", anu + "\U00011727")
Strng = re.sub(
"(\U00011726\U00011721)(.)(" + vir + ")", "\U00011728" + r"\2\3", Strng
)
Strng = Strng.replace("\U00011726\U0001172A\U00011721", anu + "\U00011728")
return Strng
def TeluguTamilZha(Strng):
return Strng
def TeluguTamilRra(Strng):
Strng = Strng.replace("ఱ్ఱ", "ౘ్ౘ")
Strng = Strng.replace("ట్ర", "ౘ్ౘ")
Strng = Strng.replace("ండ్ర", "న్ఱ")
return Strng
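# Render Pali in Thai with native phonetic values: etymological ท/พ are
# replaced by ด/บ, the nikkhahit is unpacked to ng + virama, and glide
# clusters are tied with U+035C before the text is run through
# ThaiTranscription.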
def ThaiNativeConsonants(Strng):
Strng = Strng.replace("ท", "ด")
Strng = Strng.replace("พ", "บ")
Strng = Strng.replace("\u0E36", "\u0E34\u0E4D")
Strng = Strng.replace("ํ", "งฺ")
Strng = re.sub("(\u0E3A)([ยรลวห])", "\u035C" + r"\2", Strng)
Strng = Strng.replace("ห\u0E3A", "ห\u035C")
Strng = re.sub("([ยรลวห])" + "\u035C" + r"\1", r"\1" + "\u0E3A" + r"\1", Strng)
Strng = re.sub("(า)(.)(ฺ)", "็" + r"\1\2\3", Strng)
Strng = re.sub("([เโ])(.)(.)(ฺ)", r"\1\2" + "็" + r"\3\4", Strng)
Strng = ThaiTranscription(Strng, False)
Strng = Strng.replace("ะ͜", "\u035C")
Strng = Strng.replace("ะ็", "็")
Strng = re.sub("([เโไ])(.)(\u035C)(.)([ะ\u0E31])", r"\1\2\3\4", Strng)
Strng = Strng.replace("ค", "ก\u0325")
Strng = Strng.replace("ช", "จ\u0325")
Strng = Strng.replace("ํ", "ง")
Strng = Strng.replace("ง", "งํ")
Strng = Strng.replace("ะงํ\u035C", "\u0E31งํ")
Strng = re.sub("([เโไ])(งํ)([าัะ])", r"\1" + "ง" + r"\2", Strng)
Strng = re.sub("([เโไ])(งํ)", r"\1" + "ง", Strng)
Strng = re.sub("(งํ)([าัะ])", "ง" + r"\2", Strng)
return Strng
def KhamiShanMyanmarNumerals(Strng):
for x, y in zip(KhamtiShan.NumeralMap, Burmese.NumeralMap):
Strng = Strng.replace(x, y)
return Strng
def KhamtiShanRa(Strng):
Strng = Strng.replace("ရ", "ꩳ")
return Strng
def granthafinal(Strng):
return Strng
def Dot2Dandas(Strng):
Strng = Strng.replace("..", "॥")
Strng = Strng.replace(".", "।")
return Strng
def SaurastraHaaruColon(Strng):
vir = Tamil.ViramaMap[0]
ha = Tamil.ConsonantMap[-1]
Strng = Strng.replace(vir + ha, ":")
ListVS = "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tamil"))
Strng = re.sub("(:)" + "(" + ListVS + ")", r"\2\1", Strng)
Strng = re.sub("(\s)(ன)", r"\1" + "ந", Strng)
Strng = re.sub("^ன", "ந", Strng)
return Strng
def TamilExtendedNNA(Strng):
na = TamilExtended.ConsonantMap[19]
nna = TamilExtended.SouthConsonantMap[3]
vir = TamilExtended.ViramaMap[0]
ta = TamilExtended.ConsonantMap[15]
ListV = "|".join(
GM.CrunchSymbols(GM.Vowels + GM.VowelSigns + GM.Consonants, "TamilExtended")
+ [TamilExtended.SignMap[0]]
)
Strng = re.sub(
"(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ")",
r"\1\2" + nna,
Strng,
)
Strng = re.sub(
"(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ")",
r"\1\2" + nna,
Strng,
)
Strng = re.sub("(ന്)(?![തഥദധ])", "ഩ്", Strng)
Strng = re.sub("(\s)ഩ്", r"\1" + "ന്", Strng)
Strng = re.sub("^ഩ്", r"" + "ന്", Strng)
Strng = TamilpredictDentaNaExtended(Strng)
return Strng
def TakriRemoveGemination(Strng):
Strng = re.sub("(.)" + Takri.ViramaMap[0] + r"\1", r"\1", Strng)
return Strng
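# Syllabization helpers: Mongolian, Tibetan and Soyombo write text in
# syllable units, so the three converters below re-insert the syllable
# separator (space or tsheg) after every complete akshara.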
def MongolianSyllabize(Strng):
vowels = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Mongolian") + ["\u1820"]) + ")"
consonants = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Mongolian")) + ")"
Strng = re.sub(consonants + "?" + vowels, r"\1\2" + " ", Strng)
Strng = re.sub("(\u180E\u1820)" + consonants, r"\1 \2", Strng)
Strng = re.sub("\u1820 ", "\u1820\u180B ", Strng)
Strng = Strng.replace("ᠣᠸᠠ᠋", "ᠣᠸᠠ")
Strng = Strng.replace("ᠣᠸᠸᠠ᠋", "ᠣᠸᠸᠠ")
Strng = Strng.replace(" \u180E", "\u180E")
Strng = Strng.replace(" " + "\u200B", "")
Strng = Strng.replace(" ᢁ", "ᢁ")
return Strng
def TibetanSyllabize(Strng):
vowels = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Tibetan")) + ")"
consonants = (
"("
+ "|".join(
GM.CrunchSymbols(GM.Consonants, "Tibetan") + ["ཨ", "ཅ", "ཆ", "ཇ", "ཇྷ"]
)
+ ")"
)
vowelsigns = (
"(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tibetan") + ["\u0F80"]) + ")"
)
combiningSigns = (
"("
+ "|".join(GM.CrunchSymbols(GM.CombiningSigns, "Tibetan") + ["\u0F82"])
+ ")"
)
ListSubC = (
"("
+ "|".join([chr(x + 80) for x in range(0x0F40, 0x0F68)] + ["ྻ", "ྺ", "ྼ"])
+ ")"
)
Strng = re.sub(vowelsigns + combiningSigns + "?", r"\1\2་", Strng)
Strng = re.sub(consonants, r"\1་", Strng)
Strng = re.sub(ListSubC, r"\1་", Strng)
Strng = re.sub("་" + vowelsigns, r"\1", Strng)
Strng = re.sub("་" + ListSubC, r"\1", Strng)
Strng = re.sub("་" + combiningSigns, r"\1", Strng)
Strng = re.sub(combiningSigns, r"\1་", Strng)
Strng = Strng.replace("་་", "་")
return Strng
def SoyomboSyllabize(Strng):
vowels = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Soyombo")) + ")"
consonants = (
"("
+ "|".join(GM.CrunchSymbols(GM.Consonants, "Soyombo") + ["𑩐", "\U00011A83"])
+ ")"
)
vowelsigns = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Soyombo")) + ")"
combiningSigns = (
"(" + "|".join(GM.CrunchSymbols(GM.CombiningSigns, "Soyombo")) + ")"
)
fin = (
"("
+ "|".join(
[
"\U00011A8A",
"\U00011A8B",
"\U00011A8C",
"\U00011A8D",
"\U00011A8E",
"\U00011A8F",
"\U00011A90",
"\U00011A91",
"\U00011A92",
"\U00011A93",
"\U00011A94",
]
)
+ ")"
)
Strng = re.sub(vowelsigns + combiningSigns + "?", r"\1\2 ", Strng)
Strng = re.sub(consonants, r"\1 ", Strng)
Strng = re.sub(" " + vowelsigns, r"\1", Strng)
Strng = re.sub(" " + combiningSigns, r"\1", Strng)
Strng = re.sub("\U00011A99" + " ", "\U00011A99", Strng)
Strng = re.sub(combiningSigns, r"\1 ", Strng)
Strng = re.sub(" 𑪘", "\U00011A98", Strng)
Strng = re.sub(fin, r"\1 ", Strng)
Strng = re.sub("( )" + fin, r"\2 ", Strng)
return Strng
def TakriArchaicKha(Strng):
return Strng.replace("𑚸", "𑚋")
def TeluguReph(Strng):
consonants = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Telugu")) + ")"
Strng = re.sub("ర్" + consonants, "ర్" + r"\1", Strng)
Strng = Strng.replace("\u0C4Dర్", "\u0C4Dర్")
return Strng
def PhagsPaTib(Strng):
return Strng
def PhagsPaSeal(Strng):
return Strng
def TamilExtendedAnusvara(Strng):
Strng = AnusvaraToNasal(Strng, "TamilExtended")
Strng = Strng.replace("\u0D02", "മ്")
return Strng
def RomanReadableLongEO(Strng):
Strng = Strng.replace("o", "oa")
Strng = Strng.replace("oa'", "o")
Strng = Strng.replace("e", "ae")
Strng = Strng.replace("ae'", "e")
Strng = Strng.replace("aeae", "ee")
Strng = Strng.replace("oaoa", "oo")
return Strng
def TeluguArasunnaChandrabindu(Strng):
Strng = Strng.replace("ఀ", "ఁ")
return Strng
def MarchenSanskritPalatals(Strng):
tsaSeries = ["\U00011C82", "\U00011C83", "\U00011C84"]
    jaSeries = ["\U00011C76", "\U00011C77", "\U00011C78"]
    for x, y in zip(tsaSeries, jaSeries):
Strng = Strng.replace(x, y)
return Strng
def SoyomboSanskritPalatals(Strng):
tsaSeries = ["𑩵", "𑩶", "𑩷"]
caSeries = ["𑩡", "𑩢", "𑩣"]
for x, y in zip(tsaSeries, caSeries):
Strng = Strng.replace(x, y)
return Strng
def TibetanSanskritPalatals(Strng):
caSeries = ["ཅ", "ཆ", "ཇ", "ཇྷ"]
tsaSeries = ["ཙ", "ཚ", "ཛ", "ཛྷ"]
for x, y in zip(tsaSeries, caSeries):
Strng = Strng.replace(x, y)
return Strng
def ZanabazarSanskritPalatals(Strng):
tsaSeries = ["𑨣", "𑨤", "𑨥"]
caSeries = ["𑨐", "𑨑", "𑨒"]
for x, y in zip(tsaSeries, caSeries):
Strng = Strng.replace(x, y)
return Strng
def SoyomboFinals(Strng):
return Strng
def SoyomboInitials(Strng):
viraCon = ["\U00011A7C\U00011A99", "\U00011A7D\U00011A99", "\U00011A81\U00011A99"]
initial = ["\U00011A86", "\U00011A87", "\U00011A89"]
for x, y in zip(viraCon, initial):
Strng = Strng.replace(x, y)
return Strng
def ZanzabarSpaceTsheg(Strng):
Strng = Strng.replace(" ", "\U00011A41")
return Strng
def SoyomboSpaceTscheg(Strng):
Strng = Strng.replace(" ", "\U00011A9A")
return Strng
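# Convert the anusvara to the homorganic class nasal in IAST/ISO
# romanization, e.g. "saṁkalpa" -> "saṅkalpa", "saṃtoṣa" -> "santoṣa".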
def AnusvaratoNasalASTISO(Strng):
Strng = Strng.replace("ṁ", "ṃ")
Strng = re.sub("(ṃ)(k|g)", "ṅ" + r"\2", Strng)
Strng = re.sub("(ṃ)(c|j)", "ñ" + r"\2", Strng)
Strng = re.sub("(ṃ)(ṭ|ḍ)", "ṇ" + r"\2", Strng)
Strng = re.sub("(ṃ)(t|d)", "n" + r"\2", Strng)
Strng = re.sub("(ṃ)(p|b)", "m" + r"\2", Strng)
return Strng
def removeDiacritics(Strng):
diacritics = [
"\u0331",
"\u0306",
"\u0323",
"\u035F",
"\u0324",
"\u035F",
"\u0307",
"\u0301",
"\u0303",
"\u0310",
"\u0306",
"\u0302",
"\u0304",
]
for dia in diacritics:
Strng = Strng.replace(dia, "")
vowelDia = ["а̄", "ӣ", "ӯ", "ӗ"]
vowel = ["\u0430", "\u0438", "\u0443", "\u044D"]
for x, y in zip(vowelDia, vowel):
Strng = Strng.replace(x, y)
return Strng
def ranjanalantsa(Strng):
Strng = Strng.replace("་", " ")
return Strng
def ranjanawartu(Strng):
Strng = Strng.replace("་", "࿎ ")
return Strng
def TaiKuen(Strng):
return Strng
def TaiThamLao(Strng):
return Strng
def egrantamil(Strng):
return Strng
def tibetandbumed(Strng):
return Strng
def oldtamilortho(Strng):
return Strng
def nepaldevafont(Strng):
return Strng
def granthaserif(Strng):
return Strng
def ChakmaPali(Strng):
listC = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.Consonants, "Chakma") + Chakma.VowelMap[:1],
key=len,
reverse=True,
)
)
+ ")"
)
listV = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.VowelSigns, "Chakma")
+ Chakma.ViramaMap
+ ["\U00011133"],
key=len,
reverse=True,
)
)
+ ")"
)
Strng = ChakmaGemination(Strng, reverse=True)
Strng = Strng.replace("𑄤", "\U00011147")
Strng = Strng.replace("𑄡", "𑄠")
Strng = Strng.replace("\U00011127", "\u02BE")
Strng = re.sub(
"(" + listC + ")" + "(?!" + listV + "|\u02BE" + ")", r"\1" "\U00011127", Strng
)
Strng = Strng.replace("\u02BE", "")
Strng = Strng.replace("\U00011127", "\U00011102")
Strng = Strng.replace("\U00011133", "\U00011134")
return Strng
def ThaiSajjhayawithA(Strng):
Strng = ThaiSajjhayaOrthography(Strng)
Strng = Strng.replace("ัง", "ังฺ")
Strng = ThaiTranscription(Strng, anusvaraChange=True)
Strng = Strng.replace("ะํ", "ํ")
Strng = Strng.replace("ะั", "ั")
Strng = Strng.replace("ะ๎", "๎")
Strng = re.sub("([เโไ])(.๎)([ยรลวศษสหฬ])ะ", r"\1\2\3", Strng)
Strng = Strng.replace("\u0E32\u0E4D", "\u0E33").replace("\u0E34\u0E4D", "\u0E36")
return Strng
def LaoSajjhaya(Strng):
Strng = ThaiSajjhayaOrthography(Strng, Script="LaoPali")
Strng = re.sub("([ເໂໄ])(.)(\u0ECE)", r"\2\3\1", Strng)
return Strng
def LaoSajjhayawithA(Strng):
Strng = LaoSajjhaya(Strng)
Strng = Strng.replace("\u0ECE", "\u0E4E")
Strng = Strng.replace("ັງ", "ັງ຺")
Strng = CF.LaoPaliTranscribe(Strng, anusvaraChange=True)
Strng = Strng.replace("ະໍ", "ໍ")
Strng = Strng.replace("ະັ", "ັ")
Strng = Strng.replace("ະ๎", "๎")
Strng = Strng.replace("ະ໌", "໌")
Strng = Strng.replace("ະົ", "ົ")
Strng = re.sub("([ເໂໄ])(.๎)([ຍຣລວຨຩສຫຬ])ະ", r"\1\2\3", Strng)
Strng = Strng.replace("າໍ", "ຳ")
Strng = Strng.replace("\u0E4E", "\u0ECE")
return Strng
def UseAlternateVSU(Strng):
Strng = Strng.replace("𑖲", "𑗜")
return Strng
def UseAlternateVSUU(Strng):
Strng = Strng.replace("𑖳", "𑗝")
return Strng
def UseAlternateU(Strng):
Strng = Strng.replace("𑖄", "𑗛")
return Strng
def UseAlternateI1(Strng):
Strng = Strng.replace("𑖂", "𑗘")
return Strng
def UseAlternateI2(Strng):
Strng = Strng.replace("𑖂", "𑗙")
return Strng
def UseAlternateII(Strng):
Strng = Strng.replace("𑖃", "𑗚")
return Strng
def GranthaOldau(Strng):
Strng = Strng.replace("𑍗", "𑍌")
return Strng
def DevanagariACandra(Strng):
Strng = Strng.replace("ऍ", "ॲ")
return Strng
def WarangCitiModernOrthogaphy(Strng):
Strng = re.sub(
"([\U000118D4\U000118D5\U000118CC\U000118CB\U000118CF\U000118CE\U000118D2\U000118D1\U000118D5\U000118D4\U000118D8\U000118D7\U000118DB])(\u200D)(𑣙)",
r"\1",
Strng,
)
Strng = Strng.replace("𑣝", "𑣞")
Strng = Strng.replace("\u200D", "")
return Strng
def ChakmaEnableAllConjuncts(Strng):
listC = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.Consonants, "Chakma") + Chakma.VowelMap[:1],
key=len,
reverse=True,
)
)
+ ")"
)
Strng = re.sub("\U00011134" + "(" + listC + ")", "\U00011133" + r"\1", Strng)
Strng = ChakmaGemination(Strng)
return Strng
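# Chakma has two viramas: the explicit (visible) maayyaa U+11134 and the
# conjoining virama U+11133. Geminates written as C + virama + C are
# contracted to a single consonant carrying the explicit virama, and are
# expanded back when reverse=True.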
def ChakmaGemination(Strng, reverse=False):
ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Chakma")) + ")"
virs = "([\U00011134\U00011133])"
virExp = "\U00011134"
virDep = "\U00011133"
ListV = (
"("
+ "|".join(
sorted(GM.CrunchSymbols(GM.VowelSignsNV, "Chakma"), key=len, reverse=True)
)
+ ")"
)
if not reverse:
Strng = re.sub(ListC + virs + r"\1" + ListV, r"\1" + virExp + r"\3", Strng)
Strng = re.sub(
ListC + virExp + r"\1" + virDep + ListC,
r"\1" + virExp + virDep + r"\2",
Strng,
)
Strng = re.sub(
ListC + virDep + r"\1" + virDep + ListC,
r"\1" + virExp + virDep + r"\2",
Strng,
)
Strng = re.sub(
virDep + ListC + virExp + ListV, virExp + r"\1" + virExp + r"\2", Strng
)
else:
Strng = re.sub(ListC + virExp + ListV, r"\1" + virExp + r"\1" + r"\2", Strng)
Strng = re.sub(ListC + virExp + virDep, r"\1" + virExp + r"\1" + virDep, Strng)
return Strng
def ChakmaVowelsIndependent(Strng):
vowelDepA = ["𑄃𑄨", "𑄃𑄪", "𑄃𑄬"]
vowelIndep = ["\U00011104", "\U00011105", "\U00011106"]
for x, y in zip(vowelDepA, vowelIndep):
Strng = Strng.replace(x, y)
return Strng
def MultaniAbjad(Strng):
ListAll = (
"(" + "|".join(GM.CrunchSymbols(GM.Characters, "Multani") + ["𑊓", "𑊍"]) + ")"
)
ListC = (
"(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Multani") + ["𑊓", "𑊍"]) + ")"
)
ListV = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Multani") + ["𑊓", "𑊍"]) + ")"
Strng = re.sub(ListC + ListV + ListC, r"\1\3", Strng)
Strng = re.sub("(" + ListC + "{2,})" + ListV, r"\1", Strng)
Strng = re.sub(ListV + ListC + ListV, r"\1\2", Strng)
return Strng
def LaoNative(Strng):
Strng = re.sub("ຕ([ເແໂໄ]?)ຕ", "ດ" + r"\1" + "ຕ", Strng)
Strng = re.sub("ຕ([ເແໂໄ]?)ຖ", "ດ" + r"\1" + "ຖ", Strng)
Strng = re.sub("ທ([ເແໂໄ]?)ທ", "ດ" + r"\1" + "ທ", Strng)
Strng = re.sub("ສ([ເແໂໄ]?)ສ", "ດ" + r"\1" + "ສ", Strng)
Strng = re.sub("ປ([ເແໂໄ]?)ປ", "ບ" + r"\1" + "ປ", Strng)
Strng = re.sub("ພ([ເແໂໄ]?)ພ", "ບ" + r"\1" + "ພ", Strng)
return Strng
def SundaneseHistoricConjuncts(Strng, reverse=False):
ListC = "|".join(
GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Sundanese")
)
if not reverse:
Strng = Strng.replace("᮪ᮙ", "\u1BAC")
ListC = "|".join(
GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Sundanese")
)
Strng = re.sub("(" + ListC + ")" + "ᮊ᮪", r"\1" + "ᮾ", Strng)
Strng = re.sub("(" + ListC + ")" + "ᮙ᮪", r"\1" + "ᮿ", Strng)
else:
Strng = Strng.replace("\u1BAC", "᮪ᮙ")
Strng = Strng.replace("\u1BAD", "᮪ᮝ")
Strng = Strng.replace("ᮾ", "ᮊ᮪")
Strng = Strng.replace("ᮿ", "ᮙ᮪")
return Strng
def LimbuSpellingSaI(Strng):
vir = Limbu.ViramaMap[0]
FCons = [
x + vir for x in [Limbu.ConsonantMap[x] for x in [0, 4, 15, 19, 20, 24, 26, 27]]
]
FinalCons = [
"\u1930",
"\u1931",
"\u1933",
"\u1934",
"\u1935",
"\u1936",
"\u1937",
"\u1938",
]
for x, y in zip(FCons, FinalCons):
Strng = Strng.replace("\u193A" + y, x)
Strng = Strng.replace("\u193A\u1922" + y, "\u1922" + x)
return Strng
def siddhammukta(Strng):
return Strng
def tradOrtho(Strng):
return Strng
def siddhamap(Strng):
return Strng
def KhojkiRetainSpace(Strng):
Strng = Strng.replace("\U0001123A", " ")
return Strng
def BhaiksukiRetainSpace(Strng):
Strng = Strng.replace("𑱃", " ")
return Strng
def KaithiRetainSpace(Strng):
Strng = Strng.replace("⸱", " ")
return Strng
def MedievalTamilOrthography(Strng):
OldEO = ["எ்", "ெ்", "ஒ்", "ெ்ா", "எ", "ெ", "ஒ", "ொ"]
NewEO = ["எ", "ெ", "ஒ", "ொ", "ஏ", "ே", "ஓ", "ோ"]
for x, y in zip(NewEO, OldEO):
Strng = Strng.replace(x, y)
return Strng
def AmbigousTamilOrthography(Strng):
return Strng
def NewaMurmurConsonants(Strng):
murmur = ["𑐓", "𑐙", "𑐤", "𑐪", "𑐭", "𑐯"]
connsh = ["𑐴𑑂𑐒", "𑐴𑑂𑐘", "𑐴𑑂𑐣", "𑐴𑑂𑐩", "𑐴𑑂𑐬", "𑐴𑑂𑐮"]
for x, y in zip(murmur, connsh):
Strng = Strng.replace(y, x)
return Strng
def ModiRemoveLong(Strng):
Strng = Strng.replace("𑘂", "𑘃")
Strng = Strng.replace("𑘅", "𑘄")
Strng = Strng.replace("𑘱", "𑘲")
Strng = Strng.replace("𑘴", "𑘳")
Strng = Strng.replace("𑘆", "𑘨𑘲")
Strng = Strng.replace("𑘇", "𑘨𑘲")
Strng = Strng.replace("𑘈", "𑘩𑘲")
Strng = Strng.replace("𑘉", "𑘩𑘲")
Strng = Strng.replace("𑘵", "𑘿𑘨𑘲")
Strng = Strng.replace("𑘶", "𑘿𑘨𑘲")
Strng = Strng.replace("𑘷", "𑘿𑘩𑘲")
Strng = Strng.replace("𑘸", "𑘿𑘩𑘲")
return Strng
def LimbuDevanagariConvention(Strng):
Strng = Strng.replace("ऎ", "ए़")
Strng = Strng.replace("ऒ", "ओ़")
Strng = Strng.replace("ॆ", "े़")
Strng = Strng.replace("ॊ", "ो़")
Strng = Strng.replace("꞉", "ः")
return Strng
def NandinagariPrishtamatra(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("𑧚", "𑧤")
Strng = Strng.replace("𑧛", "𑧤𑧚")
Strng = Strng.replace("𑧜", "𑧤𑧑")
Strng = Strng.replace("𑧝", "𑧤𑧜")
else:
Strng = Strng.replace("𑧤𑧚", "𑧛")
Strng = Strng.replace("𑧤𑧑", "𑧜")
Strng = Strng.replace("𑧤𑧜", "𑧝")
Strng = Strng.replace("𑧤", "𑧚")
return Strng
def DevanagariPrishtamatra(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("े", "ॎ")
Strng = Strng.replace("ै", "ॎे")
Strng = Strng.replace("ो", "ॎा")
Strng = Strng.replace("ौ", "ॎो")
else:
Strng = Strng.replace("ॎे", "ै")
Strng = Strng.replace("ॎो", "ौ")
Strng = Strng.replace("ॎा", "ो")
Strng = Strng.replace("ॎ", "े")
return Strng
def ThaanaRemoveHistorical(Strng):
return Strng.replace("ޱ", "ނ")
def OriyaVaAlt(Strng):
return Strng.replace("ୱ", "ଵ")
def GurmukhiYakaash(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("੍ਯ", "ੵ")
else:
Strng = Strng.replace("ੵ", "੍ਯ")
return Strng
def dotReph(Strng):
ListC = "(" + "|".join(sorted(GM.CrunchSymbols(GM.Consonants, "Malayalam"))) + ")"
Strng = re.sub("(?<!്)" + "ർ" + ListC, "ൎ" + r"\1", Strng)
Strng = re.sub("(?<!്)" + "ര്" + ListC, "ൎ" + r"\1", Strng)
return Strng
def TamilGranthaVisarga(Strng):
Strng = Strng.replace("꞉", "𑌃")
return Strng
def archaicAIAU(Strng):
Strng = Strng.replace("ൗ", "ൌ")
Strng = Strng.replace("ഈ", "ൟ")
return Strng
def MalayalamremoveHistorical(Strng):
Strng = Strng.replace("\u0D29", "\u0D28")
Strng = Strng.replace("ന്", "ൻ")
return Strng
def LimburemoveHistorical(Strng):
removePairs = [("ᤉ", "ᤈ"), ("ᤊ", "ᤏ"), ("ᤚ", "ᤙ"), ("ᤲ", "ᤱ")]
for x, y in removePairs:
Strng = Strng.replace(x, y)
return Strng
def MalayalamPrakrit(Strng):
Strng = Strng.replace("ം", "ഀ")
Strng = InsertGeminationSign(Strng, "Malayalam")
return Strng
def GranthaPrakrit(Strng):
Strng = Strng.replace("𑌂", "𑌀")
Strng = InsertGeminationSign(Strng, "Grantha")
pat = r"\s𑌂."
Strng = functools.reduce(
lambda s, m: s.replace(m, ReverseGeminationSign(m, "Grantha")),
re.findall(pat, Strng),
Strng,
)
pat = r"𑍍𑌂."
Strng = functools.reduce(
lambda s, m: s.replace(m, ReverseGeminationSign(m, "Grantha")),
re.findall(pat, Strng),
Strng,
)
return Strng
def MeeteiMayekremoveHistorical(Strng):
removePairs = [
("ꫢ", "ꯆ"),
("ꫣ", "ꯅ"),
("ꫤ", "ꯇ"),
("ꫥ", "ꯊ"),
("ꫦ", "ꯗ"),
("ꫧ", "ꯙ"),
("ꫨ", "ꯅ"),
("ꫩ", "ꯁ"),
("ꫪ", "ꯁ"),
("\uAAF5", "ꯍ꯭"),
("ꯑꫫ", "ꯏ"),
("ꯑꫬ", "ꯎ"),
("ꫫ", "ꯤ"),
("ꫬ", "ꯨ"),
]
for x, y in removePairs:
Strng = Strng.replace(x, y)
return Strng
def TamilOmDisable(Strng):
return Strng.replace("ௐ", "ஓம்")
def TamilSHADisable(Strng):
return Strng.replace("ஶ", "ஸ²")
def TamilNaToNNa(Strng):
na = Tamil.ConsonantMap[19]
nna = Tamil.SouthConsonantMap[3]
vir = Tamil.ViramaMap[0]
ta = Tamil.ConsonantMap[15]
ListV = "|".join(
GM.CrunchSymbols(GM.Vowels + GM.VowelSigns + GM.Consonants, "Tamil")
+ [Tamil.SignMap[0].replace("(", "\(").replace(")", "\)")]
)
Strng = re.sub(
"(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")",
r"\1\2" + nna,
Strng,
)
Strng = re.sub(
"(" + ListV + ")" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")",
r"\1\2" + nna,
Strng,
)
Strng = re.sub(
"(²|³|⁴)" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")",
r"\1\2" + nna,
Strng,
)
Strng = re.sub(
"(²|³|⁴)" + GM.VedicSvaras + "(" + na + ")" + "(?!" + vir + ta + ")",
r"\1\2" + nna,
Strng,
)
Strng = re.sub("(?<=ஶ்ரீ)(ன)(?!" + vir + ")", "ந", Strng)
return Strng
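# Replace a final nasal or liquid (ṇ, n, r, l, ḷ) + virama with the
# corresponding Malayalam chillu letter, except before the consonant classes
# (CList) with which a regular conjunct should be kept; reverse=True
# restores the virama forms.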
def MalayalamChillu(Strng, reverse=False, preserve=False):
Chillus = ["\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E", "ഩ്"]
ListC = "(" + "|".join(GM.CrunchSymbols(GM.CharactersNV, "Malayalam") + ["ഽ"]) + ")"
vir = Malayalam.ViramaMap[0]
ConVir = [
Malayalam.ConsonantMap[14] + vir,
Malayalam.ConsonantMap[19] + vir,
Malayalam.ConsonantMap[26] + vir,
Malayalam.ConsonantMap[27] + vir,
Malayalam.SouthConsonantMap[0] + vir,
"ഩ്",
]
CList = [
Malayalam.ConsonantMap[10:15]
+ Malayalam.ConsonantMap[24:26]
+ Malayalam.ConsonantMap[28:29],
Malayalam.ConsonantMap[15:20]
+ Malayalam.ConsonantMap[24:27]
+ Malayalam.ConsonantMap[28:29],
Malayalam.ConsonantMap[25:27],
Malayalam.ConsonantMap[20:21]
+ Malayalam.ConsonantMap[24:26]
+ Malayalam.ConsonantMap[27:29],
Malayalam.SouthConsonantMap[0:1] + Malayalam.ConsonantMap[25:27],
Malayalam.ConsonantMap[15:20]
+ Malayalam.ConsonantMap[24:27]
+ Malayalam.ConsonantMap[28:29],
]
if not reverse:
for i in range(len(Chillus)):
Strng = re.sub(
ListC
+ GM.VedicSvaras
+ "("
+ ConVir[i]
+ ")"
+ "(?!["
+ "".join(CList[i])
+ "])",
r"\1\2" + Chillus[i],
Strng,
)
Strng = re.sub(
ListC
+ GM.VedicSvaras
+ "("
+ ConVir[i]
+ ")"
+ "(?=(["
+ "".join(CList[i])
+ "])"
+ vir
+ r"\4"
+ ")",
r"\1\2" + Chillus[i],
Strng,
)
Strng = re.sub("(?<!ത്)ˍ", "", Strng)
else:
if preserve:
for x, y in zip(Chillus, ConVir):
Strng = Strng.replace(x, y + "ˍ")
else:
for x, y in zip(Chillus, ConVir):
Strng = Strng.replace(x, y)
return Strng
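# Abjad-style schwa deletion: append a virama to a bare final consonant and
# to the first consonant of a medial CC cluster that carries a vowel sign.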
def RemoveSchwa(Strng, Target):
    # vir was accidentally doubled (the virama sign concatenated with
    # itself); a single virama is intended here.
    vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, Target))
ListV = "|".join(GM.CrunchSymbols(GM.Vowels, Target))
ListVS = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target))
ListAll = "|".join(
GM.CrunchSymbols(
GM.Vowels + GM.VowelSigns + GM.Consonants + GM.CombiningSigns, Target
)
)
Strng = re.sub(
"(" + ListAll + ")" + "(" + ListC + ")" + "(?!" + ListAll + ")",
r"\1\2" + vir,
Strng,
)
Strng = re.sub(
"("
+ ListAll
+ ")"
+ "(?<!"
+ vir
+ ")"
+ "("
+ ListC
+ ")"
+ "("
+ ListC
+ ")"
+ "("
+ ListVS
+ ")",
r"\1\2" + vir + r"\3\4",
Strng,
)
return Strng
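# Contract C + virama + C (same consonant) into the gemination sign + C;
# likewise unaspirated + virama + its aspirate contracts to the gemination
# sign + aspirate.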
def InsertGeminationSign(Strng, Target):
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
ConUnAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [
0,
2,
5,
7,
10,
12,
15,
17,
20,
22,
4,
9,
14,
19,
24,
25,
26,
27,
28,
29,
30,
31,
32,
]
]
ConUnAsp = (
ConUnAsp
+ GM.CrunchList("SouthConsonantMap", Target)
+ GM.CrunchList("NuktaConsonantMap", Target)
)
ConAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [1, 3, 6, 8, 11, 13, 16, 18, 21, 23]
]
ConOthrs = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24]
]
Strng = re.sub(
"(" + "|".join(ConUnAsp) + ")" + "(" + vir + ")" + r"\1",
GM.Gemination[Target] + r"\1",
Strng,
)
for i in range(len(ConAsp)):
Strng = re.sub(
"(" + ConUnAsp[i] + ")" + "(" + vir + ")" + "(" + ConAsp[i] + ")",
GM.Gemination[Target] + r"\3",
Strng,
)
return Strng
def ReverseGeminationSign(Strng, Target):
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
ConUnAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [
0,
2,
5,
7,
10,
12,
15,
17,
20,
22,
4,
9,
14,
19,
24,
25,
26,
27,
28,
29,
30,
31,
32,
]
]
ConUnAsp = (
ConUnAsp
+ GM.CrunchList("SouthConsonantMap", Target)
+ GM.CrunchList("NuktaConsonantMap", Target)
)
ConAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [1, 3, 6, 8, 11, 13, 16, 18, 21, 23]
]
ConOthrs = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24]
]
Strng = re.sub(
"(" + GM.Gemination[Target] + ")" + "(" + "|".join(ConUnAsp) + ")",
r"\2" + vir + r"\2",
Strng,
)
for i in range(len(ConAsp)):
Strng = re.sub(
"(" + GM.Gemination[Target] + ")" + "(" + ConAsp[i] + ")",
ConUnAsp[i] + vir + r"\2",
Strng,
)
return Strng
def GurmukhiTippiBindu(Strng):
Bindi = Gurmukhi.AyogavahaMap[1]
Tippi = "\u0A70"
ListTippi = "|".join(
GM.CrunchSymbols(GM.Consonants, "Gurmukhi")
+ [Gurmukhi.VowelMap[x] for x in [0, 2, 3, 4]]
+ [Gurmukhi.VowelSignMap[1]]
+ [Gurmukhi.VowelSignMap[3]]
+ [Gurmukhi.VowelSignMap[4]]
)
Char = "|".join(
GM.CrunchSymbols(GM.Consonants, "Gurmukhi")
+ GM.CrunchSymbols(GM.Vowels, "Gurmukhi")
)
Strng = re.sub(
"(" + Gurmukhi.VowelSignMap[4] + ")" + Bindi + "(?!" + Char + ")",
r"\1" + Tippi,
Strng,
)
Strng = re.sub("(" + ListTippi + ")" + "(" + Bindi + ")", r"\1" + Tippi, Strng)
return Strng
def GurmukhiTippiGemination(Strng):
n = Gurmukhi.ConsonantMap[19]
m = Gurmukhi.ConsonantMap[24]
vir = Gurmukhi.ViramaMap[0]
Addak = "ੱ"
Tippi = "\u0A70"
Strng = Strng.replace(Addak + m, Tippi + m)
Strng = Strng.replace(Addak + n, Tippi + n)
return Strng
def BengaliConjunctVB(Strng):
Strng = Strng.replace("\u09CD\u200C\u09AC", "\u09CD\u09AC")
Strng = khandatabatova(Strng)
return Strng
def khandatabatova(Strng):
Strng = Strng.replace("ৎব", "ত্ব")
Strng = Strng.replace("ৎ\u200Cব", "ত্ব")
return Strng
def BengaliRaBa(Strng):
Strng = (
Strng.replace("ব", "ৰ")
.replace("ভ়", "ব")
.replace("ৰু", "ৰু")
.replace("ৰূ", "ৰূ")
)
Strng = Strng.replace("\u09CD\u09F0", "\u09CD\u200C\u09F0")
Strng = re.sub(
"(\u09F0)(\u09CD)([\u09B0\u09AF])", r"\1" + "\u200D" + r"\2\3", Strng
)
Strng = re.sub("(\u09F0)(\u09CD)", r"\1\2" + "\u200C", Strng)
Strng = Strng.replace("র্ৰ", "ৰ্ৰ")
return Strng
def BengaliIntervocalicDDA(Strng):
Target = "Bengali"
ListC = "|".join(
GM.CrunchSymbols(GM.Characters, Target)
+ [GM.CrunchList("SignMap", Target)[0]]
+ ["ৰ"]
)
replacements = [("ড", "ড়"), ("ঢ", "ঢ়")]
for x, y in replacements:
Strng = re.sub("(" + ListC + ")" + GM.VedicSvaras + x, r"\1\2" + y, Strng)
return Strng
def KhandaTa(Strng, Target, reverse=False):
ta = GM.CrunchSymbols(GM.Consonants, Target)[15]
khandata = "\u09CE"
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
ListC = "|".join(
[
GM.CrunchList("ConsonantMap", Target)[x]
for x in [15, 16, 19, 27, 24, 25, 26, 28]
]
+ ["ৰ", "য়"]
)
if not reverse:
Strng = re.sub(
"(?<!" + vir + ")" + "(" + ta + ")" + "(" + vir + ")" + "(?!" + ListC + ")",
khandata,
Strng,
)
Strng = Strng.replace("ৎˍ", "ৎ")
else:
Strng = Strng.replace(khandata, ta + vir)
return Strng
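# Replace a class nasal + virama before a stop of the same varga with the
# anusvara (the inverse of AnusvaraToNasal below); Vedic svara marks are
# reordered so they follow the anusvara.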
def NasalToAnusvara(Strng, Target):
ListN = [GM.CrunchSymbols(GM.Consonants, Target)[x] for x in [4, 9, 14, 19, 24]]
ListC = [
"|".join(GM.CrunchList("ConsonantMap", Target)[0:4]),
"|".join(GM.CrunchList("ConsonantMap", Target)[5:9]),
"|".join(GM.CrunchList("ConsonantMap", Target)[10:14]),
"|".join(GM.CrunchList("ConsonantMap", Target)[15:19]),
"|".join(GM.CrunchList("ConsonantMap", Target)[20:24]),
]
ListCAll = "(" + "|".join(GM.CrunchSymbols(GM.Characters, Target)) + ")"
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
Anu = GM.CrunchSymbols(GM.CombiningSigns, Target)[1]
for i in range(len(ListN)):
Strng = re.sub(
ListCAll
+ GM.VedicSvaras
+ "(?<!"
+ vir
+ ")"
+ "("
+ ListN[i]
+ ")"
+ "("
+ vir
+ ")"
+ "("
+ ListC[i]
+ ")",
r"\1\2" + Anu + r"\5",
Strng,
)
Strng = re.sub(
ListCAll
+ GM.VedicSvaras
+ "(?<!"
+ vir
+ ")"
+ "("
+ ListN[i]
+ ")"
+ "("
+ vir
+ ")"
+ "("
+ ListC[i]
+ ")",
r"\1\2" + Anu + r"\5",
Strng,
)
for svara in GM.VedicSvarasList:
Strng = Strng.replace(svara + Anu, Anu + svara)
return Strng
def AnusvaraToNasal(Strng, Target):
ListN = [GM.CrunchSymbols(GM.Consonants, Target)[x] for x in [4, 9, 14, 19, 24]]
ListC = [
"|".join(GM.CrunchList("ConsonantMap", Target)[0:4]),
"|".join(GM.CrunchList("ConsonantMap", Target)[5:9]),
"|".join(GM.CrunchList("ConsonantMap", Target)[10:14]),
"|".join(GM.CrunchList("ConsonantMap", Target)[15:19]),
"|".join(GM.CrunchList("ConsonantMap", Target)[20:24]),
]
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
Anu = GM.CrunchSymbols(GM.CombiningSigns, Target)[1]
for i in range(len(ListN)):
Strng = re.sub(
"(" + Anu + ")" + GM.VedicSvaras + "(" + ListC[i] + ")",
ListN[i] + vir + r"\2\3",
Strng,
)
if Target == "Tamil":
Strng = re.sub(
"(ம்)" + GM.VedicSvaras + "(ʼ)" + "(" + ListC[i] + ")",
ListN[i] + vir + r"\2\4",
Strng,
)
return Strng
def MalayalamAnusvaraNasal(Strng):
ListNNasal = [Malayalam.ConsonantMap[x] for x in [4, 9, 14, 19, 24]]
ListCNasal = [
"|".join(Malayalam.ConsonantMap[0:1]),
"|".join(Malayalam.ConsonantMap[5:8]),
"|".join(Malayalam.ConsonantMap[10:14]),
"|".join(Malayalam.ConsonantMap[15:19]),
"|".join(Malayalam.ConsonantMap[20:21]),
]
ListNAnu = [Malayalam.ConsonantMap[x] for x in [4, 24]]
ListCAnu = [
"|".join(Malayalam.ConsonantMap[1:4]),
"|".join(Malayalam.ConsonantMap[21:24]),
]
vir = Malayalam.ViramaMap[0]
Anu = Malayalam.AyogavahaMap[1]
Chillus = ["\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E", "ഩ്"]
for i in range(len(ListNNasal)):
Strng = re.sub(
"(" + Anu + ")" + "(" + ListCNasal[i] + ")",
ListNNasal[i] + vir + r"\2",
Strng,
)
for i in range(len(ListNAnu)):
Strng = re.sub(
"(?<!["
+ ".".join(Chillus)
+ "])"
+ "("
+ ListNAnu[i]
+ ")"
+ "("
+ vir
+ ")"
+ "("
+ ListCAnu[i]
+ ")",
Anu + r"\3",
Strng,
)
return Strng
def MToAnusvara(Strng, Target):
M = (
GM.CrunchList("ConsonantMap", Target)[24]
+ GM.CrunchList("ViramaMap", Target)[0]
)
vir = GM.CrunchList("ViramaMap", Target)[0]
Anusvara = GM.CrunchList("AyogavahaMap", Target)[1]
ListC = "|".join(GM.CrunchSymbols(GM.Characters, Target))
Chillus = "|".join([vir, "\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E"])
ListCAll = "(" + "|".join(GM.CrunchSymbols(GM.Characters, Target)) + ")"
Strng = re.sub(
ListCAll
+ GM.VedicSvaras
+ "(?<!"
+ vir
+ ")"
+ "("
+ M
+ ")"
+ "(?!"
+ ListC
+ ")",
r"\1\2" + Anusvara,
Strng,
)
for svara in GM.VedicSvarasList:
Strng = Strng.replace(svara + Anusvara, Anusvara + svara)
return Strng
def OriyaYYA(Strng):
return YYAEverywhere(Strng, "Oriya")
def BengaliYYA(Strng):
return YYAEverywhere(Strng, "Bengali")
def YYAEverywhere(Strng, Target):
Ya = GM.CrunchList("ConsonantMap", Target)[25]
YYa = GM.CrunchList("NuktaConsonantMap", Target)[7]
Strng = Strng.replace(Ya, YYa)
return Strng
def YaToYYa(Strng, Target):
YYa = GM.CrunchList("NuktaConsonantMap", Target)[7]
ListC = "|".join(
GM.CrunchSymbols(GM.Characters, Target)
+ [GM.CrunchList("SignMap", Target)[0]]
+ ["ৰ"]
)
ListS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target)) + ")"
Ya = GM.CrunchList("ConsonantMap", Target)[25]
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
ListVarga = "|".join(GM.CrunchList("ConsonantMap", Target)[0:25])
if Target in ["Assamese", "Bengali", "Oriya", "Chakma"]:
Strng = re.sub("(" + ListC + ")" + GM.VedicSvaras + Ya, r"\1\2" + YYa, Strng)
if Target in ["Assamese", "Bengali"]:
Strng = Strng.replace(vir + YYa, vir + Ya)
if Target == "Chakma":
Strng = Strng.replace("𑄠𑄡", "𑄠𑄠")
Strng = Strng.replace(vir + YYa, "\U00011133" + YYa)
return Strng
def VaToBa(Strng, Target):
va = GM.CrunchSymbols(GM.Consonants, Target)[28]
ba = GM.CrunchSymbols(GM.Consonants, Target)[22]
if Target == "Bengali":
pass
Strng = Strng.replace(va, ba)
return Strng
def tbadiff(Strng, Target):
Strng = Strng.replace("ৎব", "ত্ব")
return Strng
def RetainDandasIndic(Strng, Target, reverse=False):
Dandas = GM.CrunchList("SignMap", Target)[1:3]
if not reverse:
Strng = Strng.replace("..", Dandas[1])
Strng = Strng.replace(".", Dandas[0])
else:
Strng = Strng.replace(Dandas[0], ".")
Strng = Strng.replace(Dandas[1], "..")
return Strng
def RetainIndicNumerals(Strng, Target, reverse=False):
NativeNumerals = GM.CrunchList("NumeralMap", Target)
ArabicNumerals = GM.CrunchList("NumeralMap", "ISO")
if not reverse:
for x, y in zip(ArabicNumerals, NativeNumerals):
Strng = re.sub("(?<!h)" + x, y, Strng)
else:
for x, y in zip(NativeNumerals, ArabicNumerals):
Strng = Strng.replace(x, y)
return Strng
def RetainRomanNumerals(Strng, Target, reverse=False):
NativeNumerals = GM.CrunchList("NumeralMap", Target)
ArabicNumerals = GM.CrunchList("NumeralMap", "ISO")
if not reverse:
for y, x in zip(ArabicNumerals, NativeNumerals):
Strng = re.sub("(?<!h)" + x, y, Strng)
else:
for y, x in zip(NativeNumerals, ArabicNumerals):
Strng = Strng.replace(x, y)
return Strng
def RetainTeluguDanda(Strng):
return RetainDandasIndic(Strng, "Telugu")
def RetainTeluguNumerals(Strng):
return RetainIndicNumerals(Strng, "Telugu")
def RetainTamilDanda(Strng):
return RetainDandasIndic(Strng, "Tamil")
def RetainTamilNumerals(Strng):
return RetainIndicNumerals(Strng, "Tamil")
def RetainKannadaDanda(Strng):
return RetainDandasIndic(Strng, "Kannada")
def RetainKannadaNumerals(Strng):
return RetainIndicNumerals(Strng, "Kannada")
def RetainMalayalamDanda(Strng):
return RetainDandasIndic(Strng, "Malayalam")
def RetainMalayalamNumerals(Strng):
return RetainIndicNumerals(Strng, "Malayalam")
def RetainGujaratiDanda(Strng):
return RetainDandasIndic(Strng, "Gujarati")
def RetainGurmukhiNumerals(Strng):
return RetainIndicNumerals(Strng, "Gurmukhi")
def SundaneseRemoveHistoric(Strng):
Strng = Strng.replace("᮪ᮻ", "ᮢᮩ")
Strng = Strng.replace("᮪ᮼ", "ᮣᮩ")
Strng = Strng.replace("ᮻ", "ᮛᮩ")
Strng = Strng.replace("ᮼ", "ᮜᮩ")
Strng = Strng.replace("\u1BBD", "\u1B98")
return Strng
def OriyaVa(Strng):
va = Oriya.ConsonantMap[28]
OriyaVa = "\u0B2C"
Strng = re.sub("(?<!୍)" + va, OriyaVa, Strng)
return Strng
def RemoveDiacritics(Strng):
for x in GM.DiacriticsRemovable:
Strng = Strng.replace(x, "")
return Strng
def RemoveDiacriticsTamil(Strng):
for x in GM.DiacriticsRemovableTamil:
Strng = Strng.replace(x, "")
return Strng
def TamilSubScript(Strng):
SuperScript = ["\u00B9", "\u00B2", "\u00B3", "\u2074"]
SubScript = ["\u2081", "\u2082", "\u2083", "\u2084"]
for x, y in zip(SuperScript, SubScript):
Strng = Strng.replace(x, y)
return Strng
def TamilAddFirstVarga(Strng):
CM = GM.CrunchList("ConsonantMap", "Tamil")
ConUnVoiced = "|".join([CM[x] for x in [0, 5, 10, 15, 20]])
SuperScript = "|".join(["\u00B2", "\u00B3", "\u2074"])
Strng = re.sub(
"(" + ConUnVoiced + ")" + "(?!" + SuperScript + ")", r"\1" + "\u00B9", Strng
)
return Strng
def SaurashtraHaru(Strng):
ListC = "|".join([Saurashtra.ConsonantMap[x] for x in [19, 24, 26, 27]])
vir = Saurashtra.ViramaMap[0]
ha = Saurashtra.ConsonantMap[32]
Strng = re.sub("(" + ListC + ")" + vir + ha, r"\1" + "\uA8B4", Strng)
return Strng
def SinhalaDefaultConjuncts(Strng):
vir = Sinhala.ViramaMap[0]
YR = "|".join(Sinhala.ConsonantMap[25:27])
Strng = re.sub("(" + vir + ")" + "(" + YR + ")", r"\1" + "\u200D" + r"\2", Strng)
Strng = re.sub(
"(" + YR[2] + ")" + "(" + vir + ")" + "(" + "\u200D" + ")" + "(" + YR[0] + ")",
r"\1\3\2\3\4",
Strng,
)
Strng = Strng.replace(
Sinhala.ConsonantMap[7] + Sinhala.ViramaMap[0] + Sinhala.ConsonantMap[9],
"\u0DA5",
)
Strng = Strng.replace(
Sinhala.ConsonantMap[0] + vir + Sinhala.ConsonantMap[30],
Sinhala.ConsonantMap[0] + vir + "\u200D" + Sinhala.ConsonantMap[30],
)
Strng = Strng.replace("ර්ය", "ර්ය")
Strng = Strng.replace("ර්ර", "ර්ර")
return Strng
def IASTPali(Strng):
Strng = Strng.replace("l̤", "ḷ")
return Strng
def CyrillicPali(Strng):
Strng = Strng.replace("л̤", "л̣")
return Strng
def SinhalaConjuncts(Strng):
ListC = Sinhala.ConsonantMap + [Sinhala.SouthConsonantMap[0]]
vir = Sinhala.ViramaMap[0]
ZWJ = "\u200D"
conjoining = [
(0, 28),
(2, 18),
(9, 5),
(10, 11),
(15, 16),
(15, 28),
(17, 18),
(17, 28),
(19, 16),
(19, 17),
(19, 18),
(19, 28),
]
for x, y in conjoining:
Strng = Strng.replace(
ListC[x] + vir + ListC[y], ListC[x] + vir + ZWJ + ListC[y]
)
for x in ListC:
Strng = Strng.replace(ListC[26] + vir + x, ListC[26] + vir + ZWJ + x)
for x in ListC:
for y in ListC:
Strng = Strng.replace(x + vir + y, x + ZWJ + vir + y)
Strng = Strng.replace("ර්ය", "ර්ය")
return Strng
def SinhalaPali(Strng, reverse=False):
EOLong = (
Sinhala.VowelMap[10:11]
+ Sinhala.VowelMap[12:13]
+ Sinhala.VowelSignMap[9:10]
+ Sinhala.VowelSignMap[11:12]
)
EOShort = Sinhala.SouthVowelMap + Sinhala.SouthVowelSignMap
for x, y in zip(EOLong, EOShort):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
return Strng
def UrduAlternateUU(Strng):
    # Alternate spelling of ū: damma + waw becomes waw + inverted damma.
    Strng = Strng.replace("\u064F\u0648", "\u0648\u0657")
    return Strng
def TibetanNada(Strng):
Strng = Strng.replace("\u0F83", "\u0F82")
return Strng
def TibetanTsheg(Strng):
Strng = Strng.replace("\u0F0B", " ")
return Strng
def TibetanRemoveVirama(Strng):
Strng = Strng.replace(Tibetan.ViramaMap[0], "")
return Strng
def TibetanRemoveBa(Strng):
Strng = VaToBa(Strng, "Tibetan")
Strng = Strng.replace("ཪྺ", "རྦ")
Strng = Strng.replace("བྺ", "བྦ")
Strng = Strng.replace("ྦྺ", "ྦྦ")
return Strng
def ThaiLaoTranscription(
Strng, Script, shortA, shortAconj, reverse=False, anusvaraChange=True
):
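    """Convert the virama-based representation into Thai/Lao spelling,
    where the inherent vowel is written explicitly: consonants with no
    dependent vowel or virama receive shortA (or shortAconj inside a
    cluster), preposed vowels are reordered, and viramas are dropped.
    With reverse=True the explicit vowels are stripped and viramas
    restored."""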
Strng = Strng.replace("\u02BD", "")
cons = "|".join(
GM.CrunchSymbols(GM.Consonants, Script) + GM.CrunchList("VowelMap", Script)[0:1]
)
if Script == "Thai":
cons = "|".join(
GM.CrunchSymbols(GM.Consonants, Script)
+ GM.CrunchList("VowelMap", Script)[0:1]
+ ["ฮ", "บ", "ฝ", "ด"]
)
if Script == "Lao":
cons = "|".join(
GM.CrunchSymbols(GM.Consonants, Script)
+ GM.CrunchList("VowelMap", Script)[0:1]
+ ["ດ", "ບ", "ຟ"]
)
consnA = cons[:-2]
listVS = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Script))
vir = GM.CrunchList("ViramaMap", Script)[0]
AIUVir = "".join(GM.CrunchList("VowelSignMap", Script)[0:5] + [vir])
EAIO = "".join(
GM.CrunchList("VowelSignMap", Script)[9:12]
+ GM.CrunchList("SinhalaVowelSignMap", Script)[:]
)
Anu = GM.CrunchList("AyogavahaMap", Script)[1]
ng = GM.CrunchList("ConsonantMap", Script)[4]
vowA = GM.CrunchList("VowelMap", Script)[0]
if anusvaraChange:
Strng = AnusvaraToNasal(Strng, Script)
if not reverse:
if Script == "Thai":
Strng = re.sub(
"([" + EAIO + "])" + "(" + cons + ")" + "(" + vir + ")",
r"\2\3\1",
Strng,
)
Strng = Strng.replace("\u0E33", "\u0E32\u0E4D").replace(
"\u0E36", "\u0E34\u0E4D"
)
if Script == "LaoPali":
Strng = Strng.replace("ຳ", "າໍ")
if anusvaraChange:
Strng = Strng.replace(Anu, ng + vir)
Strng = re.sub(
"(?<![" + EAIO + "])" + "(" + cons + ")" + "(?![" + AIUVir + "])",
r"\1" + shortA,
Strng,
)
Strng = re.sub(
"(" + shortA + ")" + "(?=(" + cons + ")" + "(" + vir + "))",
shortAconj,
Strng,
)
Strng = Strng.replace(shortAconj + "ห" + vir, "ห" + vir)
Strng = re.sub(
"(" + shortAconj + ")" + "(.)(" + vir + ")([รล])", shortA + r"\2\3\4", Strng
)
consswap = "|".join(GM.CrunchSymbols(GM.Consonants, "Thai"))
Strng = re.sub(
"(" + consswap + ")" + "(" + vir + ")" + "([" + EAIO + "])" + "([รล])",
r"\3\1\2\4",
Strng,
)
Strng = re.sub(shortAconj + "([" + EAIO + "])", shortA + r"\1", Strng)
Strng = Strng.replace(vir, "")
Strng = Strng.replace(shortAconj + "ร", "รร")
else:
consOnly = "|".join(GM.CrunchSymbols(GM.Consonants, Script))
aVow = GM.CrunchList("VowelMap", Script)[0]
Strng = re.sub(
"(" + consnA + ")" + "(?!" + listVS + "|" + shortA + "|" + shortAconj + ")",
r"\1" + vir,
Strng,
)
if Script == "Lao":
Strng = re.sub(
"(?<!ໂ)"
+ "(?<!ແ)"
+ "(?<!ເ)"
+ "("
+ aVow
+ ")"
+ "(?<!ເ)"
+ shortA
+ "|"
+ shortAconj,
r"\1",
Strng,
)
Strng = re.sub(
"(" + consOnly + ")" + "(?<!າ|ໂ|ແ|ເ)" + shortA + "|" + shortAconj,
r"\1",
Strng,
)
Strng = Strng.replace("຺ຳ", "ຳ")
else:
Strng = re.sub(
"(?<!โ)"
+ "(?<!แ)"
+ "(?<!เ)"
+ "("
+ aVow
+ ")"
+ "(?<!เ)"
+ shortA
+ "|"
+ shortAconj,
r"\1",
Strng,
)
Strng = re.sub(
"(" + consOnly + ")" + "(?<!า|โ|แ|เ)" + shortA + "|" + shortAconj,
r"\1",
Strng,
)
Strng = re.sub(vir + "รฺรฺ", "รฺ", Strng)
Strng = re.sub(vir + "หฺ", "หฺ", Strng)
return Strng
def LaoTranscription(Strng):
Strng = CF.LaoPaliTranscribe(Strng)
Strng = Strng.replace("ະ໌", "໌")
return Strng
def ThaiVisargaSaraA(Strng):
Strng = Strng.replace("ห์", "ะ")
return Strng
def ThamTallADisable(Strng):
Strng = Strng.replace("\u1A64", "\u1A63")
return Strng
def ThamTallAOthers(Strng):
TallACons = "|".join(["ᨧ", "ᨻ", "ᩁ", "ᨽ"])
Strng = FixTallA(Strng, TallACons)
return Strng
def LaoPhonetic(Strng):
Strng = re.sub("(\u0EBA)([ໂເໄ]?)([ຍຣລວຫ])", "\u035C" + r"\2\3", Strng)
Strng = re.sub(
"([ຍຣລວຫ])" + "\u035C" + "([ໂເໄ]?)" + r"\1", r"\1" + "\u0EBA" + r"\2\1", Strng
)
Strng = Strng.replace("ຫ\u0EBA", "ຫ\u035C")
Strng = re.sub("([ຍຣລວຫ])" + "\u035C" + r"\1", r"\1" + "\u0EBA" + r"\1", Strng)
Strng = LaoTranscription(Strng)
Strng = Strng.replace("\u0EB0\u035C", "\u035C")
Strng = Strng.replace("ງ", "ງໍ")
Strng = Strng.replace("ທ", "ດ")
Strng = Strng.replace("ພ", "ບ")
return Strng
def RephaDoubleMalayalam(Strng):
repha = "[ർൎ]"
Target = "Malayalam"
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
ConUnAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24, 25, 28, 29, 31]
]
ConUnAsp = ConUnAsp + ["ള"]
ConAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [1, 3, 6, 8, 11, 13, 16, 18, 21]
]
Strng = re.sub(
"(" + repha + ")" + "(" + "|".join(ConUnAsp) + ")", r"\1\2" + vir + r"\2", Strng
)
for i in range(len(ConAsp)):
Strng = re.sub(
"(" + repha + ")" + "(" + ConAsp[i] + ")",
r"\1" + ConUnAsp[i] + vir + r"\2",
Strng,
)
return Strng
def DograShaKha(Strng):
Strng = Strng.replace("𑠨", "𑠋")
return Strng
def ThamShiftMaiKangLai(Strng):
Strng = re.sub("(\u1A58)(.)", r"\2\1", Strng)
ListV = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "TaiTham") + ["ᩤ"]) + ")"
Strng = re.sub("(\u1A58)([\u1A55\u1A56])", r"\2\1", Strng)
Strng = re.sub("(\u1A58)(\u1A60.)", r"\2\1", Strng)
Strng = re.sub("(\u1A58)" + ListV, r"\2\1", Strng)
Strng = re.sub("(\u1A58)" + ListV, r"\2\1", Strng)
return Strng
def FixTallA(Strng, TallACons):
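    # Tai Tham: after the consonants in TallACons (including when they
    # carry subjoined consonants or the E sign), replace the round AA
    # sign ᩣ with the tall form ᩤ; the final rules revert tall A after
    # certain subjoined consonants where the round form is required.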
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "TaiTham"))
Sub = ["\u1A55", "\u1A56"]
E = "ᩮ"
AA = "ᩣ"
Strng = re.sub(
"(?<!᩠)(" + TallACons + ")" + "(" + E + "?)" + AA, r"\1\2" + "ᩤ", Strng
)
Strng = re.sub(
"(" + TallACons + ")(᩠)(" + ListC + ")" + "(" + E + "?)" + AA,
r"\1\2\3\4" + "ᩤ",
Strng,
)
Strng = re.sub(
"("
+ TallACons
+ ")(᩠)("
+ ListC
+ ")"
+ "(᩠)("
+ ListC
+ ")"
+ "("
+ E
+ "?)"
+ AA,
r"\1\2\3\4\5\6" + "ᩤ",
Strng,
)
Strng = re.sub(
"(" + TallACons + ")" + "(" + "|".join(Sub) + ")" + "(" + E + "?)" + AA,
r"\1\2\3" + "ᩤ",
Strng,
)
reverseSub = "([" + "".join(["ᨥ", "ᨫ", "ᨬ", "ᨰ", "ᨸ", "ᩈ", "ᨿ", "ᩇ", "ᨹ"]) + "])"
Strng = re.sub(
"(\u1A60)" + reverseSub + "(\u1A6E\u1A64)", r"\1\2" + "\u1A6E\u1A63", Strng
)
Strng = re.sub("(\u1A60)" + reverseSub + "(\u1A64)", r"\1\2" + "\u1A63", Strng)
return Strng
def ThaiSajjhayaOrthography(Strng, Script="Thai"):
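    """Sajjhāya (recitation) orthography for Pali in Thai or Lao Pali
    script: the pure-killer dot is first rewritten as Thanthakhat, then
    cluster-initial dead consonants take Mai Han Akat (ั) and those
    adjacent to sonorants take Yamakkan (๎)."""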
Strng = CF.ThaiReverseVowelSigns(Strng, True)
Strng = CF.ThaiDigraphConjuncts(Strng, True)
Strng = CF.ThaiReverseVowelSigns(Strng)
if Script == "Thai":
Strng = Strng.replace("ฺ", "์")
if Script == "LaoPali":
Strng = Strng.replace("຺", "์")
cons = "|".join(
GM.CrunchSymbols(GM.Consonants, Script) + GM.CrunchList("VowelMap", Script)[0:1]
)
EAIO = "".join(
GM.CrunchList("VowelSignMap", Script)[9:12]
+ GM.CrunchList("SinhalaVowelSignMap", Script)[:]
)
Strng = re.sub(
"(?<![" + EAIO + "])" + "(" + cons + ")" + "(" + cons + ")" + "(์)",
r"\1" + "ั" + r"\2\3",
Strng,
)
if Script == "Thai":
cons_others = "([ยรลวศษสหฬ])"
if Script == "LaoPali":
cons_others = "([ຍຣລວຨຩສຫຬ])"
Strng = re.sub(
"(?<![" + EAIO + "])" + "(" + cons + ")" + "(" + cons + ")" + "(์)",
r"\1" + "ั" + r"\2\3",
Strng,
)
Strng = re.sub(
"(" + cons + ")" + "(์)" + "([" + EAIO + "]?)" + cons_others,
r"\1" + "๎" + r"\3\4",
Strng,
)
Strng = re.sub(
cons_others + "(์)" + "([" + EAIO + "]?)" + "(" + cons + ")",
r"\1" + "๎" + r"\3\4",
Strng,
)
Strng = re.sub(
cons_others + "(๎)" + "([" + EAIO + "]?)" + r"\1", r"\1" + "์" + r"\3\1", Strng
)
Strng = re.sub(
"(" + cons + ")" + "(๎)" + "([" + EAIO + "])" + "(" + cons + ")",
r"\3\1\2\4",
Strng,
)
if Script == "Thai":
Strng = Strng.replace("ง์", "ง")
Strng = re.sub("(\u0E31)(.)(\u0E4E)", r"\2\3", Strng)
if Script == "LaoPali":
Strng = Strng.replace("ั", "ັ")
Strng = Strng.replace("ງ์", "ງ")
Strng = Strng.replace("์", "໌")
Strng = re.sub("(\u0EB1)(.)(\u0E4E)", r"\2\3", Strng)
Strng = Strng.replace("\u0E4E", "\u0ECE")
return Strng
def ThaiTranscription(Strng, anusvaraChange=True):
Strng = CF.ThaiReverseVowelSigns(Strng, True)
Strng = CF.ThaiDigraphConjuncts(Strng, True)
Strng = CF.ThaiReverseVowelSigns(Strng)
Strng = ThaiLaoTranscription(
Strng, "Thai", "\u0E30", "\u0E31", anusvaraChange=anusvaraChange
)
Strng = Strng.replace("ะ์", "์")
Strng = Strng.replace("ะงัง", "\u0E31งํ")
return Strng
def AvestanConventions(Strng):
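    """Apply Avestan spelling conventions: the velar nasal before i is
    written 𐬣, x and š take their special forms before ii, uu after
    tha/dha is written 𐬡, n assimilates before velars and dentals, and
    word-final t (or t before k/b) becomes the unreleased letter 𐬝."""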
extraCons = ["\U00010B33", "\U00010B32", "\U00010B1D", "\U00010B12", "𐬣", "𐬝"]
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Avestan") + extraCons)
ListV = "|".join(GM.CrunchSymbols(GM.Vowels, "Avestan"))
ListA = "|".join(
GM.CrunchSymbols(GM.Vowels + GM.Consonants, "Avestan") + extraCons + ["𐬄", "𐬅"]
)
ii = Avestan.VowelMap[2] * 2
uu = Avestan.VowelMap[4] * 2
i = Avestan.VowelMap[2]
a = Avestan.VowelMap[0]
kha = Avestan.ConsonantMap[1]
nga = Avestan.ConsonantMap[4]
ya = Avestan.ConsonantMap[25]
va = Avestan.ConsonantMap[28]
ta = Avestan.ConsonantMap[15]
tha = Avestan.ConsonantMap[16]
dha = Avestan.ConsonantMap[18]
na = Avestan.ConsonantMap[19]
ma = Avestan.ConsonantMap[24]
kb = "|".join([Avestan.ConsonantMap[0], Avestan.ConsonantMap[22]])
nna = Avestan.ConsonantMap[14]
sha = Avestan.ConsonantMap[29]
VelarDental = "|".join(Avestan.ConsonantMap[0:4] + Avestan.ConsonantMap[15:19])
Strng = Strng.replace(nga + i, "𐬣" + i)
Strng = re.sub(a + "([" + na + ma + "])" + "(?!" + ListA + ")", "𐬆" + r"\1", Strng)
Strng = re.sub("(" + na + ")" + "(" + VelarDental + ")", nna + r"\2", Strng)
Strng = re.sub("(" + kha + ")" + "(?=" + ii + ")", "\U00010B12", Strng)
Strng = re.sub("(" + sha + ")" + "(?=" + ii + ")", "\U00010B33", Strng)
Strng = re.sub("(" + tha + "|" + dha + ")" + "(" + uu + ")", r"\1" "𐬡", Strng)
Strng = re.sub(
"(" + ta + ")" + "(?!" + "((" + ListV + ")" + "|" + "(" + ListC + "))" + ")",
"\U00010B1D",
Strng,
)
Strng = re.sub("(" + ta + ")" + "(?=" + "(" + kb + ")" + ")", "\U00010B1D", Strng)
return Strng
def TaiThamO(Strng):
Strng = Strng.replace("\u1A6E\u1A63", "\u1A70")
return Strng
def TaiThamHighNga(Strng):
Strng = Strng.replace("\u1A58", "\u1A59")
return Strng
def TaiThamMoveNnga(Strng):
Strng = re.sub("(.)(\u1A58|\u1A50)", r"\2\1", Strng)
return Strng
def UrduRemoveShortVowels(Strng):
ShortVowels = ["\u0652", "\u064E", "\u0650", "\u064F"]
for vow in ShortVowels:
Strng = Strng.replace(vow, "")
return Strng
def PhagsPaRearrange(Strng, Target):
vir = GM.CrunchList("ViramaMap", Target)[0]
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, Target))
ListV = "|".join(GM.CrunchSymbols(GM.Vowels, Target))
ListVS = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target))
Strng = re.sub(
"(?<!( |" + vir + "))" + "(" + ListC + ")" + "(?= )", r"\2" + vir, Strng
)
Strng = Strng.replace(" ", "").replace("᠂", " ").replace("᠃", " ")
return Strng
def DevanagariAVowels(Strng):
oldVowels = Devanagari.VowelMap[2:12] + Devanagari.SouthVowelMap[:1]
a = Devanagari.VowelMap[0]
newAVowels = [
a + x for x in Devanagari.VowelSignMap[1:11] + Devanagari.SouthVowelSignMap[:1]
]
for x, y in zip(oldVowels, newAVowels):
Strng = Strng.replace(x, y)
return Strng
def AnusvaraToNasalIPA(Strng):
Strng = Strng.replace("̃k", "ŋk")
Strng = Strng.replace("̃g", "ŋg")
Strng = Strng.replace("̃c", "ɲc")
Strng = Strng.replace("̃j", "ɲj")
Strng = Strng.replace("̃t̪", "n̪t̪")
Strng = Strng.replace("̃d̪", "n̪d̪")
Strng = Strng.replace("̃ɖ", "ɳɖ")
Strng = Strng.replace("̃ʈ", "ɳʈ")
Strng = Strng.replace("̃ːk", "ːŋk")
Strng = Strng.replace("̃ːg", "ːŋg")
Strng = Strng.replace("̃ːc", "ːɲc")
Strng = Strng.replace("̃ːj", "ːɲj")
Strng = Strng.replace("̃ːt̪", "ːn̪t̪")
Strng = Strng.replace("̃ːd̪", "ːn̪d̪")
Strng = Strng.replace("̃ːɖ", "ːɳɖ")
Strng = Strng.replace("̃ːʈ", "ːɳʈ")
return Strng
def IPARemoveCross(Strng):
Strng = Strng.replace("×", "")
return Strng
def ChakmaAVowels(Strng):
return Strng
def ZanabazarSquareContextual(Strng):
yrlv = ZanabazarSquare.ConsonantMap[25:29]
yrlv_sub = ["\U00011A3B", "\U00011A3C", "\U00011A3D", "\U00011A3E"]
for x, y in zip(yrlv, yrlv_sub):
Strng = Strng.replace("\U00011A47" + x, y)
Strng = re.sub("(?<!\U00011A47)" + yrlv[1] + "\U00011A47", "\U00011A3A", Strng)
return Strng
def ZanabazarSquareAiAu(Strng):
Strng = Strng.replace("\U00011A04\U00011A0A", "\U00011A07")
Strng = Strng.replace("\U00011A06\U00011A0A", "\U00011A08")
return Strng
def ZanabazarSquareMongolianFinal(Strng):
Strng = Strng.replace(ZanabazarSquare.ViramaMap[0], "\U00011A33")
return Strng
def TamilRemoveApostrophe(Strng):
Strng = Strng.replace("ʼ", "")
return Strng
def TamilRemoveNumbers(Strng):
numerals = ["²", "³", "⁴", "₂", "₃", "₄"]
for num in numerals:
Strng = Strng.replace(num, "")
return Strng
def NewaSpecialTa(Strng):
Strng = Strng.replace("𑐟𑑂", "𑐟𑑂")
return Strng
def TamilDisableSHA(Strng):
Strng = Strng.replace("ஶ", "ஷ²")
Strng = CF.ShiftDiacritics(Strng, "Tamil")
return Strng
def swapEe(Strng):
    # Swap lower-case and upper-case e via a temporary placeholder.
    Strng = Strng.replace("e", "X@X@")
    Strng = Strng.replace("E", "e")
    Strng = Strng.replace("X@X@", "E")
    return Strng
def capitalizeSentence(Strng):
    Strng = re.sub(
        r"(\A\w)|"
        + r"(?<!\.\w)([.?!]\s*)\w|"
        + r"\w(?:\.\w)|"
        + r"(\n)\w|"
        + r"(\n([\"“'‘]))\w|"
        + r"(?<=\w\.)\w",
        lambda x: x.group().upper(),
        Strng,
    )
    Strng = re.sub(r"(@)(.)", lambda x: x.groups()[1].upper(), Strng)
    return Strng
def NewaDisableRepha(Strng):
Strng = Strng.replace("𑐬𑑂", "𑐬𑑂\u200D")
return Strng | Aksarantara | /Aksarantara-1.1.0-py3-none-any.whl/aksarantara/post_processing.py | post_processing.py |
import importlib, string
import re
from functools import reduce
import data as fb
def ScriptPath(Script):
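    # Resolve a script name to the dotted path of the module holding its
    # character tables (Core.*, East.*, Roman.* or Other.*).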
if Script in MainIndic:
return "Core." + Script
elif Script in EastIndic:
return "East." + Script
elif Script in Roman:
return "Roman." + Script
elif Script in NonIndic:
return "Other." + Script
def retCharList(charList):
return globals()[charList]
def CrunchSymbols(Part, Script):
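    # Concatenate the character lists named in Part (e.g. GM.Vowels,
    # GM.Consonants) from the given script's module into one flat list.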
ModScript = importlib.import_module(ScriptPath(Script))
return reduce(lambda x, y: x + y, [getattr(ModScript, Var) for Var in Part])
def CrunchList(List, Script):
    # Fetch a single named character list from the script's module.
    try:
        ModScript = importlib.import_module(ScriptPath(Script))
    except Exception:
        import logging

        logging.exception("The script " + Script + " cannot be found")
        return ""
    return getattr(ModScript, List)
def EscapeChar(Strng):
punct = "".join(["\\" + x for x in string.punctuation])
return re.sub("(" + punct + ")", r"\\" + r"\1", Strng)
VedicSvaras = "(" + "|".join(["᳚", "॑", "॒"]) + ")?"
VedicSvarasList = ["᳚", "॑", "॒"]
Vowels = ["VowelMap", "SouthVowelMap", "ModernVowelMap", "SinhalaVowelMap"]
VowelSignsNV = [
"VowelSignMap",
"SouthVowelSignMap",
"ModernVowelSignMap",
"SinhalaVowelSignMap",
]
VowelSigns = [
"ViramaMap",
"VowelSignMap",
"SouthVowelSignMap",
"ModernVowelSignMap",
"SinhalaVowelSignMap",
]
CombiningSigns = ["AyogavahaMap", "NuktaMap"]
Consonants = [
"ConsonantMap",
"SouthConsonantMap",
"NuktaConsonantMap",
"SinhalaConsonantMap",
]
Signs = ["SignMap"]
Numerals = ["NumeralMap"]
Aytham = ["Aytham"]
om = ["OmMap"]
virama = ["ViramaMap"]
MainIndic = [
"Nandinagari",
"Shahmukhi",
"TamilExtended",
"MasaramGondi",
"GunjalaGondi",
"Dogra",
"Ranjana",
"Khojki",
"GranthaGrantamil",
"Multani",
"Ahom",
"Mahajani",
"SiddhamDevanagari",
"Vatteluttu",
"GranthaPandya",
"Khudawadi",
"Bhaiksuki",
"Sharada",
"Newa",
"SylotiNagri",
"Takri",
"Tirhuta",
"Modi",
"Kaithi",
"Kharoshthi",
"Lepcha",
"Chakma",
"Brahmi",
"MeeteiMayek",
"Limbu",
"Assamese",
"Bengali",
"Devanagari",
"Grantha",
"Gujarati",
"Gurmukhi",
"Kannada",
"Malayalam",
"Oriya",
"Saurashtra",
"Sinhala",
"Tamil",
"TamilBrahmi",
"TamilGrantha",
"Telugu",
"Urdu",
]
EastIndic = [
"BurmeseALALC",
"Makasar",
"Kawi",
"Pallava",
"LaoTham",
"LueTham",
"KhuenTham",
"Marchen",
"Soyombo",
"KhomThai",
"KhamtiShan",
"TaiLaing",
"Mon",
"Shan",
"ZanabazarSquare",
"Rejang",
"Lao2",
"Buhid",
"Hanunoo",
"Siddham",
"Tibetan",
"Lao",
"TaiTham",
"Cham",
"BatakKaro",
"BatakPakpak",
"BatakSima",
"BatakToba",
"BatakManda",
"LaoPali",
"PhagsPa",
"Buginese",
"Tagbanwa",
"Tagalog",
"Sundanese",
"Balinese",
"Burmese",
"Javanese",
"Khmer",
"Siddham",
"Ranjana",
"Thaana",
"Thai",
"BurmeseALALC",
]
NonIndic = ["OldPersian", "Hebrew"]
Roman = [
"IASTLOC",
"RomanSemitic",
"RomanColloquial",
"ISOPali",
"RomanKana",
"BarahaNorth",
"BarahaSouth",
"Mongolian",
"SLP1",
"Wancho",
"Mro",
"IASTPali",
"HanifiRohingya",
"Ariyaka",
"RomanReadable",
"Aksharaa",
"WarangCiti",
"SoraSompeng",
"WX-kok",
"Avestan",
"HK",
"IAST",
"ISO",
"Itrans",
"Titus",
"Titus",
"Velthuis",
"WX",
"Inter",
"IPA",
"TolongSiki",
"Santali",
"RussianCyrillic",
]
RomanDiacritic = [
"IAST",
"Titus",
"ISO",
"IPA",
"IASTPali",
"ISOPali",
"IASTLOC",
"ISOLOC",
"RomanSemitic",
]
ScriptCategory = {}
ScriptCategory["IndianMain"] = [
"GranthaGrantamil",
"Assamese",
"Bengali",
"Devanagari",
"Gujarati",
"Gurmukhi",
"Kannada",
"Malayalam",
"Oriya",
"Sinhala",
"Tamil",
"Telugu",
"Urdu",
]
ScriptCategory["IndianMinority"] = [
"Brahmi",
"Chakma",
"Grantha",
"Lepcha",
"Limbu",
"MeeteiMayek",
"Saurashtra",
"TamilBrahmi",
"TamilGrantha",
"Kaithi",
]
ScriptCategory["EastAsianPaliSans"] = [
"Balinese",
"Burmese",
"Cham",
"Javanese",
"Khmer",
"LaoPali",
"Lao",
"PhagsPa",
"TaiTham",
"Thaana",
"Thai",
"Tibetan",
]
ScriptCategory["EastAsianIndFili"] = [
"BatakKaro",
"BatakManda",
"BatakPakpak",
"BatakSima",
"BatakToba",
"Buginese",
"Sundanese",
"Tagalog",
"Tagbanwa",
]
ScriptCategory["IndianAlpha"] = ["Santali", "TolongSiki"]
ScriptCategory["RomanDiacritic"] = ["IAST", "IPA", "ISO", "Titus", "IASTPali"]
ScriptCategory["RomanNonDiacritic"] = [
"HK",
"Itrans",
"RussianCyrillic",
"Velthuis",
"WX",
]
ScriptCategory["NonIndic"] = ["Avestan", "OldPersian"]
Inter = "Inter"
Characters = Vowels + VowelSigns + CombiningSigns + Consonants
CharactersNV = Vowels + VowelSignsNV + CombiningSigns + Consonants
Diacritics = [
"ʽ",
"\u00B7",
"\u00B9",
"\u00B2",
"\u00B3",
"\u2074",
"\u2081",
"\u2082",
"\u2083",
"\u2084",
]
DiacriticsRemovable = ["ʼ", "ˇ", "ˆ", "˘", "\u00B7"]
DiacriticsRemovableTamil = ["ˇ", "ˆ", "˘", "\u00B7"]
ScriptAll = [
"Aytham",
"Signs",
"CombiningSigns",
"VowelSigns",
"Vowels",
"Consonants",
"Numerals",
]
IndicScripts = [
"RomanSemitic",
"Makasar",
"Nandinagari",
"Kawi",
"Shahmukhi",
"Pallava",
"Hebrew",
"LaoTham",
"LueTham",
"KhuenTham",
"TamilExtended",
"Marchen",
"MasaramGondi",
"GunjalaGondi",
"Soyombo",
"Dogra",
"KhomThai",
"KhamtiShan",
"TaiLaing",
"Mon",
"Khojki",
"Shan",
"Ranjana",
"ZanabazarSquare",
"Rejang",
"GranthaGrantamil",
"Devanagari",
"Multani",
"Ahom",
"Mahajani",
"Lao2",
"Hanunoo",
"Buhid",
"Siddham",
"SiddhamDevanagari",
"GranthaPandya",
"Vatteluttu",
"Khudawadi",
"Bhaiksuki",
"Sharada",
"Newa",
"Takri",
"SylotiNagri",
"Tirhuta",
"Modi",
"Kaithi",
"Kharoshthi",
"Telugu",
"Kannada",
"Malayalam",
"Gujarati",
"Bengali",
"Oriya",
"Gurmukhi",
"Tamil",
"Assamese",
"Saurashtra",
"TamilBrahmi",
"Grantha",
"TamilGrantha",
"Sinhala",
"Khmer",
"Burmese",
"Urdu",
"Balinese",
"Javanese",
"Thaana",
"Tibetan",
"Thai",
"OldPersian",
"Limbu",
"Lepcha",
"Sundanese",
"Tagalog",
"Tagbanwa",
"Buginese",
"Chakma",
"PhagsPa",
"MeeteiMayek",
"LaoPali",
"BatakKaro",
"BatakPakpak",
"BatakSima",
"BatakToba",
"BatakManda",
"Cham",
"TaiTham",
"Lao",
"Brahmi",
]
SiddhamRanjana = ["Ranjana"]
LatinScripts = [
"IASTLOC",
"RomanColloquial",
"ISOPali",
"RomanKana",
"BarahaNorth",
"BarahaSouth",
"Mongolian",
"SLP1",
"Wancho",
"Mro",
"IASTPali",
"HanifiRohingya",
"Ariyaka",
"RomanReadable",
"Aksharaa",
"WarangCiti",
"SoraSompeng",
"WX-kok",
"Avestan",
"ISO",
"IAST",
"HK",
"Titus",
"Itrans",
"Velthuis",
"WX",
"Inter",
"IPA",
"TolongSiki",
"Santali",
"RussianCyrillic",
]
Gemination = {
"Gurmukhi": "\u0A71",
"Thaana": "\u0787\u07B0",
"Urdu": "\u0651",
"Shahmukhi": "\u0651",
"Grantha": "𑌂",
"Malayalam": "ം",
"Khojki": "\U00011237",
"Buginese": "",
"Buhid": "",
"Tagbanwa": "",
"Makasar": "",
}
Transliteration = [
"IASTPali",
"RomanReadable",
"Aksharaa",
"ISO",
"IAST",
"HK",
"Titus",
"Itrans",
"Velthuis",
"WX",
"IPA",
"RussianCyrillic",
]
SemiticScripts = [
"Arab-Pa",
"Syrj",
"Syrn",
"Syre",
"Thaa",
"Arab-Ur",
"Type",
"Hebr-Ar",
"Arab-Fa",
"Latn",
"Arab",
"Ethi",
"Armi",
"Brah",
"Chrs",
"Egyp",
"Elym",
"Grek",
"Hatr",
"Hebr",
"Mani",
"Narb",
"Nbat",
"Palm",
"Phli",
"Phlp",
"Phnx",
"Prti",
"Samr",
"Sarb",
"Sogd",
"Sogo",
"Ugar",
]
SemiticConsonants = [
"ʾ",
"b",
"v",
"g",
"j",
"d",
"h",
"w",
"z",
"ḥ",
"ṭ",
"y",
"k",
"l",
"m",
"n",
"s",
"ʿ",
"f",
"ṣ",
"q",
"r",
"š",
"t",
"ḍ",
"ḏ",
"ḫ",
"ġ",
"ṯ",
"ẓ",
"p",
"č",
"ž",
"ɖ",
"ʈ",
"ʂ",
"ɭ",
"ɲ",
"ɳ",
"ɽ",
"ʰ",
]
SemiticVowels = ["a", "ā", "i", "ī", "u", "ū", "ē", "ō", "e", "o", "#", "\u033D"]
semiticVowelsAll = "꞉ a ā i ī u ū e ē o ō a̮ ̽ ā̮ ĕ ă ŏ aŷ aŵ a aⁿ uⁿ iⁿ".split(" ")
vowelsInitialAll = "ˀā̮̂ ā̮̂ â ā̂ î ī̂ û ū̂ ê ē̂ ô ō̂ âŷ âŵ ˀâ ˀî".split(" ")
semiticISO = {
"ISO259": "Hebrew",
"HebrewSBL": "Hebrew",
"ISO233": "Arab",
"PersianDMG": "Arab-Fa",
}
ReversibleScripts = [
"Devanagari",
"Tamil",
"Telugu",
"Kannada",
"Sinhala",
"Oriya",
"Gujarati",
"Bengali",
"Assamese",
"Malayalam",
"Gurmukhi",
]
CharmapLists = ["VowelMap", "VowelSignMap", "ConsonantMap", "SignMap", "AyogavahaMap"]
def add_additional_chars(script_char_map, file_script):
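    # Extend a script's character tables with the extra characters
    # defined in the data module (fb): use the script's own mapping when
    # one exists, otherwise derive a fallback from the OthersRev /
    # OthersNonRev rules (non-reversible fallbacks are tagged with
    # U+02BD U+02BD).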
for charlist in CharmapLists:
mapping_char = getattr(fb, charlist)
ModScript = importlib.import_module(ScriptPath(file_script))
for char, mapping in mapping_char.items():
if file_script in mapping.keys():
script_char_map[charlist].append(mapping[file_script])
else:
if file_script in ReversibleScripts:
if mapping["OthersRev"][0] != -1:
script_char_map[charlist].append(
script_char_map[charlist][mapping["OthersRev"][0]]
+ mapping["OthersRev"][1]
)
else:
script_char_map[charlist].append(mapping["OthersRev"][1])
else:
if mapping["OthersNonRev"][0] != -1:
script_char_map[charlist].append(
script_char_map[charlist][mapping["OthersNonRev"][0]]
+ "\u02BD\u02BD"
)
else:
script_char_map[charlist].append("\u02BD\u02BD") | Aksarantara | /Aksarantara-1.1.0-py3-none-any.whl/aksarantara/Map.py | Map.py |
import Map as GM
from Roman import Avestan, IAST
from Core import (
Ahom,
Tamil,
TamilGrantha,
Limbu,
MeeteiMayek,
Urdu,
Lepcha,
Chakma,
Kannada,
Gurmukhi,
Newa,
)
from East import (
Lao,
TaiTham,
Tibetan,
Burmese,
Khmer,
Balinese,
Javanese,
Thai,
Sundanese,
PhagsPa,
Cham,
Thaana,
Rejang,
ZanabazarSquare,
Makasar,
)
import post_processing
import re
def lenSort(x, y):
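    # cmp-style comparator: pairs whose first element is longer sort
    # first, so that longer keys are tried before their prefixes.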
if len(x[0]) > len(y[0]):
return -1
else:
return 0
def OriyaIPAFixPre(Strng):
Strng = Strng.replace("ଂ", "ଙ୍")
Strng = Strng.replace("ଃ", "ହ୍")
return Strng
def SinhalaIPAFix(Strng):
consonants = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "IPA")) + ")"
Strng = re.sub("^" + consonants + "(ə)", r"\1ʌ", Strng)
Strng = re.sub("(\s)" + consonants + "(ə)", r"\1\2ʌ", Strng)
Strng = re.sub("^" + consonants + consonants + "(ə)", r"\1ʌ", Strng)
Strng = re.sub("(\s)" + consonants + consonants + "(ə)", r"\1\2ʌ", Strng)
Strng = re.sub("^" + consonants + consonants + consonants + "(ə)", r"\1ʌ", Strng)
Strng = re.sub(
"(\s)" + consonants + consonants + consonants + "(ə)", r"\1\2ʌ", Strng
)
return Strng
def OriyaIPAFix(Strng):
Strng = Strng.replace("ə", "ɔ")
Strng = Strng.replace("j", "d͡ʒ")
Strng = Strng.replace("\u1E8F", "j")
Strng = Strng.replace("kʂ", "kʰ")
Strng = Strng.replace("ʂ", "s̪")
Strng = Strng.replace("ʃ", "s̪")
Strng = Strng.replace("ʋ", "u̯")
Strng = Strng.replace("t͡s", "t͡ʃ")
Strng = Strng.replace("ɪ", "i")
Strng = Strng.replace("iː", "i")
Strng = Strng.replace("uː", "u")
Strng = Strng.replace("eː", "e")
Strng = Strng.replace("oː", "o")
Strng = Strng.replace("ɾɨ", "ɾu")
Strng = Strng.replace("ɾɨː", "ɾu")
Strng = Strng.replace("lɨ", "lu")
Strng = Strng.replace("lɨː", "lu")
return Strng
def VedicSvarasLatinIndic(Strng, Source):
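    # Convert the ASCII Vedic accent notation (\', \", \_, \m+, \m++)
    # into the combining svara signs ॑ ᳚ ॒ and the anusvara signs ꣳ ꣴ,
    # reordering the accents around ayogavaha marks.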
Strng = Strng.replace("{\\m+}", "ꣳ")
Strng = Strng.replace("\\m++", "ꣴ")
Strng = Strng.replace("\\m+", "ꣳ")
Strng = Strng.replace("\\`", "\\_")
Strng = Strng.replace("\\''", '\\"')
Ayogavaha = GM.CrunchList("AyogavahaMap", Source)
Svaras = ["\\_", '\\"', "\\'"]
for x in Ayogavaha:
for y in Svaras:
Strng = Strng.replace(y + x, x + y)
Strng = Strng.replace('\\"', "᳚")
Strng = Strng.replace("\\'", "॑")
Strng = Strng.replace("\\_", "॒")
return Strng
def VedicSvarsIndicLatin(Strng):
Strng = Strng.replace("᳚", '\\"')
Strng = Strng.replace("॑", "\\'")
Strng = Strng.replace("॒", "\\_")
Strng = Strng.replace("ꣳ", "\\m+")
Strng = Strng.replace("ꣴ", "\\m++")
return Strng
def VedicSvarasOthers(Strng, Target):
Strng = Strng.replace('\\"', "↑↑").replace("\\_", "↓").replace("\\'", "↑")
anu = GM.CrunchList("AyogavahaMap", Target)[1]
Strng = Strng.replace("\\m++", "ꣴ")
Strng = Strng.replace("\\m+", "ꣳ")
Ayogavaha = GM.CrunchList("AyogavahaMap", Target)
return Strng
def VedicSvarasDiacrtics(Strng, Target):
Strng = Strng.replace("\\'", "̍")
Strng = Strng.replace('\\"', "̎")
Strng = Strng.replace("\\_", "̱")
Strng = Strng.replace("\\m++", "gͫ̄")
Strng = Strng.replace("\\m+", "gͫ")
if Target == "ISO" or Target == "ISOPali":
Strng = Strng.replace("\\’’", "̎")
Strng = Strng.replace("\\’", "̍")
Ayogavaha = GM.CrunchList("AyogavahaMap", Target)
Svaras = ["̍", "̎", "̱"]
for x in Ayogavaha:
for y in Svaras:
Strng = Strng.replace(x + y, y + x)
return Strng
def VedicSvarasCyrillic(Strng, Target):
Strng = Strng.replace("\\'", "̍")
Strng = Strng.replace('\\"', "̎")
Strng = Strng.replace("\\_", "̱")
Strng = Strng.replace("\\м++", "г\u0361м")
Strng = Strng.replace("\\м+", "г\u035Cм")
Strng = Strng.replace("\\m++", "г\u0361м")
Strng = Strng.replace("\\m+", "г\u035Cм")
Ayogavaha = GM.CrunchList("AyogavahaMap", Target)
Svaras = ["̍", "̎", "̱"]
for x in Ayogavaha:
for y in Svaras:
Strng = Strng.replace(x + y, y + x)
return Strng
def VedicSvarasNonDiacritic(Strng):
Strng = Strng.replace("̍", "\\'")
Strng = Strng.replace("̎", '\\"')
Strng = Strng.replace("̱", "\\_")
Strng = Strng.replace("gͫ̄", "\\m++")
Strng = Strng.replace("gͫ", "\\m+")
Strng = Strng.replace("г\u0361м", "\\m++")
Strng = Strng.replace("г\u035Cм", "\\m+")
return Strng
def FixRomanOutput(Strng, Target):
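    """Resolve the intermediate placeholders into final romanization:
    the schwa marker (U+F000) becomes 'a' or is dropped before vowel
    signs, the dependent-vowel marker (U+1E7F) and viramas are removed,
    and '_' is inserted where sequences such as a+i or k+h would
    otherwise read as the single units ai or kh."""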
Schwa = "\uF000"
DepV = "\u1E7F"
VowelSignList = (
"|".join(GM.CrunchSymbols(GM.VowelSigns, Target))
.replace("^", "\^")
.replace(".", "\.")
)
VowelList = (
"|".join(GM.CrunchSymbols(GM.Vowels, Target))
.replace("^", "\^")
.replace(".", "\.")
)
Virama = "".join(GM.CrunchSymbols(["ViramaMap"], Target))
Nukta = "".join(GM.CrunchSymbols(["NuktaMap"], Target))
VowelA = GM.CrunchSymbols(["VowelMap"], Target)[0]
VowelIU = "|".join(
GM.CrunchSymbols(["VowelMap"], Target)[2]
+ GM.CrunchSymbols(["VowelMap"], Target)[4]
)
TargetCons = GM.CrunchSymbols(["ConsonantMap"], Target)
ConsH = TargetCons[32]
UnAspCons = (
"|".join([TargetCons[i] for i in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22]])
.replace("^", "\^")
.replace(".", "\.")
)
if Target in ["IAST", "ISO", "ISOPali", "Titus"]:
Strng = Strng.replace("u" + Virama, Virama + "ŭ")
Strng = re.sub("(?<=" + Schwa + DepV + ")" + "(" + VowelIU + ")", r"_\1", Strng)
Strng = re.sub("(?<=ṿ" + VowelA + "ṿ)" + "(" + VowelIU + ")", r"_\1", Strng)
Strng = re.sub(
"(" + UnAspCons + ")" "(" + Schwa + Virama + ")(" + ConsH + ")", r"\1_\3", Strng
)
Strng = re.sub(
"(" + Schwa + ")(" + Virama + ")(?=" + VowelList + ")", r"_\2", Strng
)
Strng = re.sub("(" + Schwa + ")(" + Nukta + ")", r"\2\1", Strng)
Strng = re.sub("(" + Schwa + ")(?=" + VowelSignList + ")", "", Strng)
Strng = Strng.replace(Schwa, VowelA)
Strng = Strng.replace(DepV, "")
Strng = Strng.replace(Virama, "")
return Strng
def FixVedic(Strng, Target):
Strng = Strng.replace("{\\m+}", "\\m+")
Strng = Strng.replace("\\`", "\\_")
Strng = Strng.replace("\\''", '\\"')
Strng = Strng.replace("\\\\м", "\\м")
Strng = Strng.replace("\\\\m", "\\m")
Strng = Strng.replace("\\\\'", "\\'")
Strng = Strng.replace('\\\\"', '\\"')
Strng = Strng.replace("\\\\_", "\\_")
vedicDiacRoman = ["IAST", "IASTPali", "ISO", "Titus", "ISOPali"]
vedicnonDiacRoman = ["HK", "Itrans", "Velthuis", "SLP1", "WX"]
if Target in vedicDiacRoman:
Strng = VedicSvarasDiacrtics(Strng, Target)
elif Target == "IPA":
Strng = Strng.replace('\\"', "↑↑").replace("\\_", "↓").replace("\\'", "↑")
Strng = Strng.replace("\\m++", "gͫ̄")
Strng = Strng.replace("\\m+", "gͫ")
elif Target == "RomanReadable" or Target == "RomanColloquial":
Strng = Strng.replace('\\"', "").replace("\\_", "").replace("\\'", "")
Strng = Strng.replace("\\m++", "ggum")
Strng = Strng.replace("\\m+", "gum")
elif Target in vedicnonDiacRoman:
pass
elif Target == "RussianCyrillic":
Strng = VedicSvarasCyrillic(Strng, Target)
else:
Strng = VedicSvarasOthers(Strng, Target)
return Strng
def PostFixRomanOutput(Strng, Source, Target):
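    """Final per-target clean-up after romanization: strip the U+02BD
    tag, normalize the Vedic accent notation, then apply script-specific
    fixes (IPA, Santali, Avestan, Hanifi Rohingya, Mongolian, the
    readable/colloquial Roman schemes, and others)."""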
Strng = Strng.replace("\u02BD", "")
Strng = FixVedic(Strng, Target)
if Target in ["IAST", "ISO", "ISOPali", "Titus"]:
Strng = (
Strng.replace("uʼ", "ü")
.replace("ūʼ", "ǖ")
.replace("aʼ", "ö")
.replace("āʼ", "ȫ")
)
if Source == "Sinhala" and Target == "IPA":
Strng = SinhalaIPAFix(Strng)
if Target == "IPA":
Strng = FixIPA(Strng)
if Target == "Santali":
Strng = FixSantali(Strng)
if Target == "Avestan":
Strng = FixAvestan(Strng)
if Target == "SoraSompeng":
Strng = FixSoraSompeng(Strng)
if Target == "WarangCiti":
Strng = FixWarangCiti(Strng)
if Target == "Wancho":
Strng = FixWancho(Strng)
if Target == "Mro":
Strng = FixMro(Strng)
if Target == "RomanReadable":
Strng = FixRomanReadable(Strng)
if Source == "Tamil":
Strng = Strng.replace("t", "th").replace("d", "dh").replace("h'", "")
if Target == "RomanColloquial":
if Source == "Tamil":
Strng = Strng.replace("t", "th").replace("d", "dh").replace("h'", "")
if Source == "Oriya":
Strng = Strng.replace("ksh", "x")
Strng = re.sub("x(?=[aeiou])", "ksh", Strng)
Strng = Strng.replace("jny", "gy").replace("sh", "s").replace("r'", "d")
if Source == "Bengali":
Strng = Strng.replace("m'", "ng")
Strng = FixRomanColloquial(Strng)
if Target == "IAST" or Target == "IASTPali":
Strng = Strng.replace("a_i", "aï")
Strng = Strng.replace("a_u", "aü")
if Target == "ISO" or Target == "ISOPali":
Strng = Strng.replace("\\’", "\\'")
Strng = Strng.replace("\\’\u02BD", "\\'")
Strng = Strng.replace("a_i", "a:i")
Strng = Strng.replace("a_u", "a:u")
if Target == "Velthuis" or Target == "Itrans":
Strng = Strng.replace("\\.a", "\\'")
if Target == "Aksharaa":
Strng = Strng.replace("\\a;", "\\'")
if Target == "HanifiRohingya":
Strng = FixHanifiRohingya(Strng)
if Target == "Mongolian":
Strng = FixMongolian(Strng)
if Source == "RomanSemitic":
pass
return Strng
def FixSemiticOutput(Strng, Source, Target):
Strng = Strng.replace("\u02DE", "")
try:
Strng = globals()["Fix" + Target.replace("-", "_")](Strng, Source)
except KeyError:
pass
return Strng
def FixIndicOutput(Strng, Source, Target):
vir = GM.CrunchList("ViramaMap", Target)[0]
Strng = Strng.replace(vir + "_", vir)
try:
Strng = globals()["Fix" + Target](Strng)
except KeyError:
pass
Strng = Strng.replace("\u02BD", "")
Strng = ShiftDiacritics(Strng, Target, reverse=False)
vedicScripts = [
"Assamese",
"Bengali",
"Devanagari",
"Gujarati",
"Kannada",
"Malayalam",
"Oriya",
"Gurmukhi",
"Tamil",
"Telugu",
"TamilExtended",
"Grantha",
"Sharada",
]
if Target not in vedicScripts:
Strng = Strng.replace("॒", "↓")
Strng = Strng.replace("᳚", "↑↑")
Strng = Strng.replace("॑", "↑")
return Strng
def FixHebr(Strng, Source, reverse=False):
vowelsigns = (
"(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew") + ["\u05BC"]) + ")"
)
vowelsigns2 = (
"(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew") + ["\u05BC"]) + ")?"
)
if not reverse:
Strng = re.sub("(׳)" + vowelsigns + vowelsigns2, r"\3\2\1", Strng)
Strng = re.sub("(וֹ)(׳)", r"\2\1", Strng)
Strng = re.sub("(וּ)(׳)", r"\2\1", Strng)
Strng = re.sub("(׳)(\u05b7)", r"\2\1", Strng)
Strng = re.sub("(׳)(\u05b7)", r"\1", Strng)
else:
vowels = [
"ְ",
"ֱ",
"ֲ",
"ֳ",
"ִ",
"ֵ",
"ֶ",
"ַ",
"ָ",
"ֹ",
"ֺ",
"ֻ",
"ׇ",
"\u05BC",
]
vowelsR = "(" + "|".join(vowels + ["וֹ", "וּ"]) + ")"
Strng = re.sub(vowelsR + "(׳)", r"\2\1", Strng)
Strng = re.sub(vowelsR + "(׳)", r"\2\1", Strng)
Strng = re.sub(vowelsR + "(׳)", r"\2\1", Strng)
Strng = re.sub(vowelsR + "(׳)", r"\2\1", Strng)
return Strng
def FixHebrew(Strng, reverse=False):
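    """Hebrew orthographic post-processing: insert patah for the
    inherent vowel, fold geminate consonants into dagesh, select
    word-final letter forms, and keep the geresh after the vowel point.
    With reverse=True these conventions are unwound so Hebrew input can
    be read back into the generic representation."""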
vowelsigns = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew")) + ")"
consonants = (
"(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Hebrew") + ["צּ", "גּ"]) + ")"
)
vowelsignsA = (
"(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew") + ["ַ"]) + ")"
)
vowelsignsAD = (
"(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew") + ["ַ", "ּ"]) + ")"
)
vowelsignsADShin = (
"("
+ "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew") + ["ַ", "ּ", "ׁ"])
+ ")"
)
vowelsignsADShinG = (
"("
+ "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew") + ["ַ", "ּ", "ׁ", "׳"])
+ ")"
)
finalCons = ["כ", "מ", "נ", "פ", "צ", "פּ", "כּ"]
finals = ["ך", "ם", "ן", "ף", "ץ", "ףּ", "ךּ"]
otherCons = "ב,ח,ע,צ,ש,ת".split(",")
consonantsAll = (
"("
+ "|".join(
GM.CrunchSymbols(GM.Consonants, "Hebrew") + finals + otherCons + ["׳"]
)
+ ")"
)
if not reverse:
Strng = Strng.replace("\u02BD", "")
Strng = Strng.replace("\u02BE", "")
Strng = Strng.replace("ג׳ְג׳", "גּ׳").replace("צ׳ְצ׳", "צּ׳")
Strng = re.sub("מְ" + "\u02BC" + "([גדזטכצקת])", "נְ" + r"\1", Strng)
Strng = re.sub("מְ" + "\u02BC", "מְ", Strng)
Strng = re.sub(
consonants + "(?!" + vowelsigns + ")", r"\1" + "\u05B7" + r"\2", Strng
)
Strng = Strng.replace("\u05b7\u05Bc", "\u05Bc\u05b7")
Strng = Strng.replace("\u05b7\u05b7", "\u05B7")
Strng = Strng.replace("\u05b7\u05bc\u05B0", "\u05bc\u05B0")
Strng = re.sub("(׳)" + vowelsigns, r"\2\1", Strng)
Strng = re.sub("(וֹ)(׳)", r"\2\1", Strng)
Strng = re.sub("(וּ)(׳)", r"\2\1", Strng)
Strng = re.sub("(׳)(\u05b7)", r"\2\1", Strng)
Strng = re.sub("(׳)(\u05b7)", r"\1", Strng)
Strng = re.sub("(\u05b7)" + vowelsigns, r"\2", Strng)
Strng = re.sub("(\u05b7)" + "(\u05BC)" + vowelsigns, r"\2\3", Strng)
Strng = re.sub(
"([" + "ושרקסנמליזטײ" + "הגדת" + "])(ְ)" + r"\1", r"\1" + "ּ", Strng
)
Strng = re.sub("(שׁ)(ְ)" + r"\1", r"\1" + "ּ", Strng)
Strng = (
Strng.replace("כְּכּ", "קּ").replace("פְּפּ", "פּ").replace("בְּבּ", "בּ")
)
shortVowels = (
"("
+ "|".join(
[
"\u05B7",
"\u05B8",
"\u05B4",
"\u05BB",
"\u05B5",
"\u05B6",
"\u05B9",
"\u05B0",
]
)
+ ")"
)
vowelsAll = (
"("
+ "|".join(
[
"\u05B7",
"\u05B8",
"\u05B4",
"\u05BB",
"\u05B5",
"\u05B6",
"\u05B9",
"\u05B0",
"י",
"וֹ",
"וּ",
]
+ ["׳"]
)
+ ")"
)
for c, f in zip(finalCons, finals):
Strng = re.sub(
vowelsAll
+ "("
+ c
+ ")"
+ shortVowels
+ "(׳?)"
+ "(?!"
+ consonantsAll
+ "|י|ו)",
r"\1" + f + r"\3" + r"\4",
Strng,
)
Strng = re.sub(
"(?<!ה)(ְ)(׳?)" + "(?!" + consonantsAll + "|י|ו)", r"\2\3", Strng
)
Strng = Strng.replace("װ" + "\u05B9", "\u05D5\u05BA")
Strng = Strng.replace("װ", "\u05D5")
Strng = Strng.replace("ײ", "י")
Strng = Strng.replace("\u02BC", "")
else:
vowels = ["ְ", "ֱ", "ֲ", "ֳ", "ִ", "ֵ", "ֶ", "ַ", "ָ", "ֹ", "ֺ", "ֻ", "ׇ"]
vowelsR = "(" + "|".join(vowels + ["וֹ", "וּ"]) + ")"
for f, c in zip(finals, finalCons):
Strng = Strng.replace(f, c)
Strng = re.sub(vowelsR + "([ּׁׂ])", r"\2\1", Strng)
Strng = Strng.replace("אֲ", "אַ")
Strng = Strng.replace("עֲ", "אַ")
Strng = (
Strng.replace("\u05B1", "\u05B6")
.replace("\u05B3", "\u05B9")
.replace("\u05B2", "\u05b7")
)
Strng = re.sub("(?<=[ֵֶַָֹ])([א])" + "(?!" + vowelsignsA + ")", "", Strng)
Strng = re.sub("(?<=[ִֵֶַָֹֻ])([ה])" + "(?!" + vowelsignsAD + ")", "", Strng)
Strng = re.sub("(?<=[ֵֶ])([י])" + "(?!" + vowelsR + vowelsigns + ")", "", Strng)
Strng = Strng.replace("הּ", "ה")
Strng = re.sub("([" + "שרקסנמליזט" + "])(ּ)", r"\1" + "ְ" + "ְ" + r"\1", Strng)
Strng = re.sub("([דתצה])(ּ)", r"\1" + "ְ" + "ְ" + r"\1", Strng)
Strng = (
Strng.replace("ת", "ט")
.replace("ח", "כ")
.replace("ע", "א")
.replace("שׂ", "ס")
)
Strng = re.sub("ש(?![ׂׄ])", "שׁ", Strng)
Strng = Strng.replace("ׁׁ", "ׁ")
Strng = re.sub("ב(?!ּ)", "װ", Strng)
Strng = re.sub(vowelsR + "(׳)", r"\2\1", Strng)
Strng = Strng.replace("גּ׳", "ג׳ְג׳").replace("צּ׳", "צ׳ְצ׳")
Strng = re.sub("צ" + "(?!׳)", "טְְס", Strng)
Strng = re.sub("(\s|^|\.|,|א)" + "(וֹ|וּ)", "א" + r"\1\2", Strng)
Strng = re.sub("(וּ)" + vowelsignsA, "װְװ" + r"\2", Strng)
Strng = re.sub("י" + "(?=" + vowelsigns + "|ַ)", "ײ", Strng)
Strng = re.sub("ו" + "(?=" + "[ְִֵֶַָׇֺֻ]" + "|ַ)", "װ", Strng)
Strng = re.sub("(?<!ִ)(י)", "ײ", Strng)
Strng = re.sub("(ו)(?![ֹֺּ])", "װ", Strng)
Strng = Strng.replace("ֺ", "ֹ")
Strng = re.sub("[א](?!" + vowelsR + ")", "", Strng)
Strng = re.sub(
consonantsAll + "(?!" + vowelsignsADShinG + ")", r"\1" + "ְ" + r"\2", Strng
)
Strng = Strng.replace("אְ", "")
if "" in Strng:
Strng = Strng.replace("", "")
Strng = Strng.replace("ְ" + "ְ", "ְ")
else:
Strng = re.sub("(\s|\.|,|^)" + consonantsAll + "(ְ)", r"\1\2" + "ֶ", Strng)
Strng = re.sub("(ּ)" + "(ְ)", r"\1" + "ֶ", Strng)
Strng = re.sub(
consonantsAll + "(" "ְ" + "ְ" + ")" + "(" + r"\1" + ")(" + "ְ" + ")",
r"\1\2\3" + "ֶ",
Strng,
)
Strng = re.sub(
consonantsAll + "(ְ)" + "(" + r"\1" + ")" + "(?!(\s|\.|\n|,|$))",
r"\1" + "ֶ" + r"\3",
Strng,
)
Strng = re.sub(
consonantsAll + "(ְ)" + consonantsAll + "(ְ)" + "(?!(\s|\.|\n|,|$))",
r"\1\2" + r"\3" + "ֶ",
Strng,
)
Strng = Strng.replace("ְ" + "ְ", "ְ")
Strng = Strng.replace("ֶ" + "ְ", "ְ")
Strng = re.sub("(?<![אע])\u05B7", "", Strng)
Strng = Strng.replace("", "")
return Strng
def FixMongolian(Strng, reverse=False):
vowels = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Mongolian")) + ")"
consonants = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Mongolian")) + ")"
if not reverse:
Strng = re.sub("(\u180B)" + consonants, r"\2", Strng)
Strng = re.sub("(\u180B)" + vowels, r"\2", Strng)
Strng = re.sub(
consonants + consonants + consonants + vowels + "(\u1880)",
r"\5\1\2\3\4",
Strng,
)
Strng = re.sub(
consonants + consonants + vowels + "(\u1880)", r"\4\1\2\3", Strng
)
Strng = re.sub(consonants + "?" + vowels + "(\u1880)", r"\3\1\2", Strng)
Strng = Strng.replace(" \u02BC", "\u200B")
Strng = Strng.replace("\u02BC", "\u200B")
else:
Strng = re.sub("(ᠠ)(?<!\u180B)", r"\1" + "\u180B", Strng)
return Strng
def FixHanifiRohingya(Strng, reverse=False):
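    """Hanifi Rohingya conventions: prefix the carrier letter (U+10D00)
    to independent vowels, mark geminates with the doubling sign
    (U+10D27), and convert the "\\", "/" and "_" notation into the tone
    and length marks.  reverse=True maps these back."""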
consList = (
"("
+ "|".join(
GM.CrunchSymbols(GM.Consonants, "HanifiRohingya")
+ ["\U00010D17", "\U00010D19"]
)
+ ")"
)
vowList = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "HanifiRohingya")) + ")"
vowListNotA = (
"(" + "|".join(GM.CrunchSymbols(GM.Vowels, "HanifiRohingya")[1:]) + ")"
)
consListLookBehind = "".join(
map(
lambda x: "(?<!" + x + ")",
GM.CrunchSymbols(GM.Consonants, "HanifiRohingya"),
)
)
if not reverse:
Strng = re.sub(consListLookBehind + vowList, "\U00010D00" + r"\1", Strng)
Strng = re.sub(consList + r"\1", r"\1" + "\U00010D27", Strng)
Strng = re.sub(vowListNotA + "𐴀𐴟", r"\1" + "\U00010D17", Strng)
Strng = re.sub(vowListNotA + "𐴀𐴞", r"\1" + "\U00010D19", Strng)
Strng = Strng.replace("\U00010D24\\", "\U00010D25")
Strng = Strng.replace("\U00010D24/", "\U00010D26")
Strng = Strng.replace("_", "\U00010D22")
else:
tones = "([\U00010D24\U00010D25\U00010D26])"
Strng = re.sub("(\U00010D00)" + tones + vowList, r"\1\3\2", Strng)
Strng = re.sub(consList + tones + vowList, r"\1\3\2", Strng)
Strng = re.sub(
vowListNotA.replace("\U00010D00", "") + "\U00010D17", r"\1" + "𐴀𐴟", Strng
)
Strng = re.sub(
vowListNotA.replace("\U00010D00", "") + "\U00010D19", r"\1" + "𐴀𐴞", Strng
)
Strng = Strng.replace("\U00010D00", "")
Strng = re.sub("(.)" + "\U00010D27", r"\1\1", Strng)
Strng = Strng.replace("\U00010D25", "\U00010D24\\")
Strng = Strng.replace("\U00010D26", "\U00010D24/")
Strng = re.sub(consList + "\U00010D17", r"\1" + "\U00010D16\u02BE", Strng)
Strng = re.sub(consList + "\U00010D19", r"\1" + "\U00010D18\u02BE", Strng)
Strng = Strng.replace("\U00010D22", "_")
Strng = Strng.replace("𐴜", "𐴖")
if not reverse:
for x, y in zip([",", "?", ";"], ["،", "؟", "؛"]):
Strng = Strng.replace(x, y)
else:
for x, y in zip([",", "?", ";"], ["،", "؟", "؛"]):
Strng = Strng.replace(y, x)
return Strng
def FixMasaramGondi(Strng, reverse=False):
consList = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "MasaramGondi")) + ")"
if not reverse:
Strng = Strng.replace("𑴌𑵅𑴪", "\U00011D2E")
Strng = Strng.replace("𑴓𑵅𑴕", "\U00011D2F")
Strng = Strng.replace("𑴛𑵅𑴦", "\U00011D30")
Strng = re.sub(consList + "\U00011D45\U00011D26", r"\1" + "\U00011D47", Strng)
Strng = re.sub("\U00011D26\U00011D45" + consList, "\U00011D46" + r"\1", Strng)
Strng = re.sub("\U00011D45(?!" + consList + ")", "\U00011D44", Strng)
else:
Strng = Strng.replace("\U00011D2E", "𑴌𑵅𑴪")
Strng = Strng.replace("\U00011D2F", "𑴓𑵅𑴕")
Strng = Strng.replace("\U00011D30", "𑴛𑵅𑴦")
Strng = Strng.replace(
"\U00011D47",
"\U00011D45\U00011D26",
)
Strng = Strng.replace("\U00011D46", "\U00011D26\U00011D45")
Strng = Strng.replace("\U00011D44", "\U00011D45")
return Strng
def FixGunjalaGondi(Strng, reverse=False):
consList = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "GunjalaGondi")) + ")"
if not reverse:
Strng = re.sub(
"(\U00011D7A\u02BE)([\U00011D7B\U00011D7C\U00011D80\U00011D81])",
"\U00011D95" + r"\1",
Strng,
)
Strng = re.sub(
"(\U00011D7A\u02BF)([\U00011D7D\U00011D7E\U00011D82\U00011D83])",
"\U00011D95" + r"\1",
Strng,
)
Strng = Strng.replace("\u02BE", "")
Strng = Strng.replace("\u02BF", "")
Strng = re.sub("\U00011D97(?!" + consList + ")", "", Strng)
else:
pass
return Strng
def FixSoyombo(Strng, reverse=False):
finVir = [
"\U00011A5E\U00011A99",
"\U00011A5C\U00011A99",
"\U00011A60\U00011A99",
"\U00011A6D\U00011A99",
"\U00011A6F\U00011A99",
"\U00011A72\U00011A99",
"\U00011A74\U00011A99",
"\U00011A7C\U00011A99",
"\U00011A7D\U00011A99",
"\U00011A7F\U00011A99",
"\U00011A81\U00011A99",
]
fin = [
"\U00011A8A",
"\U00011A8B",
"\U00011A8C",
"\U00011A8D",
"\U00011A8E",
"\U00011A8F",
"\U00011A90",
"\U00011A91",
"\U00011A92",
"\U00011A93",
"\U00011A94",
]
consList = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Soyombo")) + ")"
if not reverse:
Strng = Strng.replace("𑩜𑪙𑪀", "\U00011A83")
Strng = re.sub(
"\U00011A7F\U00011A99" + "(?=" + consList + ")", "\U00011A88", Strng
)
Strng = re.sub("(?<!𑪙)(.)𑪙" + r"\1", r"\1" + "\U00011A98", Strng)
if "\u02BE" in Strng:
for x, y in zip(finVir, fin):
Strng = re.sub(x + "(?!" + consList + ")", y, Strng)
Strng = re.sub("𑪈(?!" + consList + ")", "\U00011A93", Strng)
Strng = Strng.replace("\u02BE", "")
Strng = re.sub("\U00011A99(?!" + consList + ")", "", Strng)
else:
Strng = Strng.replace("\U00011A9A", " ")
Strng = Strng.replace("\U00011A83", "𑩜𑪙𑪀")
Strng = re.sub("(.)\U00011A98", r"\1" + "\U00011A99" + r"\1", Strng)
viraCon = [
"\U00011A7C\U00011A99",
"\U00011A7D\U00011A99",
"\U00011A81\U00011A99",
"\U00011A7F\U00011A99",
]
initial = ["\U00011A86", "\U00011A87", "\U00011A89", "\U00011A88"]
for x, y in zip(viraCon, initial):
Strng = Strng.replace(y, x)
tsaSeries = ["𑩵", "𑩶", "𑩷"]
caSeries = ["𑩡", "𑩢", "𑩣"]
for x, y in zip(tsaSeries, caSeries):
Strng = Strng.replace(y, x)
for x, y in zip(finVir, fin):
Strng = Strng.replace(y, x)
return Strng
def FixKharoshthi(Strng, reverse=False):
Strng = KharoshthiNumerals(Strng, reverse)
return Strng
def FixMarchen(Strng, reverse=False):
subjoinCons = "𑱲 𑱳 𑱴 𑱵 𑱶 𑱷 𑱸 𑱹 𑱺 𑱻 𑱼 𑱽 𑱾 𑱿 𑲀 𑲁 𑲂 𑲃 𑲄 𑲅 𑲆 𑲇 𑲉 𑲊 𑲋 𑲌 𑲍 𑲎".split(" ")
subjoined = "𑲒 𑲓 𑲔 𑲕 𑲖 𑲗 𑲘 𑲙 𑲚 𑲛 𑲜 𑲝 𑲞 𑲟 𑲠 𑲡 𑲢 𑲣 𑲤 𑲥 𑲦 𑲧 𑲩 𑲪 𑲫 𑲬 𑲭 𑲮".split(" ")
if not reverse:
for x, y in zip(subjoinCons, subjoined):
Strng = Strng.replace("ʾ" + x, y)
Strng = Strng.replace("ʾ", "")
Strng = Strng.replace("\u02BF", "")
else:
tsaSeries = ["\U00011C82", "\U00011C83", "\U00011C84"]
jaSereis = ["\U00011C76", "\U00011C77", "\U00011C78"]
for x, y in zip(tsaSeries, jaSereis):
Strng = Strng.replace(y, x)
for x, y in zip(subjoinCons, subjoined):
Strng = Strng.replace(y, "ʾ" + x)
return Strng
def FixMro(Strng, reverse=False):
extracons = [
"\U00016A4E",
"\U00016A59",
"\U00016A5A",
"\U00016A5B",
"\U00016A5C",
"\U00016A5E",
]
consnormaldig = ["𖩃𖩢", "𖩌𖩢", "𖩍𖩢", "𖩍𖩣", "𖩉𖩢", "𖩀𖩢"]
consnormal = ["𖩃", "𖩌", "𖩍", "𖩍", "𖩉", "𖩀"]
if not reverse:
for x, y in zip(consnormaldig, extracons):
Strng = Strng.replace(x, y)
else:
for x, y in zip(extracons, consnormal):
Strng = Strng.replace(x, y)
return Strng
def FixWancho(Strng, reverse=False):
tonemarks = ["\U0001E2EC", "\U0001E2ED", "\U0001E2EE", "\U0001E2EF"]
tonewri = ["\\_", "\\-", "\\!", "\\;"]
nasalization = [
"\U0001E2E6",
"\U0001E2E7",
"\U0001E2E8",
"\U0001E2EA",
]
nasvowels = ["\U0001E2D5", "\U0001E2DB", "\U0001E2C0", "\U0001E2DE"]
Anusvaras = ["\U0001E2E2", "\U0001E2E3", "\U0001E2E4", "\U0001E2E5"]
AnusvaraVowels = ["\U0001E2D5", "\U0001E2C0", "\U0001E2C1", "\U0001E2DC"]
if not reverse:
for x, y in zip(tonemarks, tonewri):
Strng = Strng.replace(y, x)
for x, y in zip(nasvowels, nasalization):
Strng = Strng.replace(x + "ʿ", y)
Strng = Strng.replace("ʿ", "𞋉")
for x, y in zip(AnusvaraVowels, Anusvaras):
Strng = Strng.replace(x + "ʾ", y)
Strng = Strng.replace("ʾ", "𞋝")
Strng = Strng.replace("𞋋𞋗", "\U0001E2E1")
Strng = Strng.replace("𞋋𞋎", "\U0001E2E0")
Strng = Strng.replace("𞋓Ø", "\U0001E2D2")
Strng = Strng.replace("Ø", "")
else:
for x, y in zip(tonemarks, tonewri):
Strng = Strng.replace(x, y)
for x, y in zip(nasvowels, nasalization):
Strng = Strng.replace(y, x + "ʿ")
for x, y in zip(AnusvaraVowels, Anusvaras):
Strng = Strng.replace(y, x + "ʾ")
Strng = Strng.replace("\U0001E2E1", "𞋋𞋗")
Strng = Strng.replace("\U0001E2E0", "𞋋𞋎")
Strng = Strng.replace("\U0001E2D2", "𞋓Ø")
return Strng
def FixSiddham(Strng, reverse=False):
if not reverse:
pass
else:
Strng = Strng.replace("𑗜", "𑖲")
Strng = Strng.replace("𑗝", "𑖳")
Strng = Strng.replace("𑗛", "𑖄")
Strng = Strng.replace("𑗘", "𑖂")
Strng = Strng.replace("𑗙", "𑖂")
Strng = Strng.replace("𑗚", "𑖃")
return Strng
def FixBhaiksuki(Strng, reverse=False):
if not reverse:
Strng = Strng.replace(" ", "𑱃")
else:
Strng = Strng.replace("𑱃", " ")
return Strng
def FixKhudawadi(Strng, reverse=False):
sindhi = ["𑊽", "𑋃", "𑋉", "𑋕"]
sindhiapprox = ["ˍ𑊼", "ˍ𑋂", "ˍ𑋈", "ˍ𑋔"]
if not reverse:
for x, y in zip(sindhi, sindhiapprox):
Strng = Strng.replace(y, x)
else:
for x, y in zip(sindhi, sindhiapprox):
Strng = Strng.replace(x, y)
return Strng
def FixTamil(Strng, reverse=False):
Strng = CorrectRuLu(Strng, "Tamil", reverse)
ava = Tamil.SignMap[0]
avaA = "\u0028\u0B86\u0029"
VedicSign = ["॑", "॒", "᳚"]
TamilDiacritic = ["ʼ", "ˮ", "꞉"]
if not reverse:
Strng = Strng.replace(ava + ava, avaA)
Strng = post_processing.RetainDandasIndic(Strng, "Tamil", True)
Strng = post_processing.RetainIndicNumerals(Strng, "Tamil", True)
for x in TamilDiacritic:
for y in VedicSign:
Strng = Strng.replace(x + y, y + x)
else:
Strng = Strng.replace(avaA, ava + ava)
Strng = Strng.replace("ஷ²", "ஶ")
Strng = Strng.replace("𑌃", "꞉")
for x in TamilDiacritic:
for y in VedicSign:
Strng = Strng.replace(y + x, x + y)
return Strng
def FixOriya(Strng, reverse=False):
if not reverse:
pass
else:
Strng = Strng.replace("ଵ", "ୱ")
return Strng
def FixGurmukhi(Strng, reverse=False):
Strng = CorrectRuLu(Strng, "Gurmukhi", reverse)
ava = Gurmukhi.SignMap[0]
avaA = "\u0028\u0A06\u0029"
if not reverse:
Strng = Strng.replace(ava + ava, avaA)
Strng = post_processing.InsertGeminationSign(Strng, "Gurmukhi")
Strng = post_processing.RetainIndicNumerals(Strng, "Gurmukhi", True)
Vedicomp = "([" + "".join(GM.VedicSvarasList) + "])"
Strng = re.sub(
Vedicomp + "\u0A71" + "(.)",
r"\1" + r"\2" + Gurmukhi.ViramaMap[0] + r"\2",
Strng,
)
else:
Strng = Strng.replace(avaA, ava + ava)
Strng = post_processing.ReverseGeminationSign(Strng, "Gurmukhi")
Strng = Strng.replace("ੰਨ", "ਨ੍ਨ")
Strng = Strng.replace("ੰਮ", "ਮ੍ਮ")
Strng = Strng.replace("\u0A70", "\u0A02")
Strng = post_processing.GurmukhiYakaash(Strng, True)
return Strng
def CorrectRuLu(Strng, Target, reverse=False):
ra = GM.CrunchList("ConsonantMap", Target)[26]
la = GM.CrunchList("ConsonantMap", Target)[27]
uuu = GM.CrunchSymbols(GM.VowelSigns, Target)[4:6]
ap = "\u02BC"
ruCons = [ra + x + ap for x in uuu] + [la + x + ap for x in uuu]
for x, y in zip(ruCons, GM.CrunchSymbols(GM.Vowels, Target)[6:10]):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
return Strng
def ShiftDiacritics(Strng, Target, reverse=False):
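    # Move the diacritics listed in GM.Diacritics to the far side of
    # vowel signs so they stay attached to the consonant they qualify;
    # Tamil's superscript ³ needs its own ordering around the pulli.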
VS = "|".join(GM.CrunchSymbols(GM.VowelSigns, Target))
Diac = "|".join(GM.Diacritics)
if not reverse:
Strng = re.sub("(" + Diac + ")" + "(" + VS + ")", r"\2\1", Strng)
if Target == "Tamil":
Strng = Strng.replace(
"³்",
"்³",
)
else:
if Target == "Tamil":
Strng = Strng.replace(
"்³",
"³்",
)
Strng = re.sub("(" + VS + ")" + "(" + Diac + ")", r"\2\1", Strng)
return Strng
def FixTamilExtended(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("ക്ഷ", "ക്ഷ")
Strng = Strng.replace("ശ്ര", "ശ്ര")
Strng = Strng.replace("ൗ", "ൌ")
for svara in GM.VedicSvarasList:
Strng = Strng.replace("\u200C" + svara, svara + "\u200C")
else:
for svara in GM.VedicSvarasList:
Strng = Strng.replace(svara + "\u200C", "\u200C" + svara)
Strng = Strng.replace("\u0D4D", "\u0D4D\u200C")
return Strng
def FixTamilGrantha(Strng, reverse=False):
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "TamilGrantha"))
ListEAI = "|".join(
TamilGrantha.VowelSignMap[9:11] + TamilGrantha.SouthVowelSignMap[0:1]
)
ListOAU = TamilGrantha.VowelSignMap[11:13] + TamilGrantha.SouthVowelSignMap[1:2]
if not reverse:
Strng = re.sub(
"(" + ListC + ")" + "(" + ListEAI + ")",
"\u200B\u200C\u200D\u200C" + r"\2\1",
Strng,
)
Strng = re.sub(
"(" + ListC + ")" + "(" + ListOAU[0] + ")",
"\u200B\u200C\u200D\u200C"
+ TamilGrantha.VowelSignMap[9]
+ r"\1"
+ TamilGrantha.VowelSignMap[0],
Strng,
)
Strng = re.sub(
"(" + ListC + ")" + "(" + ListOAU[2] + ")",
"\u200B\u200C\u200D\u200C"
+ TamilGrantha.SouthVowelSignMap[0]
+ r"\1"
+ TamilGrantha.VowelSignMap[0],
Strng,
)
Strng = re.sub(
"(" + ListC + ")" + "(" + ListOAU[1] + ")",
"\u200B\u200C\u200D\u200C"
+ TamilGrantha.SouthVowelSignMap[0]
+ r"\1"
+ Tamil.SouthConsonantMap[0],
Strng,
)
Strng = re.sub(
"(\u200B\u200C\u200D\u200C.)" + "(" + ListC + ")" + "(்ˆ)", r"\2\3\1", Strng
)
else:
Strng = re.sub(
"\u200B"
+ TamilGrantha.VowelSignMap[9]
+ "("
+ ListC
+ ")"
+ TamilGrantha.VowelSignMap[0],
r"\1" + ListOAU[0],
Strng,
)
Strng = re.sub(
"\u200B"
+ TamilGrantha.SouthVowelSignMap[0]
+ "("
+ ListC
+ ")"
+ TamilGrantha.VowelSignMap[0],
r"\1" + ListOAU[2],
Strng,
)
Strng = re.sub(
"\u200B"
+ TamilGrantha.SouthVowelSignMap[0]
+ "("
+ ListC
+ ")"
+ Tamil.SouthConsonantMap[0],
r"\1" + ListOAU[1],
Strng,
)
Strng = re.sub(
"\u200B" + "(" + ListEAI + ")" + "(" + ListC + ")", r"\2\1", Strng
)
return Strng
def FixKhmer(Strng, reverse=False):
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Khmer"))
ra = Khmer.ConsonantMap[26]
vir = Khmer.ViramaMap[0]
if not reverse:
Strng = re.sub(vir + "(" + ListC + ")", "\u17D2" + r"\1", Strng)
Strng = re.sub(
"(?<!\u17D2)(" + ra + ")" + "\u17D2" + "(" + ListC + ")",
r"\2" + "\u17CC",
Strng,
)
Strng = Strng.replace("\u1787\u17C6", "\u17B9")
else:
Strng = Strng.replace("\u17D2", vir)
Strng = re.sub(vir + "(?=[\u17AB\u17AC\u17AD\u17AE])", "\u17D2", Strng)
Strng = re.sub("(" + ListC + ")" + "\u17CC", ra + vir + r"\1", Strng)
Strng = Strng.replace("\u17B9", "\u1787\u17C6")
return Strng
def FixKhamtiShan(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("်ရ", "ြ")
Strng = Strng.replace("်ယ", "ျ")
Strng = Strng.replace("်ဝ", "ွ")
Strng = Strng.replace("\u103C\u103B", "\u103B\u103C")
Strng = Strng.replace("\u103D\u103B", "\u103B\u103D")
Strng = Strng.replace("ႂ\u103C", "\u103Cွ")
else:
Strng = Strng.replace("ꩳ", "ရ")
Strng = Strng.replace("\u103B\u103C", "\u103C\u103B")
Strng = Strng.replace("\u103B\u103D", "\u103D\u103B")
Strng = Strng.replace("\u103Cႂ", "ႂ\u103C")
Strng = Strng.replace("ြ", "်ꩳ")
Strng = Strng.replace("ꩳ", "ရ")
Strng = Strng.replace("ျ", "်ယ")
Strng = Strng.replace("ွ", "်ဝ")
return Strng
def FixTaiLaing(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("်ꩺ", "ြ")
Strng = Strng.replace("်ယ", "ျ")
Strng = Strng.replace("်ဝ", "ႂ")
Strng = Strng.replace("\u103C\u103B", "\u103B\u103C")
Strng = Strng.replace("\u103D\u103B", "\u103B\u103D")
Strng = Strng.replace("ႂ\u103C", "\u103Cႂ")
Strng = Strng.replace("ႂျ", "်၀ျ")
else:
Strng = Strng.replace("\u103B\u103C", "\u103C\u103B")
Strng = Strng.replace("\u103B\u103D", "\u103D\u103B")
Strng = Strng.replace("\u103Cႂ", "ႂ\u103C")
Strng = Strng.replace("ြ", "်ꩺ")
Strng = Strng.replace("ျ", "်ယ")
Strng = Strng.replace("ႂ", "်ဝ")
return Strng
def FixShan(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("်ရ", "ြ")
Strng = Strng.replace("်ယ", "ျ")
Strng = Strng.replace("်ဝ", "\u1082")
Strng = Strng.replace("်ႁ", "ှ")
Strng = re.sub("(ှ)" + "([ျြွ])", r"\2\1", Strng)
Strng = Strng.replace("\u103C\u103B", "\u103B\u103C")
Strng = Strng.replace("\u103D\u103B", "\u103B\u103D")
Strng = Strng.replace("ွ\u103C", "\u103Cွ")
else:
Strng = re.sub("([ျြွ])" + "(ှ)", r"\2\1", Strng)
Strng = Strng.replace("\u103B\u103C", "\u103C\u103B")
Strng = Strng.replace("\u103B\u103D", "\u103D\u103B")
Strng = Strng.replace("\u103Cွ", "ွ\u103C")
Strng = Strng.replace("ြ", "်ရ")
Strng = Strng.replace("ျ", "်ယ")
Strng = Strng.replace("ွ", "်ဝ")
Strng = Strng.replace("\u1082", "်ဝ")
Strng = Strng.replace("ှ", "်ႁ")
return Strng
def FixMon(Strng, reverse=False):
pairs = [("င", "ၚ"), ("ဉ", "ည"), ("ဈ", "ၛ")]
for x, y in pairs:
Strng = Strng.replace(y, x)
Strng = FixBurmese(Strng, reverse)
Strng = Strng.replace("ည", "\uE001")
for x, y in pairs:
Strng = Strng.replace(x, y)
Strng = Strng.replace("\uE001", "ည\u1039ည")
medials_cons_mon = ["\u1039န", "\u1039မ", "\u1039လ"]
medials_mon = ["ၞ", "ၟ", "ၠ"]
if not reverse:
for x, y in zip(medials_cons_mon, medials_mon):
Strng = Strng.replace(x, y)
Strng = Strng.replace("ၠြ", "ြၠ")
for i, med1 in enumerate(medials_mon):
for j, med2 in enumerate(medials_mon):
Strng = Strng.replace(
med1 + med2, medials_cons_mon[i] + medials_cons_mon[j]
)
for i, med in enumerate(medials_mon):
Strng = Strng.replace(med + "ျ", medials_cons_mon[i] + "ျ")
Strng = Strng.replace("ရ်" + med, "ရ်" + medials_cons_mon[i])
Strng = Strng.replace("ၚ်" + med, "ၚ်" + medials_cons_mon[i])
else:
Strng = Strng.replace("်ရၠ", "ၠ်ရ")
for x, y in zip(medials_cons_mon, medials_mon):
Strng = Strng.replace(y, x)
Strng = Strng.replace("\u1039", "်")
return Strng
def FixBurmese(Strng, reverse=False):
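    """Burmese shaping: the explicit virama becomes the stacking sign
    (U+1039), y/r/v/h take their medial forms (ျ ြ ွ ှ), tall AA (ါ) is
    chosen after the wide consonants, and သ္သ / ဉ္ဉ contract to ဿ / ည.
    reverse=True unpacks all of this back to the generic encoding."""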
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Burmese"))
vir = Burmese.ViramaMap[0]
AA = Burmese.VowelSignMap[0]
E = Burmese.VowelSignMap[9]
yrvh = (
Burmese.ConsonantMap[25:27]
+ Burmese.ConsonantMap[28:29]
+ Burmese.ConsonantMap[32:33]
)
yrvhsub = ["\u103B", "\u103C", "\u103D", "\u103E"]
TallACons = "|".join([Burmese.ConsonantMap[x] for x in [1, 2, 4, 17, 20, 28]])
if not reverse:
Strng = re.sub("(?<!ာ)" + vir + "(" + ListC + ")", "\u1039" + r"\1", Strng)
Strng = re.sub(
"(" + Burmese.ConsonantMap[4] + ")" + "(" + "\u1039" + ")",
r"\1" + vir + r"\2",
Strng,
)
Strng = re.sub("(ရ)" + "(" + "\u1039" + ")", r"\1" + vir + r"\2", Strng)
Strng = re.sub(
"(?<!\u1039)(" + TallACons + ")" + "(" + E + "?)" + AA,
r"\1\2" + "\u102B",
Strng,
)
Strng = re.sub(
"(" + TallACons + ")(\u1039)(" + ListC + ")" + "(" + E + "?)" + AA,
r"\1\2\3\4" + "\u102B",
Strng,
)
Strng = re.sub(
"("
+ TallACons
+ ")(\u1039)("
+ ListC
+ ")"
+ "(\u1039)("
+ ListC
+ ")"
+ "("
+ E
+ "?)"
+ AA,
r"\1\2\3\4\5\6" + "\u102B",
Strng,
)
Strng = re.sub(
"(?<=်္)" + "(" + TallACons + ")" + "(" + E + "?)" + AA,
r"\1\2" + "\u102B",
Strng,
)
for x, y in zip(yrvh, yrvhsub):
Strng = re.sub("(?<!်)\u1039" + x, y, Strng)
Strng = re.sub("ျါ", "ျာ", Strng)
Strng = re.sub("(?<!ဂ)ြါ", "ြာ", Strng)
Strng = re.sub("ျေါ", "ျော", Strng)
Strng = re.sub("(?<!ဂ)ြေါ", "ြော", Strng)
Strng = Strng.replace("သ္သ", "ဿ")
Strng = Strng.replace("ဉ္ဉ", "ည")
Strng = Strng.replace("\u02F3", "့")
Strng = Strng.replace(
"့်",
"့်",
)
Strng = Strng.replace("ာ္", "ာ်")
Strng = re.sub("(ရ်္င်္)" + "(" + ListC + ")", "ရ်္င္" + r"\2", Strng)
Strng = Strng.replace("ါ္", "ါ်")
Strng = Strng.replace("\u103A\u1039\u101A", "\u103B")
Strng = Strng.replace("\u103C\u103A\u1039ဝ", "\u103Cွ")
Strng = re.sub("(ှ)" + "([ျြွ])", r"\2\1", Strng)
Strng = Strng.replace("\u103C\u103B", "\u103B\u103C")
Strng = Strng.replace("\u103D\u103B", "\u103B\u103D")
Strng = Strng.replace("ွ\u103C", "\u103Cွ")
Strng = Strng.replace("ရျ", "ရ်္ယ")
Strng = Strng.replace("ငျ", "င်္ယ")
else:
Strng = re.sub("([ျြွ])" + "(ှ)", r"\2\1", Strng)
Strng = Strng.replace("\u103B\u103C", "\u103C\u103B")
Strng = Strng.replace("\u103B\u103D", "\u103D\u103B")
Strng = Strng.replace("\u103Cွ", "ွ\u103C")
Strng = Strng.replace("ဿ", "သ္သ")
Strng = Strng.replace("ည", "ဉ္ဉ")
Strng = Strng.replace("့်", "့်")
Strng = Strng.replace("့", "\u02F3")
Strng = Strng.replace("\u1039", vir)
Strng = Strng.replace("\u102B", AA)
Strng = Strng.replace(
Burmese.ConsonantMap[4] + vir + vir, Burmese.ConsonantMap[4] + vir
)
Strng = Strng.replace("ရ" + vir + vir, "ရ" + vir)
for x, y in zip(yrvh, yrvhsub):
Strng = Strng.replace(y, vir + x)
return Strng
def AddRepha(Strng, Script, Repha, reverse=False):
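    # Replace syllable-final ra + virama with the script's repha sign
    # (reverse expands the repha back to ra + virama).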
vir = GM.CrunchSymbols(GM.VowelSigns, Script)[0]
ra = GM.CrunchSymbols(GM.Consonants, Script)[26]
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, Script))
ListV = "|".join(GM.CrunchSymbols(GM.Vowels, Script))
ListVS = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Script))
if not reverse:
Strng = re.sub(
"(" + ListC + "|" + ListV + "|" + ListVS + ")" + "(" + ra + vir + ")",
r"\1" + Repha,
Strng,
)
else:
Strng = Strng.replace(Repha, ra + vir)
return Strng
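
# AddRepha rewrites a syllable-final "ra + virama" that follows a letter into
# the script's repha sign -- a reduced r written as a mark on the following
# akshara (Balinese surang U+1B03, Javanese layar U+A982); the reverse branch
# simply restores ra + virama.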
def FixTagbanwa(Strng, reverse=False):
if not reverse:
Strng = post_processing.InsertGeminationSign(Strng, "Tagbanwa")
else:
pass
return Strng
def FixBuhid(Strng, reverse=False):
if not reverse:
Strng = post_processing.InsertGeminationSign(Strng, "Buhid")
else:
pass
return Strng
def FixBuginese(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("ᨂ\u02BEᨀ", "ᨃ")
Strng = Strng.replace("ᨆ\u02BEᨄ", "ᨇ")
Strng = Strng.replace("ᨊ\u02BEᨑ", "ᨋ")
Strng = Strng.replace("ᨎ\u02BEᨌ", "ᨏ")
Strng = post_processing.InsertGeminationSign(Strng, "Buginese")
Strng = Strng.replace("\u02BE", "")
else:
Strng = Strng.replace("ᨃ", "ᨂ\u02BEᨀ")
Strng = Strng.replace("ᨇ", "ᨆ\u02BEᨄ")
Strng = Strng.replace("ᨋ", "ᨊ\u02BEᨑ")
Strng = Strng.replace("ᨏ", "ᨎ\u02BEᨌ")
return Strng
def FixBalinese(Strng, reverse=False):
Repha = "\u1B03"
Strng = AddRepha(Strng, "Balinese", Repha, reverse)
return Strng
def FixJavanese(Strng, reverse=False):
Repha = "\uA982"
vir = Javanese.ViramaMap[0]
ra, ya = Javanese.ConsonantMap[26], Javanese.ConsonantMap[25]
SubRa, SubYa = "\uA9BF", "\uA9BE"
Strng = AddRepha(Strng, "Javanese", Repha, reverse)
if not reverse:
Strng = Strng.replace(vir + ra, SubRa).replace(vir + ya, SubYa)
else:
Strng = Strng.replace(SubRa, vir + ra).replace(SubYa, vir + ya)
return Strng
def FixUrdu(Strng, reverse=False):
return FixUrduShahmukhi("Urdu", Strng, reverse)
def FixShahmukhi(Strng, reverse=False):
return FixUrduShahmukhi("Shahmukhi", Strng, reverse)
def FixUrduShahmukhi(Target, Strng, reverse=False):
Strng = Strng.replace("\u02BD", "")
vir = GM.CrunchSymbols(GM.VowelSigns, Target)[0]
ConUnAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 4, 9, 14, 19, 24]
+ list(range(25, 33))
]
ConUnAsp = (
ConUnAsp
+ GM.CrunchList("SouthConsonantMap", Target)
+ GM.CrunchList("NuktaConsonantMap", Target)
)
ShortVowels = "|".join(["\u0652", "\u064E", "\u0650", "\u064F"])
a = "\u064E"
ya = "\u06CC"
va = "\u0648"
yaBig = "\u06D2"
Aa = Urdu.VowelSignMap[0]
if not reverse:
ListVS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, Target)) + ")"
ListV = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, Target)) + ")"
ListVSA = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, Target) + [a]) + ")"
hamzaFull = "\u0621"
hamzaChair = "\u0626"
Strng = re.sub(ListVS + ListV, r"\1" + hamzaFull + r"\2", Strng)
Strng = re.sub(ListV + ListV, r"\1" + hamzaFull + r"\2", Strng)
Strng = re.sub(
"(" + a + ")" + ListV + "(?!" + ListVSA + ")",
r"\1" + hamzaFull + r"\2",
Strng,
)
Strng = re.sub("(" + a + ")" + "(" + ShortVowels + ")", r"\2", Strng)
Strng = re.sub(
"(?<!"
+ Aa
+ ")"
+ "("
+ a
+ ")"
+ "("
+ va
+ "|"
+ ya
+ ")"
+ "(?!"
+ ShortVowels
+ ")",
r"\2",
Strng,
)
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, Target)).replace(a, "")
Ayoga = "|".join(Urdu.AyogavahaMap[0] + Urdu.AyogavahaMap[1])
Strng = Strng.replace(ya, yaBig)
Strng = re.sub(
"(" + yaBig + ")" + "(?=" + "|".join(ConUnAsp) + ShortVowels + ")",
ya,
Strng,
)
Strng = re.sub("(" + yaBig + ")" + "(" + ListC + ")", ya + r"\2", Strng)
Strng = re.sub("(" + yaBig + ")" + "(" + Ayoga + ")", ya + r"\2", Strng)
Strng = Strng.replace("\u0650" + yaBig, "\u0650" + ya)
ConAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [1, 3, 6, 8, 11, 13, 16, 18, 21, 23]
]
ConUnAsp_a = [x.replace("\u064e", "") for x in ConUnAsp]
Strng = re.sub(
"(" + "|".join(ConUnAsp_a) + ")" + "(" + vir + ")" + r"\1",
r"\1" + GM.Gemination[Target],
Strng,
)
Strng = re.sub("(.)(ّ)(\u06BE)", r"\1\3\2", Strng)
Strng = Strng.replace("ےے", "یے")
Strng = Strng.replace("ےی", "یی")
Strng = Strng.replace("ےْ", "یْ")
Strng = Strng.replace("ءاِی", "\u0626\u0650\u06CC")
Strng = Strng.replace("ءاے", "ئے")
Strng = Strng.replace("ءای", "ئی")
Strng = Strng.replace("ءاو", "ؤ")
Strng = Strng.replace("ءاُو", "\u0624\u064F")
Strng = Strng.replace("ءاُ", "\u0624\u064F")
Strng = re.sub("(" + hamzaFull + ")(اُو)", r"\1" + "\u0624\u064F", Strng)
Strng = re.sub("(" + hamzaFull + ")(اُ)", r"\1" + "\u0624\u064F", Strng)
Strng = re.sub("(" + hamzaFull + ")(او)", r"\1" + "\u0624", Strng)
Strng = Strng.replace("ءاِ", "\u0626\u0650")
Strng = Strng.replace("ئِءآ", "\u0626\u0650\u0627")
Strng = re.sub(
"(" + hamzaFull + ")(\u0627\u0650)", r"\1" + "\u0626\u0650", Strng
)
Strng = re.sub("(" + hamzaFull + ")(ا)(ے|ی)", r"\1" + "\u0626" + r"\3", Strng)
Strng = Strng.replace("ئِئ", "ئِ")
Strng = Strng.replace("ئِؤ", "ئِو")
Strng = Strng.replace("ࣇ", "لؕ")
if Target == "Shahmukhi":
Strng = re.sub("(ن|م|ی|ر|ل|و)(\u0652)(ہ)", r"\1" + "\u06BE", Strng)
        else:
            Strng = re.sub("(\\s)\u06BE", r"\1" + "ہ", Strng)
Strng = Strng.replace("ۓ", "_\u06d2")
if Target == "Shahmukhi":
Strng = re.sub("(ن|م|ی|ر|ل|و)(\u06BE)", r"\1" + "\u0652ہ", Strng)
Strng = Strng.replace("لؕ", "ࣇ")
ListC = GM.CrunchSymbols(GM.Consonants, Target)
Strng = Strng.replace("ص", "س")
Strng = Strng.replace("ث", "س")
Strng = Strng.replace("ح", "ہ")
Strng = Strng.replace("ۃ", "ہ")
Strng = Strng.replace("ذ", "ز")
Strng = Strng.replace("ض", "ز")
Strng = Strng.replace("ظ", "ز")
Strng = Strng.replace("ط", "ت")
Strng = Strng.replace("ژ", "ز")
Strng = Strng.replace("ع", "اَ")
Strng = Strng.replace("ً", "نْ")
Strng = Strng.replace("ئ", "_" + ya)
Strng = Strng.replace("ؤ", "_" + va + a)
Strng = Strng.replace("ء", "_")
Strng = Strng.replace("یٰ", "ا")
Strng = Strng.replace("ك", "ک")
Strng = Strng.replace("ي", "ی")
Strng = re.sub("(\u06BE)(ّ)", r"\2\1", Strng)
Strng = re.sub("(" + ShortVowels + ")(ّ)", r"\2" + r"\1", Strng)
Strng = re.sub("(.)(ّ)", r"\1" + "ْ" + r"\1", Strng)
if "\u02BB\u02BB" in Strng:
Strng = Strng.replace("ا", "اَ")
Strng = Strng.replace("لؕ", "لَؕ")
for c in ListC:
Strng = Strng.replace(c.replace(a, ""), c)
Strng = Strng.replace(c + "اَ", c + "ا")
Strng = Strng.replace(c + "ا" + "و", c + "ا" + "\u200B" + "و")
Strng = Strng.replace(c + "ا" + "ی", c + "ا" + "\u200B" + "ی")
Strng = Strng.replace(a + "ھ", "ھ" + a)
Strng = Strng.replace("ھ" + a + "اَ", "ھ" + a + "ا")
Strng = Strng.replace(
"ھ" + a + "ا" + "و", "ھ" + a + "ا" + "\u200B" + "و"
)
Strng = Strng.replace(
"ھ" + a + "ا" + "ی", "ھ" + a + "ا" + "\u200B" + "ی"
)
Strng = Strng.replace(a + a, a)
Strng = Strng.replace("اَے", "اے")
Strng = Strng.replace(yaBig, ya)
Strng = Strng.replace("\u02BB\u02BB", "")
else:
ShortVowelsR = "|".join(["\u0652", "\u0650", "\u064F"])
longVowels = "|".join(["و", "ا", ya])
Strng = Strng.replace(yaBig, ya)
ListCR = "|".join(GM.CrunchSymbols(GM.Consonants, Target)).replace(
a, ""
)
Strng = re.sub(
"(" + ListCR + ")" + "(" + ShortVowelsR + ")",
r"\1" + a + r"\2",
Strng,
)
Strng = re.sub(
"("
+ ListCR
+ ")"
+ "("
+ longVowels
+ ")"
+ "(?!"
+ ShortVowels
+ ")",
r"\1" + a + r"\2",
Strng,
)
Strng = re.sub("(" + ListCR + ")" + "(_)", r"\1" + a + r"\2", Strng)
VowelVS = "|".join(GM.CrunchSymbols(GM.VowelSigns, Target))
Strng = PersoArabicPuntuation(Strng, reverse)
return Strng
def PersoArabicPuntuation(Strng, reverse=False):
if not reverse:
for x, y in zip([",", "?", ";"], ["،", "؟", "؛"]):
Strng = Strng.replace(x, y)
Strng = Strng.replace(".", "۔")
else:
for x, y in zip([",", "?", ";"], ["،", "؟", "؛"]):
Strng = Strng.replace(y, x)
Strng = Strng.replace("۔", ".")
return Strng
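
# Illustrative behaviour of PersoArabicPuntuation (forward direction):
#
#     PersoArabicPuntuation("kya hai?")   # -> "kya hai؟"
#
# It swaps the Latin comma, question mark, semicolon and full stop with the
# Perso-Arabic marks ، ؟ ؛ ۔ and restores them when reverse=True.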
def FixThaana(Strng, reverse=False):
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Thaana"))
VowelVS = "|".join(GM.CrunchSymbols(GM.VowelSigns, "Thaana"))
aBase = "\u0787"
if not reverse:
Strng = post_processing.InsertGeminationSign(Strng, "Thaana")
Strng = re.sub("(\u07A6)" + "(?=(" + VowelVS + "))", "", Strng)
Strng = Strng.replace("\u02BE", "")
for x, y in zip([",", "?", ";"], ["،", "؟", "؛"]):
Strng = Strng.replace(x, y)
Strng = Strng.replace("ʔ", "އް")
else:
Strng = Strng.replace("ޢ", "އ")
Strng = Strng.replace("ޡ", "ތ")
Strng = Strng.replace("ޥ", "ވ")
Strng = Strng.replace("ޠ", "ތ")
Strng = Strng.replace("ޟ", "ސ")
Strng = Strng.replace("ޞ", "ސ")
Strng = Strng.replace("ޜ", "ށ")
Strng = Strng.replace("ޛ", "ދ")
Strng = Strng.replace("ޘ", "ތ")
Strng = Strng.replace("ޛ", "ދ")
Strng = Strng.replace("ޙ", "ހ")
Strng = re.sub(
"(" + ListC.replace("ަ", "") + ")" + "(?!" + VowelVS + "|ަ" + ")",
r"\1" + "ް",
Strng,
)
Strng = re.sub(
"(?<!" + aBase + ")(?<!" + "\u02BD\u02BD\u02BD" + ")(" + VowelVS + ")",
"\u07A6" + r"\1",
Strng,
)
Strng = post_processing.ReverseGeminationSign(Strng, "Thaana")
Strng = Strng.replace("އް", "ʔ")
for x, y in zip([",", "?", ";"], ["،", "؟", "؛"]):
Strng = Strng.replace(y, x)
return Strng
def FixSaurashtra(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("ꢒ꣄ꢰ", "ꢒ꣄ꢰ")
else:
Strng = Strng.replace("ꢴ", "꣄ꢲ")
return Strng
def FixTibetan(Strng, reverse=False):
ListC = [Tibetan.ViramaMap[0] + chr(x) for x in range(0x0F40, 0x0F68)]
ListSubC = [chr(x + 80) for x in range(0x0F40, 0x0F68)]
SubC = ["ཝྭ", "ཡྱ", "རྱ", "རྭ", "ྺྭ"]
SubMinC = ["ཝྺ", "ཡྻ", "ཪྻ", "ཪྺ", "ྺྺ"]
if not reverse:
for x, y in zip(ListC, ListSubC):
Strng = Strng.replace(x, y)
for x, y in zip(SubC, SubMinC):
Strng = Strng.replace(x, y)
Strng = Strng.replace(" ", "\u0F0B")
Strng = Strng.replace("ཛྷ༹", "ཞ")
Strng = Strng.replace("(", "༺")
Strng = Strng.replace(")", "༻")
Strng = Strng.replace("{", "༼")
Strng = Strng.replace("}", "༽")
if reverse:
AspirateDecom = ["གྷ", "ཌྷ", "དྷ", "བྷ", "ཛྷ", "ྒྷ", "ྜྷ", "ྡྷ", "ྦྷ", "ྫྷ"]
AspirateAtomic = ["གྷ", "ཌྷ", "དྷ", "བྷ", "ཛྷ", "ྒྷ", "ྜྷ", "ྡྷ", "ྦྷ", "ྫྷ"]
Strng = Strng.replace("ཇྷ", "ཛྷ")
for x, y in zip(AspirateDecom, AspirateAtomic):
Strng = Strng.replace(x, y)
for x, y in zip(SubC, SubMinC):
Strng = Strng.replace(y, x)
for x, y in zip(ListC, ListSubC):
Strng = Strng.replace(y, x)
for x, y in zip(["྄རྀ", "྄རཱྀ", "྄ལྀ", "྄ལཱྀ"], ["ྲྀ", "ྲཱྀ", "ླྀ", "ླཱྀ"]):
Strng = Strng.replace(x, y)
Strng = Strng.replace("་", " ")
Strng = Strng.replace("༔", "།")
Strng = Strng.replace("༈", "།")
Strng = Strng.replace("༺", "(")
Strng = Strng.replace("༻", ")")
Strng = Strng.replace("༼", "{")
Strng = Strng.replace("༽", "}")
Strng = Strng.replace("འ", "ཨ")
Strng = Strng.replace("ཇ", "ཛ")
Strng = Strng.replace("ཞ", "ཛྷ༹")
return Strng
def ReverseVowelSigns(Strng, Script, reverse=False):
EAIO = "|".join(
sorted(
GM.CrunchSymbols(GM.VowelSignsNV, Script)[9:12]
+ GM.CrunchSymbols(GM.VowelSignsNV, Script)[17:],
key=len,
reverse=True,
)
)
cons = "|".join(GM.CrunchSymbols(GM.Consonants, Script))
a = GM.CrunchSymbols(GM.Vowels, Script)[0].split()[0]
consa = "|".join(GM.CrunchSymbols(GM.Consonants, Script) + [a])
if Script == "Thai":
EAIO += "|ใ"
cons = "|".join(
GM.CrunchSymbols(GM.Consonants, Script) + ["ฮ", "บ", "ฝ", "ด", "ฦ", "ฤ"]
)
if Script == "Lao":
cons = "|".join(GM.CrunchSymbols(GM.Consonants, Script) + ["ດ", "ບ", "ຟ"])
a = GM.CrunchSymbols(GM.Vowels, Script)[0]
if not reverse:
Strng = re.sub("(" + consa + ")(" + EAIO + ")", r"\2\1", Strng)
else:
Strng = re.sub("(" + EAIO + ")" + "(" + consa + ")", r"\2\1", Strng)
return Strng
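
# ReverseVowelSigns handles the pre-posed vowels of Thai/Lao-type scripts: the
# e/ai/o signs (plus Thai ใ) are written *before* the consonant in visual order
# although they are pronounced after it, so the forward direction moves the
# sign in front of the consonant and the reverse direction restores the
# logical (pronunciation) order.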
def FixKhomThai(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("โ", "เา")
Strng = ThaiReverseVowelSigns(Strng, reverse)
Strng = re.sub("(.\u0E3A)(.\u0E3A)(ใ)", r"\3\1\2", Strng)
Strng = re.sub("(.\u0E3A)(ใ)", r"\2\1", Strng)
Strng = re.sub("((.\u0E3A)+)(เ)", r"\3\1", Strng)
Strng = re.sub("(.\u0E3A)?(.)(ฺร)", r"\3\1\2", Strng)
Strng = Strng.replace("เอา", "โอ")
Strng = Strng.replace("เอำ", "เาอํ")
Strng = Strng.replace("เาอํ", "โอํ")
else:
Strng = re.sub("(ใ)(.\u0E3A)(.\u0E3A)", r"\2\3\1", Strng)
Strng = re.sub("(ใ)(.\u0E3A)", r"\2\1", Strng)
Strng = re.sub("(ฺร)(.\u0E3A)?(.)", r"\2\3\1", Strng)
Strng = re.sub("(เ)((.\u0E3A)+)", r"\2\1", Strng)
Strng = ThaiReverseVowelSigns(Strng, reverse)
Strng = Strng.replace("เา", "โ")
return Strng
def FixThai(Strng, reverse=False):
Strng = ThaiReverseVowelSigns(Strng, reverse)
Strng = ThaiDigraphConjuncts(Strng, reverse)
if "\u02BB\u02BB" in Strng:
Strng = post_processing.ThaiLaoTranscription(
Strng, "Thai", "\u0E30", "\u0E31", True
)
Strng = Strng.replace("\u02BB\u02BB", "")
Strng = Strng.replace("หฺ์", "ห์")
return Strng
def ThaiReverseVowelSigns(Strng, reverse=False):
Strng = ReverseVowelSigns(Strng, "Thai", reverse)
if not reverse:
Strng = Strng.replace("\u0E32\u0E4D", "\u0E33").replace(
"\u0E34\u0E4D", "\u0E36"
)
else:
Strng = Strng.replace("\u0E33", "\u0E32\u0E4D").replace(
"\u0E36", "\u0E34\u0E4D"
)
return Strng
def FixLaoPali(Strng, reverse=False):
Strng = ReverseVowelSigns(Strng, "LaoPali", reverse)
if "\u02BB\u02BB" in Strng:
Strng = LaoPaliTranscribe(Strng, True)
Strng = Strng.replace("\u02BB\u02BB", "")
Strng = Strng.replace("ຫ຺໌", "ຫ໌")
if not reverse:
Strng = Strng.replace("\u0EB2\u0ECD", "\u0EB3")
else:
Strng = Strng.replace("\u0EB3", "\u0EB2\u0ECD")
return Strng
def FixMakasar(Strng, reverse=False):
ListC = "|".join(Makasar.ConsonantMap)
ListV = "|".join(Makasar.VowelSignMap)
Anka = "\U00011EF2"
if not reverse:
Strng = post_processing.InsertGeminationSign(Strng, "Makasar")
Strng = Strng.replace("\u02BE", "")
Strng = re.sub(
"(" + ListC + ")" + "(" + ListV + ")?" + r"\1", r"\1" + r"\2" + Anka, Strng
)
else:
Strng = re.sub(
"(" + ListC + ")" + "(" + ListV + ")?" + Anka, r"\1" + r"\2" + r"\1", Strng
)
return Strng
def FixAvestan(Strng, reverse=False):
extraCons = ["\U00010B33", "\U00010B32", "\U00010B1D", "\U00010B12", "𐬣", "𐬝"]
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Avestan") + extraCons)
ListV = "|".join(GM.CrunchSymbols(GM.Vowels, "Avestan"))
ya = Avestan.ConsonantMap[25]
va = Avestan.ConsonantMap[28]
ii = Avestan.VowelMap[2] * 2
uu = Avestan.VowelMap[4] * 2
if not reverse:
Strng = Strng.replace("𐬀𐬩", "𐬄")
Strng = Strng.replace("𐬁𐬩", "𐬅")
Strng = re.sub(
"((" + ListV + ")" + "|" + "(" + ListC + "))" + "(" + ya + ")",
r"\1" + ii,
Strng,
)
Strng = re.sub(
"((" + ListV + ")" + "|" + "(" + ListC + "))" + "(" + va + ")",
r"\1" + uu,
Strng,
)
Strng = Strng.replace(Avestan.ConsonantMap[15] + "\u02BF", "\U00010B1D")
Strng = Strng.replace(va + "\u02BF", "\U00010B21")
Strng = Strng.replace("𐬰\u02BF", "𐬲").replace("𐬱\u02BF", "𐬲")
Strng = Strng.replace("𐬢\u02BF", "𐬤")
Strng = Strng.replace("𐬁_𐬋", "𐬃")
Strng = Strng.replace("\u02BF", "")
else:
Strng = Strng.replace("𐬄", "𐬀𐬩")
Strng = Strng.replace("𐬅", "𐬁𐬩")
Strng = Strng.replace(ii, ya).replace(uu, va)
Strng = Strng.replace("\U00010B1D", Avestan.ConsonantMap[15] + "\u02BF")
Strng = Strng.replace("𐬣", Avestan.ConsonantMap[4])
Strng = Strng.replace("\U00010B12", Avestan.ConsonantMap[1])
Strng = Strng.replace("\U00010B33", Avestan.ConsonantMap[29])
Strng = Strng.replace("𐬡", va + "\u02BF")
Strng = Strng.replace("𐬲", "𐬰\u02BF")
Strng = Strng.replace("𐬤", "𐬢\u02BF")
Strng = Strng.replace("𐬃", "𐬁_𐬋")
return Strng
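
# FixAvestan notes: in the romanization used here, non-initial y and v after a
# letter are written with the doubled vowel signs ii/uu, which is what the two
# substitutions over ya/va encode; the \u02BF marks consumed above select
# variant letters (e.g. 𐬲, 𐬤) that the plain mapping tables cannot
# distinguish.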
def FixLao(Strng, reverse=False):
if reverse:
Strng = Strng.replace("ດ", "ທ\uEB0A")
Strng = Strng.replace("ບ", "ປ\uEB0A")
Strng = Strng.replace("ຟ", "ພ\uEB0A")
Strng = Strng.replace("ັ", "ະ")
if not reverse:
Strng = Strng.replace("ທ\uEB0A", "ດ")
Strng = Strng.replace("ປ\uEB0A", "ບ")
Strng = Strng.replace("ພ\uEB0A", "ຟ")
Strng = re.sub("(?<!ດ)(?<!ບ)(?<!ຟ)\uEB0A", "", Strng)
Strng = ReverseVowelSigns(Strng, "Lao", reverse)
Strng = LaoTranscribe(Strng, reverse)
if not reverse:
Strng = Strng.replace("\u0EB2\u0ECD", "\u0EB3")
Strng = Strng.replace("\uEB0A", "")
else:
Strng = Strng.replace("\u0EB3", "\u0EB2\u0ECD")
Strng = Strng.replace("\u0EBA\uEB0A", "\uEB0A\u0EBA")
Strng = Strng.replace("຺ະ", "")
Strng = Strng.replace("ອ\u0EBAົ", "ອົ")
return Strng
def ThaiDigraphConjuncts(Strng, reverse=False):
EAIO = "".join(Thai.VowelSignMap[9:12])
cons = "|".join(GM.CrunchSymbols(GM.Consonants, "Thai"))
yrlvh = "|".join(
GM.CrunchSymbols(GM.Consonants, "Thai")[25:29]
+ GM.CrunchSymbols(GM.Consonants, "Thai")[32:33]
)
sh = "|".join(Thai.ConsonantMap[31:33])
vir = Thai.ViramaMap[0]
if not reverse:
Strng = re.sub(
"(?<=\s)("
+ cons
+ ")"
+ "("
+ vir
+ ")"
+ "(["
+ EAIO
+ "])"
+ "("
+ cons
+ ")",
r"\3\1\2\4",
Strng,
)
Strng = re.sub(
"(" + cons + ")" + "(" + vir + ")" + "([" + EAIO + "])" + "(" + yrlvh + ")",
r"\3\1\2\4",
Strng,
)
Strng = re.sub(
"(" + sh + ")" + "(" + vir + ")" + "([" + EAIO + "])" + "(" + cons + ")",
r"\3\1\2\4",
Strng,
)
else:
Strng = re.sub(
"([" + EAIO + "])" + "(" + vir + ")" + "(" + cons + ")", r"\2\3\1", Strng
)
return Strng
def FixOldPersian(Strng, reverse=False):
Strng = OldPersianSyllable(Strng, reverse)
Strng = OldPersianNumeral(Strng, reverse)
return Strng
def OldPersianSyllable(Strng, reverse=False):
ICons = [
x + "\U000103A1"
for x in [
"\U000103AD",
"\U000103B6",
"\U000103A9",
"\U000103BA",
"\U000103AB",
"\U000103B4",
"\U000103BC",
]
]
ICons_ = [
x + "_\U000103A1"
for x in [
"\U000103AD",
"\U000103B6",
"\U000103A9",
"\U000103BA",
"\U000103AB",
"\U000103B4",
"\U000103BC",
]
]
ISyll = [
x + "\U000103A1"
for x in [
"\U000103AE",
"\U000103B7",
"\U000103AA",
"\U000103BB",
"\U000103AB",
"\U000103B4",
"\U000103BC",
]
]
UCons = [
x + "\U000103A2"
for x in [
"\U000103AD",
"\U000103B6",
"\U000103A3",
"\U000103A5",
"\U000103AB",
"\U000103B4",
"\U000103BC",
]
]
UCons_ = [
x + "_\U000103A2"
for x in [
"\U000103AD",
"\U000103B6",
"\U000103A3",
"\U000103A5",
"\U000103AB",
"\U000103B4",
"\U000103BC",
]
]
USyll = [
x + "\U000103A2"
for x in [
"\U000103AF",
"\U000103B8",
"\U000103A4",
"\U000103A6",
"\U000103AC",
"\U000103B5",
"\U000103BD",
]
]
ACons = [
x + "<\U000103A0"
for x in [
"\U000103AD",
"\U000103B6",
"\U000103A3",
"\U000103A5",
"\U000103A9",
"\U000103BA",
"𐎼",
"𐎴",
"𐎫",
]
]
ASyll = [
"\U000103AD",
"\U000103B6",
"\U000103A3",
"\U000103A5",
"\U000103A9",
"\U000103BA",
"𐎼",
"𐎴",
"𐎫",
]
SylAlpha = "([𐎧𐎨𐏂𐎰𐎱𐎳𐎲𐎹𐎾𐎿𐏀𐏁𐏃])"
ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "OldPersian")) + ")"
if not reverse:
Strng = Strng.replace(" ", "\U000103D0").replace("_", "").replace("<", "")
for x, y in zip(ICons + UCons + ACons, ISyll + USyll + ASyll):
Strng = Strng.replace(x, y)
else:
Strng = re.sub("𐎻(?!\U000103A1)", "𐎻\U000103A1", Strng)
for x, y in zip(ICons_ + UCons_, ISyll + USyll):
Strng = Strng.replace(y, x)
Strng = re.sub(SylAlpha + "(𐎠𐎡)", r"\1<\2", Strng)
Strng = re.sub(SylAlpha + "(𐎠𐎢)", r"\1<\2", Strng)
Strng = re.sub(ListC + "\U000103A0", r"\1" + "_\U000103A0", Strng)
Strng = re.sub(SylAlpha + "([\U000103A1\U000103A2])", r"\1_\2", Strng)
Strng = re.sub(
"([" + "".join(ASyll) + "])" + "([\U000103A1\U000103A2])",
r"\1" + "<" + "\U000103A0" + r"\2",
Strng,
)
Strng = Strng.replace("𐏐", " ")
return Strng
def OldPersianNumeral(Strng, reverse=False):
One = "\U000103D1"
Two = "\U000103D2"
Ten = "\U000103D3"
Twenty = "\U000103D4"
Hundred = "\U000103D5"
    # Convert longer numbers first so that e.g. "12" inside "123" is not
    # replaced prematurely.
    Numbers = sorted(map(int, re.findall(r"\d+", Strng)), reverse=True)
if not reverse:
for num in Numbers:
hN = int(num / 100)
tW = int((num - (hN * 100)) / 20)
tN = int((num - (hN * 100) - (tW * 20)) / 10)
t2 = int((num - (hN * 100) - (tW * 20) - (tN * 10)) / 2)
n1 = int(num - (hN * 100) - (tW * 20) - (tN * 10) - (t2 * 2))
perNum = (
(Hundred * hN) + (Twenty * tW) + (Ten * tN) + (Two * t2) + (One * n1)
)
Strng = Strng.replace(str(num), perNum)
else:
Strng = Strng.replace(One, "1#")
Strng = Strng.replace(Two, "2#")
Strng = Strng.replace(Ten, "10#")
Strng = Strng.replace(Twenty, "20#")
Strng = Strng.replace(Hundred, "100#")
return Strng
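
# Old Persian numerals are additive, built from signs for 1 (𐏑), 2 (𐏒),
# 10 (𐏓), 20 (𐏔) and 100 (𐏕).  Worked example for the forward direction:
#
#     123 = 1*100 + 1*20 + 0*10 + 1*2 + 1*1  ->  𐏕𐏔𐏒𐏑
#
# The reverse direction does not re-assemble decimal values; it only tags each
# sign with its value ("100#", "20#", ...) for later processing.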
def KharoshthiNumerals(Strng, reverse=False):
    # Longest numbers first, as in OldPersianNumeral above.
    Numbers = sorted(map(int, re.findall(r"\d+", Strng)), reverse=True)
if not reverse:
for num in Numbers:
Strng = Strng.replace(str(num), kharoshthiNumber(num))
else:
one = "𐩀"
two = "𐩁"
three = "𐩂"
four = "𐩃"
ten = "𐩄"
twenty = "𐩅"
hundred = "𐩆"
thousand = "𐩇"
Strng = Strng.replace(one, "1#")
Strng = Strng.replace(two, "2#")
Strng = Strng.replace(three, "3#")
Strng = Strng.replace(four, "4#")
Strng = Strng.replace(ten, "10#")
Strng = Strng.replace(twenty, "20#")
Strng = Strng.replace(hundred, "100#")
Strng = Strng.replace(thousand, "1000#")
return Strng
def kharoshthiNumber(Strng):
one = "𐩀"
two = "𐩁"
three = "𐩂"
four = "𐩃"
ten = "𐩄"
twenty = "𐩅"
hundred = "𐩆"
thousand = "𐩇"
num = int(Strng)
kharnum = ""
thou = int(num / 1000)
if thou > 0:
if thou > 1:
kharnum += kharoshthiNumber(thou)
kharnum += thousand
hund = int((num - (thou * 1000)) / 100)
if hund > 0:
if hund > 1:
kharnum += kharoshthiNumber(hund)
kharnum += hundred
twen = int((num - (thou * 1000) - (hund * 100)) / 20)
if twen > 0:
kharnum += twenty * twen
tenn = int((num - (thou * 1000) - (hund * 100) - (twen * 20)) / 10)
if tenn > 0:
if tenn > 1:
kharnum += kharoshthiNumber(tenn)
kharnum += ten
ones = int((num - (thou * 1000) - (hund * 100) - (twen * 20) - (tenn * 10)))
if ones > 0:
if ones == 1:
kharnum += one
elif ones == 2:
kharnum += two
elif ones == 3:
kharnum += three
elif ones == 4:
kharnum += four
elif ones == 5:
kharnum += four + one
elif ones == 6:
kharnum += four + two
elif ones == 7:
kharnum += four + three
elif ones == 8:
kharnum += four + four
elif ones == 9:
kharnum += four + four + one
return kharnum
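
# Kharoshthi numerals are likewise additive: signs for 1-4 (𐩀 𐩁 𐩂 𐩃), 10 (𐩄),
# 20 (𐩅), 100 (𐩆) and 1000 (𐩇); 5-9 are written as 4 plus a remainder, and
# hundreds/thousands take a multiplier prefix when greater than one.
# Worked example:
#
#     kharoshthiNumber(1996)
#     # 1000 -> 𐩇, 9*100 -> 𐩃𐩃𐩀𐩆, 4*20 -> 𐩅𐩅𐩅𐩅, 10 -> 𐩄, 6 -> 𐩃𐩁
#     # => "𐩇𐩃𐩃𐩀𐩆𐩅𐩅𐩅𐩅𐩄𐩃𐩁"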
def FixSinhala(Strng, reverse=False):
Strng = post_processing.SinhalaDefaultConjuncts(Strng)
if not reverse:
Strng = Strng.replace("\u0DA2\u0DCA\u0DA4", "\u0DA5")
Strng = Strng.replace("(අ)(අ)", "(ආ)")
else:
Strng = Strng.replace("\u0DA5", "\u0DA2\u0DCA\u0DA4")
Strng = Strng.replace("", "")
Strng = Strng.replace("(ආ)", "(අ)(අ)")
return Strng
def FixSantali(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("ᱹᱸ", "ᱺ")
Strng = Strng.replace("ᱻᱸ", "ᱸᱻ")
else:
Strng = Strng.replace("ᱺ", "ᱹᱸ")
Strng = Strng.replace("ᱽ", "’")
Strng = Strng.replace("ᱸᱻ", "ᱻᱸ")
return Strng
def FixSoraSompeng(Strng, reverse=False):
ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "SoraSompeng")) + ")"
if not reverse:
Strng = re.sub(ListC + "(ə)", r"\1", Strng)
Strng = Strng.replace("ə", "\U000110E6\U000110E8")
else:
ListV = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "SoraSompeng")) + ")"
Strng = re.sub(ListC + "(?!" + ListV + ")", r"\1" + "ə", Strng)
Strng = Strng.replace("𑃔ə𑃨", "𑃔𑃨ə")
Strng = Strng.replace("𑃦𑃨", "ə")
Strng = Strng.replace("ə𑃨", "𑃨")
return Strng
def FixRomanReadable(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("\\n", "\uE001")
Strng = re.sub("([aiueo])nj([aeiou])", r"\1" + "ny" + r"\2", Strng)
Strng = re.sub("(\W)nj([aeiou])", r"\1" + "ny" + r"\2", Strng)
Strng = re.sub("^nj([aeiou])", "ny" + r"\1", Strng)
Strng = Strng.replace("njnj", "nny")
Strng = Strng.replace("Mk", "ngk")
Strng = Strng.replace("Mg", "ngg")
Strng = Strng.replace("Mc", "njc")
Strng = Strng.replace("Mj", "njj")
Strng = Strng.replace("Md", "nd")
Strng = Strng.replace("Mt", "nt")
Strng = Strng.replace("Mb", "mb")
Strng = Strng.replace("Mp", "mp")
Strng = Strng.replace("M", "m\u034F'")
Strng = Strng.replace("ngk", "nk")
Strng = Strng.replace("ngg", "ng")
Strng = Strng.replace("njc", "nc")
Strng = Strng.replace("njj", "nj")
Strng = Strng.replace("jnj", "jny")
Strng = Strng.replace("\uE001", "\\n")
else:
pass
return Strng
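
# FixRomanReadable (above) and FixRomanColloquial (below) share the same
# anusvara-resolution table: the placeholder "M" (anusvara) is first expanded
# to a homorganic nasal (Mk -> ngk, Mc -> njc, Mb -> mb, ...) and the clusters
# are then simplified for readability (ngk -> nk, njc -> nc, ...); the
# colloquial variant additionally strips the ' and _ disambiguation marks.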
def FixRomanColloquial(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("\\n", "\uE001")
Strng = re.sub("([aiueo])nj([aeiou])", r"\1" + "ny" + r"\2", Strng)
Strng = re.sub("(\W)nj([aeiou])", r"\1" + "ny" + r"\2", Strng)
Strng = re.sub("^nj([aeiou])", "ny" + r"\1", Strng)
Strng = Strng.replace("njnj", "nny")
Strng = Strng.replace("Mk", "ngk")
Strng = Strng.replace("Mg", "ngg")
Strng = Strng.replace("Mc", "njc")
Strng = Strng.replace("Mj", "njj")
Strng = Strng.replace("Md", "nd")
Strng = Strng.replace("Mt", "nt")
Strng = Strng.replace("Mb", "mb")
Strng = Strng.replace("Mp", "mp")
Strng = Strng.replace("M", "m\u034F")
Strng = Strng.replace("ngk", "nk")
Strng = Strng.replace("ngg", "ng")
Strng = Strng.replace("njc", "nc")
Strng = Strng.replace("njj", "nj")
Strng = Strng.replace("jnj", "jny")
Strng = Strng.replace("\uE001", "\\n")
Strng = Strng.replace("'", "").replace("_", "")
else:
pass
return Strng
def FixWarangCiti(Strng, reverse=False):
ListC = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "WarangCiti")) + ")"
ListV = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "WarangCiti") + ["\u200D"]) + ")"
if not reverse:
Strng = re.sub(ListC + ListC, r"\1" + "\u200D" + r"\2", Strng)
Strng = re.sub(ListC + "(\U000118C1\U000118D9\u02BE)", r"\1" + "\u00BD", Strng)
Strng = re.sub(ListC + "(\U000118C1)", r"\1", Strng)
Strng = re.sub(ListV + "(\U000118C0)", r"\1" + "\u200D" + r"\2", Strng)
Strng = Strng.replace("\u02BE", "")
Strng = Strng.replace("𑣟\u02BF", "𑣙𑣗")
Strng = Strng.replace("\u00BD", "\U000118C1")
Strng = Strng.replace("\u02BF", "")
else:
Strng = Strng.lower()
Strng = Strng.replace("𑣙𑣗", "𑣟\u02BF")
Strng = Strng.replace("\u00D7", "\u200D")
Strng = re.sub(ListC + "(\U000118C1)", r"\1" + "\u00BD", Strng)
Strng = re.sub("(\u02BF)" + "(\U000118C1)", r"\1" + "\U000118C1\u00BD", Strng)
Strng = re.sub(ListC + "(?!" + ListV + ")", r"\1" + "\U000118C1", Strng)
Strng = re.sub(
"([\U000118D4\U000118D5\U000118CC\U000118CB\U000118CF\U000118CE\U000118D2\U000118D1\U000118D5\U000118D4\U000118D8\U000118D7\U000118DB])(\u200D)(𑣙)",
r"\1" + "\u00D6" + r"\3",
Strng,
)
Strng = Strng.replace("\u200D", "")
Strng = Strng.replace("\u00D6", "\u200D")
Strng = re.sub("(𑣁)" + "(\u02BF)" + ListV, r"\2\3", Strng)
Strng = Strng.replace("𑣁" + "\u02BB", "")
Strng = Strng.replace("\U000118C1\u00BD", "\U000118C1\U000118D9\u02BE")
return Strng
def FixLimbu(Strng, reverse=False):
vir = Limbu.ViramaMap[0]
SCons = [vir + x for x in [Limbu.ConsonantMap[x] for x in [25, 26, 28]]]
SubCons = ["\u1929", "\u192A", "\u192B"]
for x, y in zip(SCons, SubCons):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
signAll = "|".join(
GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Limbu")
)
FCons = [
x + vir for x in [Limbu.ConsonantMap[x] for x in [0, 4, 15, 19, 20, 24, 26, 27]]
]
FinalCons = [
"\u1930",
"\u1931",
"\u1933",
"\u1934",
"\u1935",
"\u1936",
"\u1937",
"\u1938",
]
if reverse:
Strng = re.sub(
"(" + "|".join(FinalCons) + ")" + "(?=[ᤕᤖᤘ])", r"\1" + "\u200C", Strng
)
Strng = re.sub("([ᤀᤁᤂᤃᤄᤅᤆᤇᤈᤉᤊᤋᤌᤍᤎᤏᤐᤑᤒᤓᤔᤕᤖᤗᤘᤚᤛᤜᤠᤣᤥᤧᤨᤩᤪᤫ])᤺", r"\1" + "꞉", Strng)
else:
Strng = Strng.replace("꞉", "᤺")
for x, y in zip(FCons, FinalCons):
if not reverse:
Strng = re.sub(
"(" + signAll + ")" + "(\u193A?)" + "(" + x + ")", r"\1\2" + y, Strng
)
else:
Strng = Strng.replace(y, x)
if not reverse:
Strng = Strng.replace("ʔ", "᤹")
Strng = Strng.replace("!", "᥄")
Strng = Strng.replace("?", "᥅")
else:
Strng = Strng.replace("᤹", "ʔ")
Strng = Strng.replace("᥄", "!")
Strng = Strng.replace("᥅", "?")
return Strng
def FixDevanagari(Strng, reverse=False):
Sindhi = ["ॻ", "ॼ", "ॾ", "ॿ"]
SindhiApprox = ["ˍग", "ˍज", "ˍड", "ˍब"]
if not reverse:
Strng = Strng.replace("ʔ", "ॽ")
for x, y in zip(Sindhi, SindhiApprox):
Strng = Strng.replace(y, x)
Strng = Strng.replace("ज़़", "ॹ")
Strng = Strng.replace("श़", "ॹ")
Strng = Strng.replace("ऱ्", "ऱ्")
Strng = Strng.replace("ऱ्य", "ऱ्य")
Strng = Strng.replace("ऱ्ह", "ऱ्ह")
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Devanagari"))
Strng = re.sub("(" + ListC + ")" + "ʼ", r"\1" + "\u093A", Strng)
Strng = Strng.replace("\u093Eʼ", "\u093B")
else:
Strng = Strng.replace("\u0954", "")
Strng = post_processing.DevanagariPrishtamatra(Strng, reverse=True)
Strng = Strng.replace("ॽ", "ʔ")
Strng = Strng.replace("ॹ", "ज़़")
for x, y in zip(Sindhi, SindhiApprox):
Strng = Strng.replace(x, y)
return Strng
def FixKaithi(Strng, reverse=False):
if not reverse:
Strng = Strng.replace(" ", "⸱")
else:
Strng = Strng.replace("⸱", " ")
return Strng
def FixLao2(Strng, reverse=False):
return FixLao(Strng, reverse)
def FixNandinagari(Strng, reverse=False):
if not reverse:
pass
else:
Strng = post_processing.NandinagariPrishtamatra(Strng, reverse=True)
return Strng
def FixLepcha(Strng, reverse=False):
vir = Lepcha.ViramaMap[0]
la = Lepcha.ConsonantMap[27]
conLa = [
x + vir + la
for x in [Lepcha.ConsonantMap[c] for c in [0, 2, 20, 22, 24, 32]]
+ [Lepcha.NuktaConsonantMap[6]]
]
conL = ["\u1C01", "\u1C04", "\u1C0F", "\u1C14", "\u1C16", "\u1C1E", "\u1C12"]
for x, y in zip(conLa, conL):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
yr = [vir + x for x in Lepcha.ConsonantMap[25:27]]
yrSub = ["\u1C24", "\u1C25"]
for x, y in zip(yr, yrSub):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
listNF = [
Lepcha.ConsonantMap[x]
for x in [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
16,
17,
18,
21,
22,
23,
29,
30,
31,
]
]
listF = [
(Lepcha.ConsonantMap + Lepcha.AyogavahaMap)[x]
for x in [
0,
0,
0,
34,
0,
0,
0,
0,
19,
15,
15,
15,
15,
19,
15,
15,
15,
20,
20,
20,
15,
15,
15,
]
]
listNF += Lepcha.ConsonantMap[25:26] + Lepcha.ConsonantMap[28:29]
listF += Lepcha.VowelMap[2:3] + Lepcha.VowelMap[4:5]
if not reverse:
Strng = Strng.replace(Lepcha.NuktaMap[0] + vir, vir)
Strng = Strng.replace(Lepcha.ConsonantMap[32] + vir, "")
consAll = (
"("
+ "|".join(Lepcha.ConsonantMap + Lepcha.VowelMap + Lepcha.VowelSignMap)
+ ")"
)
for x, y in zip(listNF, listF):
Strng = re.sub(consAll + "(" + x + vir + ")", r"\1" + y + vir, Strng)
else:
pass
conFinal = [
x + vir for x in [Lepcha.ConsonantMap[c] for c in [0, 15, 19, 20, 24, 26, 27]]
]
conF = [
"\u1C2D",
"\u1C33",
"\u1C30",
"\u1C31",
"\u1C2E",
"\u1C32",
"\u1C2F",
]
signAll = "|".join(
GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Lepcha")
)
for x, y in zip(conFinal, conF):
if not reverse:
Strng = re.sub("(" + signAll + ")" + "(" + x + ")", r"\1" + y, Strng)
else:
Strng = Strng.replace(y, x)
signVow = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, "Lepcha"))
if not reverse:
Strng = Strng.replace(vir, "")
Strng = re.sub(
"(" + signVow + ")" + "(" + Lepcha.AyogavahaMap[1] + ")",
r"\1" + "\u1C35",
Strng,
)
Strng = Strng.replace("ᰧᰶᰵ", "ᰧᰵᰶ")
else:
Strng = Strng.replace("\u1C35", Lepcha.AyogavahaMap[1])
Strng = Strng.replace("ᰧᰵᰶ", "ᰧᰶᰵ")
return Strng
def FixSundanese(Strng, reverse=False):
vir = Sundanese.ViramaMap[0]
r = Sundanese.ConsonantMap[26] + vir
ListC = "|".join(
GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Sundanese")
)
if not reverse:
Strng = re.sub("(" + ListC + ")" + r, r"\1" + "\u1B81", Strng)
else:
Strng = Strng.replace("\u1B81", r)
Strng = post_processing.SundaneseHistoricConjuncts(Strng, reverse)
yrl = [vir + x for x in Sundanese.ConsonantMap[25:28]]
yrlSub = ["\u1BA1", "\u1BA2", "\u1BA3"]
for x, y in zip(yrl, yrlSub):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
return Strng
def FixRejang(Strng, reverse=False):
vir = Rejang.ViramaMap[0]
r = Rejang.ConsonantMap[26] + vir
n = Rejang.ConsonantMap[19] + vir
ListC = "|".join(
GM.CrunchSymbols(GM.Consonants + GM.Vowels + GM.VowelSignsNV, "Rejang")
)
if not reverse:
Strng = re.sub("(" + ListC + ")" + r, r"\1" + "\uA951", Strng)
Strng = re.sub("(" + ListC + ")" + n, r"\1" + "\uA950", Strng)
else:
Strng = Strng.replace("\uA951", r)
Strng = Strng.replace("\uA950", n)
return Strng
def FixChakma(Strng, reverse=False):
listC = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.Consonants, "Chakma") + Chakma.VowelMap[:1],
key=len,
reverse=True,
)
)
+ ")"
)
listV = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.VowelSigns, "Chakma")
+ Chakma.ViramaMap
+ ["\U00011133"],
key=len,
reverse=True,
)
)
+ ")"
)
Strng = Strng.replace("\u02BD", "")
if not reverse:
Strng = re.sub("(" + listC + ")" + "(?!" + listV + ")", r"\1" "\u02BE", Strng)
Strng = Strng.replace("\U00011127", "")
Strng = Strng.replace("\u02BE", "\U00011127")
Strng = Strng.replace("𑄣𑄳𑄦", "𑅄")
Strng = Strng.replace("𑄣𑄴𑄦", "𑅄")
Strng = re.sub("(" + listC + ")" + "(𑄃𑄨)", r"\1" + "\U0001112D", Strng)
Strng = Strng.replace(
"\U0001112C𑄃𑄨ʼ",
"\U00011146",
)
else:
Strng = post_processing.ChakmaGemination(Strng, reverse=True)
Strng = Strng.replace("𑅄", "𑄣𑄳𑄦")
Strng = Strng.replace("\U00011133\U00011103", "\U00011145")
Strng = Strng.replace("\U00011133\U00011104", "\U00011146")
Strng = Strng.replace("\U0001112D", "𑄃𑄨")
Strng = Strng.replace("\U00011146", "\U0001112C𑄃𑄨ʼ")
Strng = Strng.replace("\U00011127", "\u02BE")
Strng = re.sub(
"(" + listC + ")" + "(?!" + listV + "|\u02BE" + ")",
r"\1" "\U00011127",
Strng,
)
Strng = Strng.replace("\u02BE", "")
yrlvn = (
"(" + "|".join(Chakma.ConsonantMap[19:20] + Chakma.ConsonantMap[26:29]) + ")"
)
if not reverse:
Strng = re.sub("\U00011134" + "(?=" + yrlvn + ")", "\U00011133", Strng)
Strng = post_processing.ChakmaGemination(Strng)
else:
Strng = Strng.replace("\U00011133", "\U00011134")
vowelDepA = ["𑄃𑄨", "𑄃𑄪", "𑄃𑄬"]
vowelIndep = ["\U00011104", "\U00011105", "\U00011106"]
for x, y in zip(vowelDepA, vowelIndep):
Strng = Strng.replace(y, x)
return Strng
def FixIAST(Strng, reverse=False):
if reverse:
Strng = Strng.replace("ṁ", IAST.AyogavahaMap[1])
return Strng
def FixIPA(Strng, reverse=False):
colon_tilde = "\u02D0\u0303"
tilde_colon = "\u0303\u02D0"
if not reverse:
Strng = Strng.replace(colon_tilde, tilde_colon)
Strng = re.sub("(.)(\u02D0?)(\u0068)", r"\1\2\3\1" + "\u0306", Strng)
Strng = Strng.replace("ə̸ə̸", "ɑ̷ː")
else:
Strng = Strng.replace("ɑ̷ː", "ə̸ə̸")
Strng = Strng.replace(tilde_colon, colon_tilde)
Strng = re.sub("(.)(\u02D0?)(\u0068)" + r"\1" + "\u0306", r"\1\2\3", Strng)
return Strng
def FixPhagsPa(Strng, reverse=False):
candraBindu = PhagsPa.AyogavahaMap[0]
ListC = "|".join(sorted(PhagsPa.ConsonantMap, key=len, reverse=True))
ListV = "|".join(sorted(PhagsPa.VowelMap, key=len, reverse=True))
ListVS = "|".join(sorted(PhagsPa.VowelSignMap, key=len, reverse=True))
vir = PhagsPa.ViramaMap[0]
Virrvy = [vir + x for x in [PhagsPa.ConsonantMap[c] for c in [25, 26, 28]]]
Subrvy = ["\uA868", "\uA871", "\uA867"]
SubrvyE = ["ꡱꡨ"] + Subrvy
if not reverse:
for x, y in zip(Virrvy, Subrvy):
Strng = Strng.replace(x, y)
Strng = re.sub("(" + ListV + ")" + "(" + candraBindu + ")", r"\2\1", Strng)
Strng = re.sub(
"("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?)"
+ "("
+ candraBindu
+ ")",
r"\6\1\2\4",
Strng,
)
else:
ListV = ListV.replace("\u1E7F", "")
Strng = Strng.replace("ꡖꡘꡟ", "ꡱꡖꡟ")
Aspirate = [
("\uA842\uA85C", "\u1E7E\uA842\u1E7E\uA85C\u1E7E"),
("\uA852\uA85C", "\u1E7E\uA852\u1E7E\uA85C\u1E7E"),
("\uA86B\uA85C", "\u1E7E\uA86B\u1E7E\uA85C\u1E7E"),
("\uA84A\uA85C", "\u1E7E\uA84A\u1E7E\uA85C\u1E7E"),
("\uA84E\uA85C", "\u1E7E\uA84E\u1E7E\uA85C\u1E7E"),
]
for x, y in Aspirate:
Strng = Strng.replace(x, y)
Strng = re.sub(
"(" + PhagsPa.VowelSignMap[0] + ")" + "([" + "".join(Subrvy[1]) + "])",
r"\2\1",
Strng,
)
Strng = re.sub(
"("
+ candraBindu
+ ")"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?)",
r"\2\3\5\1",
Strng,
)
Strng = re.sub("(" + candraBindu + ")" + "(" + ListV + ")", r"\2\1", Strng)
for x, y in zip(Virrvy, Subrvy):
Strng = Strng.replace(y, x)
Strng = re.sub("(" + ListV + ")", "\u1E7F" r"\1", Strng)
Strng = re.sub("(" + ListC + "|ꡖ)" + "(" + "\u1E7F" + ")", r"\1", Strng)
if not reverse:
Strng = Strng.replace(" ", "᠂")
Strng = Strng.replace("\u02BD", "")
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(?!"
+ vir
+ ")",
r"\1 \8",
Strng,
)
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(?!"
+ vir
+ ")",
r"\1 \8",
Strng,
)
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(?!"
+ vir
+ ")",
r"\1 \4",
Strng,
)
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(?!"
+ vir
+ ")",
r"\1 \4",
Strng,
)
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(?!"
+ vir
+ ")",
r"\1 \8",
Strng,
)
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?))"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(?!"
+ vir
+ ")",
r"\1 \8",
Strng,
)
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(?!"
+ vir
+ ")"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(?!"
+ vir
+ ")",
r"\1 \4",
Strng,
)
Strng = re.sub(
"(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(?!"
+ vir
+ ")"
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))"
+ "(?!"
+ vir
+ ")",
r"\1 \4",
Strng,
)
Strng = Strng.replace("\n", "\n")
Strng = "\u12BA᠂" + Strng
ListCE = ListC + "|" + "|".join(SubrvyE)
Strng = re.sub(
'(?:(?<!\n)(?<!᠂)(?<![,\."\?\&\(\)]))'
+ "(?<!"
+ vir
+ ")"
+ "("
+ ListC
+ ")"
+ vir
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListC
+ "))",
r"\1 \2",
Strng,
)
Strng = re.sub(
"(?<!᠂)"
+ "("
+ ListC
+ ")"
+ vir
+ "(("
+ candraBindu
+ ")?"
+ "("
+ ListV
+ "))",
r" \1",
Strng,
)
Strng = Strng.replace(vir, "")
Strng = Strng.replace("\u1E7F", "")
Strng = Strng.replace("\u1E7E", "")
Strng = Strng.replace("\u12BA᠂", "")
Strng = Strng.replace("᠂", " ᠂ ")
else:
Strng = Strng.replace("ꡆ", "ꡒ")
for x, y in zip(Virrvy, Subrvy):
Strng = Strng.replace(x, y)
Strng = re.sub(
"(("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(?!"
+ ListVS
+ "))"
+ "((("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ "))"
+ "("
+ candraBindu
+ ")?))",
r"\1" + vir + r"\6",
Strng,
)
Strng = re.sub(
"((("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(("
+ ListVS
+ ")?)"
+ "("
+ candraBindu
+ ")?)"
+ "(("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(?!"
+ ListVS
+ ")))",
r"\1" + vir,
Strng,
)
Strng = re.sub(
"((("
+ ListV
+ ")"
+ "("
+ candraBindu
+ ")?)"
+ "(("
+ ListC
+ ")"
+ "(("
+ "|".join(SubrvyE)
+ ")?)"
+ "(?!"
+ ListVS
+ ")))",
r"\1" + vir,
Strng,
)
for x, y in zip(Virrvy, Subrvy):
Strng = Strng.replace(y, x)
Strng = Strng.replace(" ", "")
Strng = Strng.replace("᠂", " ")
Strng = Strng.replace("᠃", " ")
Strng = Strng.replace(vir + vir, vir)
return Strng
def FixLatn(Strng, Source, reverse=False):
vir = ""
if not reverse:
Strng = re.sub("([aiuāīū" + vir + "])(꞉)", r"\2\1", Strng)
Strng = re.sub("(꞉)(\u033D)", r"\2\1", Strng)
Strng = Strng.replace("aʰ", "ʰ")
else:
Strng = re.sub("([aiuāīū" + vir + "])(꞉)", r"\2\1", Strng)
Strng = re.sub("(\u033D)(꞉)", r"\2\1", Strng)
return Strng
def FixArab(Strng, Source, reverse=False):
Strng = PersoArabicPuntuation(Strng, reverse)
if not reverse:
pass
else:
Strng = Strng.replace("آ", "آ").replace("ـ", "")
Strng = Strng.replace("\u064E\u0651", "\u0651\u064E")
return Strng
def FixThaa(Strng, Source, reverse=False):
Strng = PersoArabicPuntuation(Strng, reverse)
return Strng
def FixArab_Ph(Strng, Source, reverse=False):
return FixArab(Strng, Source, reverse)
def FixArab_Fa(Strng, Source, reverse=False):
Strng = FixArab(Strng, Source, reverse)
return Strng
def FixArab_Ur(Strng, Source, reverse=False):
Strng = FixArab(Strng, Source, reverse)
return Strng
def FixUgar(Strng, Source, reverse=False):
if not reverse:
Strng = Strng.replace("𐎒²", "𐎝")
Strng = Strng.replace(" ", "𐎟")
else:
Strng = Strng.replace("𐎟", "")
return Strng
def FixSogd(Strng, Source, reverse=False):
if not reverse:
Strng = Strng.replace("𐼹²", "𐽄")
else:
pass
return Strng
def FixMalayalam(Strng, reverse=False):
Strng = post_processing.MalayalamChillu(Strng, reverse)
if not reverse:
Strng = post_processing.RetainDandasIndic(Strng, "Malayalam", True)
Strng = post_processing.RetainIndicNumerals(Strng, "Malayalam", True)
Chillus = ["\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E", "ഩ്"]
Anu = GM.CrunchSymbols(GM.CombiningSigns, "Malayalam")[1]
return Strng
def FixTelugu(Strng, reverse=False):
if not reverse:
Strng = post_processing.RetainDandasIndic(Strng, "Telugu", True)
Strng = post_processing.RetainIndicNumerals(Strng, "Telugu", True)
else:
Strng = Strng.replace("ఁ", "ఀ")
return Strng
def FixMeeteiMayek(Strng, reverse=False):
vir = MeeteiMayek.ViramaMap[0]
listC = [
x + vir
for x in [MeeteiMayek.ConsonantMap[x] for x in [0, 27, 24, 20, 19, 15, 4, 25]]
]
finalC = [
"\uABDB",
"\uABDC",
"\uABDD",
"\uABDE",
"\uABDF",
"\uABE0",
"\uABE1",
"\uABE2",
]
for x, y in zip(listC, finalC):
if not reverse:
Strng = re.sub(x, y, Strng)
else:
Strng = Strng.replace(y, x)
return Strng
def FixBatakSima(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("ᯙᯮ", "ᯙᯯ")
else:
Strng = Strng.replace("ᯙᯯ", "ᯙᯮ")
return Strng
def FixCham(Strng, reverse=False):
Strng = Strng.replace("\u02BD", "")
ListCAll = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Cham")) + ")"
ListVow = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Cham")) + ")"
ListVowS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSignsNV, "Cham")) + ")"
vir = Cham.ViramaMap[0]
nja = Cham.ConsonantMap[9] + vir + Cham.ConsonantMap[7]
listC = [vir + x for x in Cham.ConsonantMap[25:29]]
SubC = [
"\uAA33",
"\uAA34",
"\uAA35",
"\uAA36",
]
for x, y in zip(listC, SubC):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
listNF = [
Cham.ConsonantMap[x] for x in [1, 3, 6, 7, 8, 9, 16, 17, 18, 21, 22, 23, 31, 29]
]
listF = [
Cham.ConsonantMap[x]
for x in [0, 2, 5, 5, 5, 19, 15, 15, 15, 20, 20, 20, 30, 30]
]
for x, y in zip(listNF, listF):
if not reverse:
Strng = Strng.replace(x + vir, y + vir)
else:
pass
listC = [
x + vir
for x in [
Cham.ConsonantMap[x] for x in [0, 2, 4, 5, 15, 19, 20, 25, 26, 27, 30, 24]
]
]
finalC = [
"\uAA40",
"\uAA41",
"\uAA42",
"\uAA44",
"\uAA45",
"\uAA46",
"\uAA47",
"\uAA48",
"\uAA49",
"\uAA4A",
"\uAA4B",
"\uAA4C",
]
for x, y in zip(listC, finalC):
if not reverse:
Strng = Strng.replace(x, y)
Strng = re.sub(
"(" + ListCAll + "|" + ListVow + "|" + ListVowS + ")" + "ꨨ" + vir,
r"\1" + "ꩍ",
Strng,
)
else:
Strng = Strng.replace("ꩍ", "ꨨ" + vir)
if y not in Cham.AyogavahaMap:
Strng = Strng.replace(y, x)
va = Cham.ConsonantMap[28]
if not reverse:
Strng = Strng.replace(va + vir, va)
else:
pass
return Strng
def FixTaiTham(Strng, reverse=False):
vir = TaiTham.ViramaMap[0]
Cons = [vir + x for x in [TaiTham.ConsonantMap[x] for x in [26, 27]]]
Sub = ["\u1A55", "\u1A56"]
for x, y in zip(Cons, Sub):
if not reverse:
Strng = Strng.replace(x, y)
else:
Strng = Strng.replace(y, x)
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "TaiTham"))
ng = TaiTham.ConsonantMap[4] + vir
if not reverse:
Strng = re.sub("(" + ng + ")" + "(" + ListC + ")", "\u1A58" + r"\2", Strng)
Strng = re.sub(vir + "(" + ListC + ")", "\u1A60" + r"\1", Strng)
Strng = Strng.replace("ᩈ᩠ᩈ", "ᩔ")
TallACons = "|".join(["ᩅ", "ᨴ", "ᨵ", "ᨣ"])
Strng = post_processing.FixTallA(Strng, TallACons)
Strng = Strng.replace("\u1A55\u1A60\u1A3F", "\u1A60\u1A3F\u1A55")
Strng = Strng.replace("\u1A60\u1A47", vir + "\u1A47")
else:
AA = "ᩣ"
Strng = Strng.replace("ᩔ", "ᩈ᩠ᩈ")
Strng = re.sub("(" + ListC + ")" + "\u1A58", r"\1" + ng, Strng)
Strng = Strng.replace("\u1A60", vir)
Strng = Strng.replace("ᩤ", AA)
Strng = Strng.replace("\u1A60\u1A3F\u1A55", "\u1A55\u1A60\u1A3F")
return Strng
def FixLaoTham(Strng, reverse=False):
Strng = FixTaiTham(Strng, reverse)
return Strng
def FixLueTham(Strng, reverse=False):
Strng = FixTaiTham(Strng, reverse)
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "TaiTham"))
if not reverse:
E = "ᩮ"
AA = "ᩣ"
TallACons = "|".join(["ᩅ", "ᨴ", "ᨵ", "ᨣ"])
Strng = re.sub(
"(" + TallACons + ")(᩠)(" + ListC + ")" + "(" + E + "?)" + AA,
r"\1\2\3\4" + "ᩤ",
Strng,
)
Strng = re.sub(
"("
+ TallACons
+ ")(᩠)("
+ ListC
+ ")"
+ "(᩠)("
+ ListC
+ ")"
+ "("
+ E
+ "?)"
+ AA,
r"\1\2\3\4\5\6" + "ᩤ",
Strng,
)
else:
pass
return Strng
def FixKhuenTham(Strng, reverse=False):
Strng = FixTaiTham(Strng, reverse)
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "TaiTham"))
if not reverse:
E = "ᩮ"
AA = "ᩣ"
TallACons = "|".join(["ᩅ", "ᨴ", "ᨵ", "ᨣ"])
Strng = re.sub(
"(" + TallACons + ")(᩠)(" + ListC + ")" + "(" + E + "?)" + AA,
r"\1\2\3\4" + "ᩤ",
Strng,
)
Strng = re.sub(
"("
+ TallACons
+ ")(᩠)("
+ ListC
+ ")"
+ "(᩠)("
+ ListC
+ ")"
+ "("
+ E
+ "?)"
+ AA,
r"\1\2\3\4\5\6" + "ᩤ",
Strng,
)
else:
pass
return Strng
def LaoTranscribe(Strng, reverse=False):
import post_processing as pp
shortA, conjA = "\u0EB0", "\u0EB1"
if not reverse:
Strng = pp.ThaiLaoTranscription(Strng, "Lao", shortA, conjA)
else:
Strng = pp.ThaiLaoTranscription(Strng, "Lao", shortA, conjA, reverse=True)
return Strng
def LaoPaliTranscribe(Strng, reverse=False, anusvaraChange=True):
import post_processing as pp
shortA, conjA = "\u0EB0", "\u0EB1"
if not reverse:
Strng = pp.ThaiLaoTranscription(
Strng, "LaoPali", shortA, conjA, anusvaraChange=anusvaraChange
)
else:
Strng = pp.ThaiLaoTranscription(Strng, "LaoPali", shortA, conjA, reverse=True)
return Strng
def FixBengali(Strng, reverse=False):
Virama = "".join(GM.CrunchSymbols(["ViramaMap"], "Bengali"))
ba = "ব"
if not reverse:
Strng = re.sub("(?<![রবম])" + Virama + ba, Virama + "\u200C" + ba, Strng)
Strng = Strng.replace("\u09CD\u09AD\u09BC", "\u09CD\u09AC")
else:
pass
Strng = post_processing.KhandaTa(Strng, "Bengali", reverse)
return Strng
def FixAssamese(Strng, reverse=False):
Ra = "\u09B0"
AssRa = "\u09F0"
Strng = post_processing.KhandaTa(Strng, "Assamese", reverse)
if not reverse:
Strng = Strng.replace(Ra, AssRa)
else:
Strng = Strng.replace(AssRa, Ra)
return Strng
def FixSharada(Strng, reverse=False):
Strng = post_processing.KhandaTa(Strng, "Assamese", reverse)
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Sharada"))
Nukta = "|".join(GM.CrunchSymbols(GM.CombiningSigns, "Sharada")[-1])
Virama = "".join(GM.CrunchSymbols(["ViramaMap"], "Sharada"))
if not reverse:
Strng = Strng.replace(Nukta + Virama, Nukta + Virama + "\u200C")
Strng = re.sub(
"(" + Virama + ")" + "(" + ListC + ")" + "(" + Nukta + ")",
r"\1" + "\u200C" + r"\2\3",
Strng,
)
else:
pass
return Strng
def FixKannada(Strng, reverse=False):
if not reverse:
Strng = post_processing.RetainDandasIndic(Strng, "Kannada", True)
Strng = post_processing.RetainIndicNumerals(Strng, "Kannada", True)
Strng = re.sub(
"(\u0CCD)([^\u0CAB\u0C9C])(\u0CBC)", r"\1" + "\u200C" + r"\2\3", Strng
)
return Strng
def FixGrantha(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("॑", "᳴")
Strng = Strng.replace("᳚", "॑")
Strng = Strng.replace("ꣳ", "𑍞")
Strng = Strng.replace("ꣴ", "𑍟")
Strng = Strng.replace("𑌼𑍍", "𑌼𑍍\u200C")
else:
Strng = Strng.replace("𑌼𑍍\u200C", "𑌼𑍍")
Strng = Strng.replace("॑", "᳚")
Strng = Strng.replace("᳴", "॑")
Strng = Strng.replace("𑍞", "ꣳ")
Strng = Strng.replace("𑍟", "ꣴ")
return Strng
def FixMahajani(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("𑅰𑅳ʾ𑅭ʿ𑅑", "\U00011176")
Strng = Strng.replace("\u02BE", "").replace("\u02BF", "")
else:
Strng = Strng.replace("\U00011176", "𑅰𑅳ʾ𑅭ʿ𑅑")
return Strng
def FixAhom(Strng, reverse=False):
ListVS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSignsNV, "Ahom")) + ")"
Anu = "(" + GM.CrunchList("AyogavahaMap", "Ahom")[1] + ")"
if not reverse:
Strng = Strng.replace("\U0001172B\U0001170D", "\U0001171E")
Strng = Strng.replace("\U0001172B\U0001170E", "\U0001171D")
Strng = re.sub(ListVS + Anu, r"\2\1", Strng)
Strng = re.sub(Anu + "(𑜦)", r"\2\1", Strng)
else:
Strng = Strng.replace("\U0001171E", "\U0001172B\U0001170D")
Strng = Strng.replace("\U0001171D", "\U0001172B\U0001170E")
vir = Ahom.ViramaMap[0]
anu = Ahom.AyogavahaMap[1]
Strng = re.sub(
anu + "\U00011727" + "(?!\U00011728)",
"\U00011726\U00011727\U0001172A",
Strng,
)
Strng = re.sub(
"(\U00011726)(.)(" + vir + ")", "\U00011726\U00011727" + r"\2\3", Strng
)
Strng = re.sub(
"(\U00011728)(.)(" + vir + ")", "\U00011726\U00011721" + r"\2\3", Strng
)
Strng = Strng.replace(anu + "\U00011728", "\U00011726\U00011721\U0001172A")
Strng = re.sub(Anu + ListVS, r"\2\1", Strng)
return Strng
def FixMultani(Strng, reverse=False):
if not reverse:
Strng = Strng.replace("\u02BE", "").replace("\u02BF", "")
Strng = Strng.replace("ˍ\U0001128C", "\U0001128D").replace(
"ˍ\U00011282", "\U00011293"
)
else:
Strng = Strng.replace("\U0001128D", "ˍ\U0001128C").replace(
"\U00011293", "ˍ\U00011292"
)
return Strng
def FixGujarati(Strng, reverse=False):
if not reverse:
Strng = post_processing.RetainDandasIndic(Strng, "Gujarati", True)
Strng = Strng.replace("જ઼઼", "ૹ").replace("શ઼", "ૹ")
    else:
        # Reverse: decompose ૹ back to the ja + nukta approximation.
        Strng = Strng.replace("ૹ", "જ઼઼")
return Strng
def FixZanabazarSquare(Strng, reverse=False):
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "ZanabazarSquare"))
yrlv = ZanabazarSquare.ConsonantMap[25:29]
yrlv_sub = ["\U00011A3B", "\U00011A3C", "\U00011A3D", "\U00011A3E"]
vir = ZanabazarSquare.ViramaMap[0]
if not reverse:
Strng = re.sub(vir + "(" + ListC + ")", "\U00011A47" + r"\1", Strng)
Strng = Strng.replace("𑨋𑩇𑨯", "𑨲")
else:
Strng = Strng.replace("\U00011A41", " ")
tsaSeries = ["𑨣", "𑨤", "𑨥"]
caSeries = ["𑨐", "𑨑", "𑨒"]
for x, y in zip(tsaSeries, caSeries):
Strng = Strng.replace(y, x)
for x, y in zip(yrlv, yrlv_sub):
Strng = Strng.replace(y, "\U00011A47" + x)
Strng = Strng.replace("\U00011A3A", yrlv[1] + "\U00011A47")
Strng = Strng.replace("𑨲", "𑨋𑩇𑨯")
Strng = Strng.replace("\U00011A07", "\U00011A04\U00011A0A")
Strng = Strng.replace("\U00011A08", "\U00011A06\U00011A0A")
Strng = Strng.replace("\U00011A33", vir)
Strng = Strng.replace("\U00011A47", vir)
return Strng
def FixKhojki(Strng, reverse=False):
sindhi = ["\U0001120B", "\U00011211", "\U0001121C", "\U00011222"]
sindhiapprox = ["ˍ\U0001120A", "ˍ\U00011210", "ˍ\U00011216", "ˍ\U00011221"]
if not reverse:
for x, y in zip(sindhi, sindhiapprox):
Strng = Strng.replace(y, x)
Strng = post_processing.InsertGeminationSign(Strng, "Khojki")
Strng = re.sub("(\U00011237)(.)", r"\2\1", Strng)
Strng = Strng.replace("𑈷𑈶", "𑈶𑈷")
Strng = Strng.replace(" ", "\U0001123A")
else:
Strng = Strng.replace("\U0001123A", " ")
for x, y in zip(sindhi, sindhiapprox):
Strng = Strng.replace(x, y)
Strng = Strng.replace("𑈶𑈷", "𑈷𑈶")
Strng = re.sub("(.)(\U00011237)", r"\2\1", Strng)
Strng = post_processing.ReverseGeminationSign(Strng, "Khojki")
return Strng
def FixNewa(Strng, reverse=False):
if not reverse:
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, "Newa"))
ra = Newa.ConsonantMap[26]
vir = Newa.ViramaMap[0]
else:
pass
    return Strng


# === end of file: aksarantara/fixer.py (Aksarantara-1.1.0) ===
# === file: aksarantara/post_options.py ===
import post_processing as PP
import Map as GM
def ApplyScriptDefaults(Strng, Source, Target, PostOptions=[]):
Options = []
if Target in GM.IndicScripts:
Options += []
if Target == "Telugu":
Options += [
"NasalToAnusvara",
"MToAnusvara",
"TeluguRemoveNukta",
"TeluguRemoveAeAo",
]
elif Target == "Kannada":
Options += ["NasalToAnusvara", "MToAnusvara"]
elif Target == "Nandinagari":
Options += ["NasalToAnusvara"]
elif Target == "Hebrew":
Options += ["HebrewVetVav", "HebrewnonFinalShort"]
elif Target == "Malayalam":
Options += [
"MalayalamAnusvaraNasal",
"MToAnusvara",
"MalayalamremoveHistorical",
]
elif Target == "Tamil":
Options += ["TamilNaToNNa", "AnusvaraToNasal", "TamilpredictDentaNa"]
elif Target == "Bengali":
if "BengaliRaBa" in PostOptions:
Options += ["YaToYYa", "AnusvaraToNasal", "BengaliConjunctVB"]
else:
Options += ["VaToBa", "YaToYYa", "AnusvaraToNasal", "BengaliConjunctVB"]
elif Target == "MeeteiMayek":
Options += ["MeeteiMayekremoveHistorical"]
elif Target == "Limbu":
Options += ["LimburemoveHistorical"]
elif Target == "Assamese":
Options += ["YaToYYa", "AnusvaraToNasal"]
elif Target == "Oriya":
Options += ["OriyaVa", "YaToYYa", "AnusvaraToNasal"]
elif Target == "Chakma":
Options += ["YaToYYa"]
elif Target == "Gurmukhi":
Options += [
"GurmukhiCandrabindu",
"GurmukhiTippiBindu",
"GurmukhiTippiGemination",
]
elif Target == "Saurashtra":
Options += ["SaurashtraHaru"]
elif Target == "Ahom":
Options += ["AhomClosed"]
elif Target == "Tibetan":
Options += ["TibetanRemoveVirama", "TibetanRemoveBa"]
elif Target == "Thaana":
Options += ["ThaanaRemoveHistorical"]
elif Target == "Avestan":
Options += ["AvestanConventions"]
elif Target == "Sundanese":
Options += ["SundaneseRemoveHistoric"]
elif Target == "Multani":
Options += ["MultaniAbjad"]
elif Target == "Modi":
Options += ["ModiRemoveLong"]
elif Target == "WarangCiti":
Options += ["WarangCitiModernOrthogaphy"]
elif Target == "Latn":
if Source == "Arab":
Options += ["arabizeLatn"]
elif Source == "Arab-Ur" or Source == "Arab-Pa" or Source == "Arab-Fa":
Options += ["urduizeLatn"]
elif Source == "Syrn":
Options += ["syricizeLatn"]
elif Source == "Syrj" or Source == "Hebr":
Options += ["hebraizeLatn"]
elif Target == "Arab":
Options += ["ArabRemoveAdditions"]
else:
Options += []
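    # Each option names a post_processing function.  Functions whose name
    # embeds the target script (e.g. "TeluguRemoveNukta" for Telugu) are
    # script-specific and receive only the string; generic ones (e.g.
    # "NasalToAnusvara") also receive the target script as a second argument.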
for Option in Options:
if Option.find(Target) != -1:
Strng = getattr(PP, Option)(Strng)
else:
Strng = getattr(PP, Option)(Strng, Target)
    return Strng


# === end of file: aksarantara/post_options.py (Aksarantara-1.1.0) ===
# === file: aksarantara/trans.py ===
import os
import regex as re
from pathlib import Path
import logging
import json
import langcodes
from fontTools import unicodedata as ucd
from collections import Counter
cwd = Path(Path(__file__).parent)
class Transliterator(object):
def __init__(self):
with open(Path(cwd, "data.json"), "r", encoding="utf-8") as f:
data = json.load(f)
self.db = data["ssub"]
self.db_ccmp = data["ccmp"]
self.db_simplify = data["simp"]["general"]
self.db_fina = data["fina"]
self.db_liga = data["liga"]
self.vocalized = [
"Hebr",
"Hebr-Ar",
"Syrj",
"Syrn",
"Arab-Fa",
"Arab-Pa",
"Latn",
"Latn-No",
"Type",
"Arab",
"Arab-Ur",
"Thaa",
]
def auto_script(self, text):
sc_count = Counter([ucd.script(c) for c in text])
sc = sc_count.most_common(1)[0][0]
if not sc:
sc = "Zyyy"
return sc
def _tr(self, text, sc, to_sc):
t = text
if sc != "Latn":
t = self._preprocess(t, sc, to_sc)
t = self._convert(t, sc, to_sc)
t = self._postprocess(t, to_sc)
return t
def _preprocess(self, text, sc, to_sc):
t = text
for rule_i, rule_o in self.db_ccmp.get(sc, {}).items():
t = t.replace(rule_i, rule_o)
if sc not in self.vocalized or to_sc not in self.vocalized:
t = ucd.normalize("NFD", t)
t = re.sub(r"(?![\u05C4\u0308])\p{M}", "", t)
logging.debug(f"Pre: {list(t)}")
return t
def _postprocess(self, text, sc):
t = text
for rule_i, rule_o in self.db_fina.get(sc, {}).items():
t = re.subf(
rf"(\p{{L}})(\p{{M}}*?)({rule_i})(\p{{M}}*?)([^\p{{L}}])",
f"{{1}}{{2}}{rule_o}{{4}}{{5}}",
t,
)
t = re.subf(
rf"(\p{{L}})(\p{{M}}*?)({rule_i})(\p{{M}}*?)$",
f"{{1}}{{2}}{rule_o}{{4}}",
t,
)
t = re.subf(rf"({rule_o})(\p{{M}}+)(\p{{L}})", f"{rule_i}{{2}}{{3}}", t)
for rule_i, rule_o in self.db_liga.get(sc, {}).items():
t = t.replace(rule_i, rule_o)
logging.debug(f"Post: {list(t)}")
return t
def _to_latin(self, text, sc, to_sc):
chars = list(self.db[sc]["Latn"])
chars.sort(key=len, reverse=True)
for char in chars:
text = text.replace(char, self.db[sc]["Latn"][char])
return text
def _from_latin(self, text, sc, to_sc):
chars = list(self.db["Latn"][to_sc])
chars.sort(key=len, reverse=True)
chars_missing = [x for x in self.db_simplify if x not in chars]
for char in chars_missing:
if char in self.db_simplify:
if "\u033D" in chars_missing and "\u033D" in self.db_simplify[char]:
text = text.replace(char, self.db_simplify[char])
text = text.replace("\u033D", self.db_simplify["\u033D"])
else:
text = text.replace(char, self.db_simplify[char])
for char in chars:
text = text.replace(char, self.db["Latn"][to_sc][char])
return text
def _convert(self, text, sc, to_sc):
if to_sc == "Latn":
return self._to_latin(text, sc, to_sc)
elif sc == "Latn":
return self._from_latin(text, sc, to_sc)
else:
txt_latn = self._to_latin(text, sc, to_sc)
return self._from_latin(txt_latn, sc, to_sc)
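    # Design note: Latin is the pivot representation -- _convert goes
    # source -> Latin -> target for any pair of non-Latin scripts, and
    # _from_latin falls back to db_simplify for Latin characters the target
    # script cannot represent directly.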
def tr(self, text, sc=None, to_sc="Latn"):
if not sc:
sc = self.auto_script(text)
logging.debug(
{
"script": sc,
"to_script": to_sc,
}
)
logging.debug(f"Text: {list(text)}")
if sc != to_sc:
res = self._tr(text, sc, to_sc)
else:
res = text
return res
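
# Module-level convenience wrapper follows.  Illustrative usage (inputs are
# hypothetical; actual output depends on the mapping tables in data.json):
#
#     tr("abc", sc="Latn", to_sc="Arab")   # explicit source script
#     tr("טקסט", to_sc="Latn")             # source auto-detected via auto_script()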
def tr(text, sc=None, to_sc="Latn"):
tr = Transliterator()
if sc != to_sc:
return tr.tr(text, sc, to_sc)
else:
        return text


# === end of file: aksarantara/trans.py (Aksarantara-1.1.0) ===
import Map as GM
import re
import string
import post_processing
import fixer as CF
from East import PhagsPa, Burmese
from Core import Tamil, Malayalam, Limbu, Chakma
def BengaliSubojinedVa(Strng):
Strng = re.sub("(?<![মব])(্ব)", "্ভ়", Strng)
return Strng
def IASTLOCBurmeseSource(Strng):
chars_misc = {"e*": "၏", "n*": "၌", "r*": "၍", "l*": "၎"}
for lat, bur in chars_misc.items():
Strng = Strng.replace(lat, bur)
Strng = Strng.replace("ṁ", "ṃ")
Strng = Strng.replace(",", "၊").replace(".", "။")
Strng = Strng.replace("˝", "ʺ").replace("ʺ", "ḥ")
Strng = Strng.replace("´", "ʹ").replace("ʹ", "˳")
vowelSigns = "|".join(GM.CrunchSymbols(GM.VowelSignsNV, "IAST"))
Strng = re.sub("(ʼ)(a|" + vowelSigns + ")", "’" + r"\2", Strng)
consonants = "|".join(GM.CrunchSymbols(GM.Consonants, "IAST"))
Strng = re.sub("(" + consonants + ")(‘)", r"\1" + "ʻ", Strng)
Strng = Strng.replace("o‘", "oʻ")
Strng = Strng.replace("oʻ", "au")
return Strng
def segmentBurmeseSyllables(Strng):
myConsonant = r"က-အ"
otherChar = r"ဣဤဥဦဧဩဪဿ၌၍၏၀-၉၊။!-/:-@[-`{-~\s"
ssSymbol = r"္"
aThat = r"်"
BreakPattern = re.compile(
r"((?<!"
+ ssSymbol
+ r")["
+ myConsonant
+ r"](?!["
+ aThat
+ ssSymbol
+ r"])"
+ r"|["
+ otherChar
+ r"])",
re.UNICODE,
)
    Strng = Strng.replace("့်", "့်")  # reorder visually identical dot-below/asat marks into canonical order
Strng = BreakPattern.sub(" " + r"\1", Strng)
return Strng
def IASTLOCBurmeseTarget(Strng):
Strng = Strng.replace("\u103A\u1039", "\u1039")
Strng = Strng.replace("\u102D\u102F", "\u102F\u102D")
Strng = Strng.replace("ဪ", "ဩʻ")
yrvh = (
Burmese.ConsonantMap[25:27]
+ Burmese.ConsonantMap[28:29]
+ Burmese.ConsonantMap[32:33]
)
yrvhsub = ["\u103B", "\u103C", "\u103D", "\u103E"]
vir = Burmese.ViramaMap[0]
for x, y in zip(yrvh, yrvhsub):
Strng = Strng.replace(y, vir + vir + x)
    Strng = Strng.replace("့်", "့်")  # reorder visually identical dot-below/asat marks into canonical order
aThat = r"်"
Strng = Strng.replace(aThat, aThat + "ʻ")
Strng = Strng.replace("အ", "’အ")
vowDep = "အော် အော အိ အီ အု အူ အေ".split(" ")
vowIndep = "ဪ ဩ ဣ ဤ ဥ ဦ ဧ".split(" ")
for x, y in zip(vowDep, vowIndep):
Strng = Strng.replace(x, y)
Strng = Strng.replace("၊", ",").replace("။", ".")
return Strng
def insertViramaSyriac(Strng):
Strng += "\uF001"
return Strng
def BengaliSwitchYaYYa(Strng):
Strng = re.sub("(?<!\u09CD)য", "@#$", Strng)
Strng = re.sub("য়", "য", Strng)
Strng = Strng.replace("@#$", "য়")
return Strng
def removeFinalSchwaArab(Strng):
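    # Mark word-final consonants that carry no explicit vowel sign with sukun
    # (U+0652); waw and ya are treated as consonants only when they are not
    # part of a long-vowel spelling (hence the damma/kasra look-behinds).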
diacrtics = ["\u0652", "\u064E", "\u0650", "\u064F"]
Strng = re.sub(
"([\u0628-\u0647])(?![\u0652\u064E\u0650\u064F\u0651\u064B\u064C\u064D\u0649])(?=(\W|$))",
r"\1" + "\u0652",
Strng,
)
Strng = re.sub(
"([\u0628-\u0647]\u0651)(?![\u0652\u064E\u0650\u064F\u064B\u064C\u064D\u0649])(?=(\W|$))",
r"\1" + "\u0652",
Strng,
)
Strng = re.sub(
"(?<!\u0650)([\u064A])(?![\u0651\u0652\u064E\u0650\u064F\u064B\u064C\u064D\u0649])(?=(\W|$))",
r"\1" + "\u0652",
Strng,
)
Strng = re.sub(
"(?<!\u0650)([\u064A]\u0651)(?![\u0652\u064E\u0650\u064F\u064B\u064C\u064D\u0649])(?=(\W|$))",
r"\1" + "\u0652",
Strng,
)
Strng = re.sub(
"(?<!\u064F)([\u0648])(?![\u0651\u0652\u064E\u0650\u064F\u064B\u064C\u064D\u0649])(?=(\W|$))",
r"\1" + "\u0652",
Strng,
)
Strng = re.sub(
"(?<!\u064F)([\u0648]\u0651)(?![\u0652\u064E\u0650\u064F\u064B\u064C\u064D\u0649])(?=(\W|$))",
r"\1" + "\u0652",
Strng,
)
return Strng
def AlephMaterLectionis(Strng, target="semitic"):
Strng += "\u05CD"
return Strng
def FixSemiticRoman(Strng, Source):
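    # Normalise the intermediate Semitic romanisation before mapping to Indic
    # targets: initial aleph/ayin + vowel sequences collapse to circumflexed
    # vowels, source-specific Latin conventions are applied, and gemination
    # colons (U+A789) are expanded into doubled consonants with virama.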
vir = "\u033D"
if "\u05CD" in Strng:
Strng = post_processing.AlephMaterLectionis(Strng)
if "\u05CC" in Strng:
Strng = post_processing.removeSemiticLetters(Strng)
AyinAlephInitial = [
("ʾa", "ʾ"),
("ʾā", "ā̂"),
("ʾi", "î"),
("ʾī", "ī̂"),
("ʾu", "û"),
("ʾū", "ū̂"),
("ʾe", "ê"),
("ʾē", "ē̂"),
("ʾo", "ô"),
("ʾō", "ō̂"),
]
for x, y in AyinAlephInitial:
Strng = Strng.replace(x, y)
if Source == "Arab":
Strng = post_processing.arabizeLatn(Strng, target="indic")
elif Source == "Arab-Ur" or Source == "Arab-Pa" or Source == "Arab-Fa":
Strng = post_processing.urduizeLatn(Strng, target="indic")
elif Source == "Syrn":
Strng = post_processing.syricizeLatn(Strng, target="indic")
elif Source == "Syrj" or Source == "Hebr":
Strng = post_processing.hebraizeLatn(Strng, target="indic")
Strng = Strng.replace("\u032A", "").replace("\u032E", "")
Strng = re.sub("([ʰ])(꞉)", r"\2\1", Strng)
Strng = re.sub("([aiuāīū])(꞉)", r"\2\1", Strng)
Strng = re.sub("(.)(꞉)", r"\1" + vir + r"\1", Strng)
Strng = Strng.replace("ʿ" + vir, "ʿ" + vir + "\u200B")
cons_prev = "|".join(GM.SemiticConsonants)
if "Syr" in Source:
consSyrc = "|".join(
[
"ʾ",
"b",
"v",
"g",
"ġ",
"d",
"ḏ",
"h",
"w",
"z",
"ḥ",
"ṭ",
"y",
"k",
"ḫ",
"l",
"m",
"n",
"s",
"ʿ",
"p",
"f",
"ṣ",
"q",
"r",
"š",
"t",
"ṯ",
"č",
"ž",
"j",
]
)
vowelSyrc = ["a", "ā", "e", "ē", "ū", "ō", "ī", "â", "ā̂", "ê", "ē̂"]
vowelsDepSyrc = "|".join(["a", "ā", "e", "ē", "u", "i", "o"])
vowelsInDepSyrc1 = ["i", "u", "o"]
vowelsInDepSyrc2 = ["ī̂", "û", "ô"]
if any([vow in Strng for vow in vowelSyrc]):
Strng = Strng.replace("ī", "i").replace("ū", "u").replace("ō", "o")
for vow1, vow2 in zip(vowelsInDepSyrc1, vowelsInDepSyrc2):
Strng = re.sub("(?<!\w)" + vow1, vow2, Strng)
Strng = Strng.replace("̂̂", "̂").replace("ô̂", "ô")
if "\uF001" in Strng:
Strng = re.sub(
"(" + consSyrc + ")" + "(?!" + vowelsDepSyrc + ")",
r"\1" + vir,
Strng,
)
Strng = re.sub("(?<=" + cons_prev + ")" + "a(?!\u0304)", "", Strng)
Strng = Strng.replace("\uF001", "")
if (
"Arab" in Source
or Source == "Latn"
or Source == "Hebr"
or Source == "Thaa"
or Source == "Type"
):
basic_vowels = (
"("
+ "|".join(["a", "ā", "i", "ī", "u", "ū", "ē", "ō", "e", "o", "#", vir])
+ ")"
)
Strng = re.sub("(ŵ)(?=" + basic_vowels + ")", "w", Strng)
Strng = re.sub("(ŷ)(?=" + basic_vowels + ")", "y", Strng)
Strng = re.sub("(?<=" + cons_prev + ")" + "a(?!(ŵ|ŷ|\u0304|\u032E))", "", Strng)
Strng = re.sub("(?<=ḧ)" + "a(?!(ŵ|ŷ|\u0304|\u032E))", "", Strng)
if "Arab" in Source:
simp_vow = "a ā i ī u ū".split(" ")
init_vow = "â ā̂ î ī̂ û ū̂".split(" ")
for x, y in zip(simp_vow, init_vow):
Strng = re.sub("ʔ" + x, y, Strng)
if "\u05CC" in Strng:
Strng = Strng.replace("ʔ", "")
SemiticIndic = [
("ṣ", "sQ"),
("ʿ", "ʾQ"),
("ṭ", "tQ"),
("ḥ", "hQ"),
("ḍ", "dQ"),
("p̣", "pQ"),
("ž", "šQ"),
("ẓ", "jʰQ"),
("ḏ", "dʰQ"),
("ṯ", "tʰQ"),
("w", "vQ"),
("ḵ", "k"),
("\u032A", ""),
("\u032E", ""),
("a̮", "ā"),
("\u0308", ""),
("ĕ\u0302", "ê"),
("ă\u0302", "â"),
("ŏ\u0302", "ô"),
("ĕ", "e"),
("ă", ""),
("ŏ", "o"),
("ḵ", "k"),
("ʾQā", "ā̂Q"),
("ʾQi", "îQ"),
("ʾQī", "ī̂Q"),
("ʾQu", "ûQ"),
("ʾQū", "ū̂Q"),
("ʾQe", "êQ"),
("ʾQē", "ē̂Q"),
("ʾQo", "ôQ"),
("ʾQō", "ō̂Q"),
("ⁿ", "n\u033D"),
("ʾā", "ā̂"),
]
for s, i in SemiticIndic:
Strng = Strng.replace(s, i)
if "Arab" in Source:
Strng = re.sub("(\u033D)([iuā])", r"\2", Strng)
Strng = re.sub("(\u033D)([a])", "", Strng)
Strng = Strng.replace("ʾ", "â")
return Strng
def perisanizeArab(Strng):
arabKafYe = "ك ي".split(" ")
persKafYe = "ک ی".split(" ")
for x, y in zip(arabKafYe, persKafYe):
Strng = Strng.replace(x, y)
return Strng
def ArabizePersian(Strng):
arabKafYe = "ك ي".split(" ")
persKafYe = "ک ی".split(" ")
for x, y in zip(arabKafYe, persKafYe):
Strng = Strng.replace(y, x)
return Strng
def semiticizeUrdu(Strng):
urduSpecific = "ے ڈ ٹ ہ".split(" ")
semitic = "ي د ت ه".split(" ")
for x, y in zip(urduSpecific, semitic):
Strng = Strng.replace(x, y)
Strng = Strng.replace("ھ", "")
return Strng
def ShowChillus(Strng):
return post_processing.MalayalamChillu(Strng, True, True)
def ShowKhandaTa(Strng):
print(Strng)
Strng = Strng.replace("ৎ", "ত্ˍ")
print(Strng)
return Strng
def eiaudipthongs(Strng):
return Strng
def wasvnukta(Strng):
return Strng
def default(Strng):
return Strng
def SogdReshAyin(Strng):
Strng = Strng.replace("𐽀", "[\uEA01-\uEA02]")
return Strng
def SogoReshAyinDaleth(Strng):
Strng = Strng.replace("𐼘", "[\uEA01-\uEA02-\uEA06]")
return Strng
def PhlpMemQoph(Strng):
Strng = Strng.replace("𐮋", "[\uEA03-\uEA04]")
return Strng
def PhlpWawAyinResh(Strng):
Strng = Strng.replace("𐮅", "[\uEA05-\uEA02-\uEA01]")
return Strng
def PhliWawAyinResh(Strng):
Strng = Strng.replace("𐭥", "[\uEA05-\uEA02-\uEA01]")
return Strng
def HatrDalethResh(Strng):
Strng = Strng.replace("𐣣", "[\uEA06-\uEA01]")
return Strng
def MalayalamHalfu(Strng):
consu = "[കചടതറനണരലവഴളറ]"
vir = GM.CrunchSymbols(GM.VowelSigns, "Malayalam")[0]
consAll = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Malayalam")) + ")"
Strng = re.sub(
"(?<=" + consu + ")" + "(" + vir + ")" + "(?!" + consAll + ")",
r"\2" + "ു്",
Strng,
)
return Strng
def MalayalamTranscribe(Strng):
Strng = MalayalamHalfu(Strng)
script = "Malayalam"
ListC = GM.CrunchList("ConsonantMap", script)
ListSC = GM.CrunchList("SouthConsonantMap", script)
vir = GM.CrunchSymbols(GM.VowelSigns, script)[0]
ConUnVoiced = [ListC[x] for x in [0, 5, 10, 15, 20]]
ConVoicedJ = [ListC[x] for x in [2, 7, 12, 17, 22]]
ConVoicedS = [ListC[x] for x in [2, 5, 12, 17, 22]]
ConNasalsAll = "|".join([ListC[x] for x in [4, 9, 14, 19, 24]])
conNasalCa = "|".join([ListC[x] for x in [9]])
ConNasalsGroup = [
ConNasalsAll,
conNasalCa,
ConNasalsAll,
ConNasalsAll,
ConNasalsAll,
]
ConMedials = "|".join(ListC[25:28] + ListSC[0:2] + ListSC[3:4])
Vowels = "|".join(GM.CrunchSymbols(GM.Vowels + GM.VowelSignsNV, script))
Aytham = GM.CrunchList("Aytham", script)[0]
Consonants = "|".join(GM.CrunchSymbols(GM.Consonants, script))
NRA = ListSC[3] + vir + ListSC[2]
NDRA = ListC[14] + vir + ListC[12] + vir + ListC[26]
    # Voice unvoiced stops according to their position: after a vowel or
    # consonant, after the same voiced stop, after the class nasal, or after
    # a medial.
    for i in range(len(ConUnVoiced)):
        Strng = re.sub(
            "("
            + Vowels
            + "|"
            + Consonants
            + "|"
            + Aytham
            + ")"
            + ConUnVoiced[i]
            + "(?!"
            + vir
            + ")",
            r"\1" + ConVoicedS[i],
            Strng,
        )
        Strng = re.sub(
            "(" + ConVoicedS[i] + ")" + ConUnVoiced[i] + "(?!" + vir + ")",
            r"\1" + ConVoicedS[i],
            Strng,
        )
        Strng = re.sub(
            "(" + ConNasalsGroup[i] + ")" + "(" + vir + ")" + ConUnVoiced[i],
            r"\1\2" + ConVoicedJ[i],
            Strng,
        )
        Strng = re.sub(
            "("
            + ConMedials
            + ")"
            + "("
            + vir
            + ")"
            + ConUnVoiced[i]
            + "(?!"
            + vir
            + ")",
            r"\1\2" + ConVoicedS[i],
            Strng,
        )
Strng = Strng.replace("റ്റ", "ട്ട").replace("ന്റ", "ണ്ഡ")
return Strng
def retainLatin(Strng, reverse=False):
latn_basic_lower = (
"a b c d e f g h i j k l m n o p q r s t u v w x y z ḥ ṭ ṣ ʾ ʿ š ā ī ū ē ō"
)
latn_basic_upper = latn_basic_lower.upper()
latn_all = latn_basic_lower + latn_basic_upper
latn_all = latn_all.split(" ")
if not reverse:
for i, c in enumerate(latn_all):
Strng = Strng.replace(c, chr(60929 + i))
Strng = (
Strng.replace("\uEA01", "r")
.replace("\uEA02", "ʿ")
.replace("\uEA03", "m")
.replace("\uEA04", "q")
.replace("\uEA05", "w")
.replace("\uEA06", "d")
)
else:
for i, c in enumerate(latn_all):
Strng = Strng.replace(chr(60929 + i), c)
return Strng
def JapanesePreProcess(src, txt, preoptions):
import pykakasi
import convert
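    # Romanise kana via pykakasi (Hepburn), then rewrite the romanisation to
    # the conventions the rest of the pipeline expects: macron long vowels,
    # nasal assimilation (ṅg/ṅk, mp/mb/mm), and c/ṣ for ch/sh.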
if src == "Hiragana" or src == "Katakana":
kks = pykakasi.kakasi()
txt = convert.convertScript(txt.lower(), "ISO", "Devanagari")
cv = kks.convert(txt)
txt = ""
for item in cv:
txt = txt + " " + item["hepburn"]
if "eiaudipthongs" in preoptions:
txt = txt.replace("ou", "o\u02BDu").replace("ei", "e\u02BDi")
txt = re.sub("(r)([aiueo])(\u309A\u309A)", "l" + r"\2\2", txt)
txt = re.sub("(r)([aāiīuūeēoō])(\u309A)", "l" + r"\2", txt)
txt = re.sub("(k)([aiueo])(\u309A\u309A)", "ṅ" + r"\2\2", txt)
txt = re.sub("(k)([aāiīuūeēoō])(\u309A)", "ṅ" + r"\2", txt)
txt = (
txt.replace("aa", "ā")
.replace("ii", "ī")
.replace("ee", "ē")
.replace("oo", "ō")
.replace("uu", "ū")
)
txt = (
txt.replace("a-", "ā")
.replace("i-", "ī")
.replace("e-", "ē")
.replace("o-", "ō")
.replace("u-", "ū")
)
txt = (
txt.replace("n'", "n_")
.replace("ch", "c")
.replace("sh", "ṣ")
.replace("sṣ", "ṣṣ")
.replace("ai", "a_i")
.replace("au", "a_u")
)
txt = txt.replace("w", "v")
txt = txt.replace("ou", "ō").replace("ei", "ē")
txt = txt.replace("、", ",").replace("。", ".")
txt = (
txt.replace("ng", "ṅg")
.replace("nk", "ṅk")
.replace("nk", "ṅk")
.replace("np", "mp")
.replace("nb", "mb")
.replace("nm", "mm")
)
if "wasvnukta" in preoptions:
txt = txt.replace("v", "v̈")
txt = txt.replace("、", ",").replace("。", ".")
return txt
def holamlong(Strng):
Strng = Strng.replace("ֹּ", "ֹּ")
Strng = re.sub("(?<!ו)ֹ", "וֹ", Strng)
return Strng
def novowelshebrewIndic(Strng):
Strng = novowelshebrewSemitic(Strng)
finals = ["ך", "ם", "ן", "ף", "ץ", "ףּ", "ךּ"]
otherCons = "ב,ח,ע,צ,ש,ת".split(",")
consonantsAll = (
"("
+ "|".join(
GM.CrunchSymbols(GM.Consonants, "Hebrew")
+ finals
+ otherCons
+ ["׳", "י", "ו", "א"]
)
+ ")"
)
vowelsignsADShinG = (
"("
+ "|".join(GM.CrunchSymbols(GM.VowelSigns, "Hebrew") + ["ַ", "ּ", "ׁ", "׳"])
+ ")"
)
Strng = re.sub(
consonantsAll + "(?!" + vowelsignsADShinG + ")", r"\1" + "ַ" + r"\2", Strng
)
return Strng
def novowelshebrewSemitic(Strng):
Strng = Strng.replace("כ", "כּ").replace("פ", "פּ").replace("ב", "בּ")
Strng = Strng.replace("ך", "ךּ").replace("ף", "ףּ")
return Strng
def shvanakhall(Strng):
Strng = Strng + " \u0BDE"
return Strng
def longEOISO(Strng):
Strng = Strng.replace("e", "ē").replace("o", "ō")
return Strng
def SanskritLexicaizeHK(Strng):
return Strng
def ThaiPhonetic(Strng):
Strng = Strng.replace("ด", "ท")
Strng = Strng.replace("บ", "พ")
Strng = Strng.replace("ก\u0325", "ค")
Strng = Strng.replace("จ\u0325", "ช")
Strng = Strng.replace("งํ", "ง")
Strng = Strng.replace("\u035C", "")
Strng = Strng.replace("\u0E47", "")
Strng += "\u02BB\u02BB"
return Strng
def LaoPhonetic(Strng):
Strng = Strng.replace("ດ", "ທ")
Strng = Strng.replace("ບ", "ພ")
Strng = Strng.replace("ງໍ", "ງ")
Strng = Strng.replace("\u035C", "")
Strng += "\u02BB\u02BB"
return Strng
def SaurastraHaaruColonTamil(Strng):
Strng = Strng.replace("ன", "ந")
ListVS = "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tamil"))
Strng = re.sub("(" + ListVS + ")" + "(:)", r"\2\1", Strng)
chars = "([நமரல])"
Strng = re.sub(chars + ":", r"\1" + "\uA8B4", Strng)
return Strng
def ChakmaPali(Strng):
Strng = Strng.replace("\U00011147", "𑄤")
Strng = Strng.replace("𑄠", "𑄡")
listC = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.Consonants, "Chakma") + Chakma.VowelMap[:1],
key=len,
reverse=True,
)
)
+ ")"
)
listV = (
"("
+ "|".join(
sorted(
GM.CrunchSymbols(GM.VowelSigns, "Chakma")
+ Chakma.ViramaMap
+ ["\U00011133"],
key=len,
reverse=True,
)
)
+ ")"
)
Strng = Strng.replace("\u02BD", "")
Strng = Strng.replace("\U00011102", "\U00011127")
Strng = re.sub("(" + listC + ")" + "(?!" + listV + ")", r"\1" "\u02BE", Strng)
Strng = Strng.replace("\U00011127", "")
Strng = Strng.replace("\u02BE", "\U00011127")
return Strng
def TakriArchaicKha(Strng):
return Strng.replace("𑚋", "𑚸")
def UrduShortNotShown(Strng):
Strng += "\u02BB\u02BB"
return Strng
def AnuChandraEqDeva(Strng):
return AnuChandraEq(Strng, "Devanagari")
def AnuChandraEq(Strng, script):
Chandrabindu = GM.CrunchList("AyogavahaMap", script)[0]
Anusvara = GM.CrunchList("AyogavahaMap", script)[1]
Strng = Strng.replace(Chandrabindu, Anusvara)
return Strng
def TamilNumeralSub(Strng):
ListC = "(" + "[கசடதபஜஸ]" + ")"
ListV = "(" + "|".join(GM.CrunchSymbols(GM.VowelSigns, "Tamil")) + ")"
Strng = re.sub(ListC + ListV + "2", r"\1\2" + "²", Strng)
Strng = re.sub(ListC + ListV + "3", r"\1\2" + "³", Strng)
Strng = re.sub(ListC + ListV + "4", r"\1\2" + "⁴", Strng)
Strng = re.sub(ListC + "2", r"\1" + "²", Strng)
Strng = re.sub(ListC + "3", r"\1" + "³", Strng)
Strng = re.sub(ListC + "4", r"\1" + "⁴", Strng)
Strng = Strng.replace("ரு'", "ருʼ")
Strng = Strng.replace("ரு’", "ருʼ")
Strng = Strng.replace("ம்'", "ம்ʼ")
Strng = Strng.replace("ம்’", "ம்ʼ")
return Strng
def swapEe(Strng):
Strng = Strng.replace("E", "X@X@")
Strng = Strng.replace("e", "E")
Strng = Strng.replace("X@X@", "e")
Strng = Strng.replace("O", "X@X@")
Strng = Strng.replace("o", "O")
Strng = Strng.replace("X@X@", "o")
return Strng
def swapEeItrans(Strng):
Strng = Strng.replace("^e", "X@X@")
Strng = Strng.replace("e", "^e")
Strng = Strng.replace("X@X@", "e")
Strng = Strng.replace("^o", "X@X@")
Strng = Strng.replace("o", "^o")
Strng = Strng.replace("X@X@", "o")
return Strng
def egrantamil(Strng):
return Strng
def siddhammukta(Strng):
return Strng
def TaiKuen(Strng):
return Strng
def TaiThamLao(Strng):
return Strng
def ThaiSajjhayaOrthography(Strng):
Script = "Thai"
Strng = Strng.replace("ัง", "ังฺ")
Strng = Strng.replace("์", "ฺ")
Strng = Strng.replace("๎", "ฺ")
Strng = Strng.replace("ั", "")
return Strng
def ThaiSajjhayawithA(Strng):
Strng = Strng.replace("ะ", "")
Strng = ThaiSajjhayaOrthography(Strng)
return Strng
def LaoSajhayaOrthography(Strng):
Strng = Strng.replace("ັງ", "ັງ຺")
Strng = re.sub("([ເໂໄ])(.๎)([ຍຣລວຨຩສຫຬ])", r"\2\1\3", Strng)
Strng = Strng.replace("໌", "຺")
Strng = Strng.replace("๎", "຺")
Strng = Strng.replace("ັ", "")
return Strng
def LaoSajhayaOrthographywithA(Strng):
Strng = Strng.replace("ະ", "")
Strng = LaoSajhayaOrthography(Strng)
return Strng
def RemoveSchwaHindi(Strng, showschwa=False):
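    # Hindi schwa deletion on Devanagari text: the inherent 'a' is suppressed
    # (a virama is inserted) in medial/final syllable patterns; with
    # showschwa=True the deletion is marked with U+0954 instead of virama.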
VowI = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, "Devanagari")) + ")"
VowS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSignsNV, "Devanagari")) + ")"
Cons = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, "Devanagari")) + ")"
Char = "(" + "|".join(GM.CrunchSymbols(GM.Characters, "Devanagari")) + ")"
Nas = "([ंःँ]?)"
ISyl = "((" + VowI + "|" + "(" + Cons + VowS + "?" + "))" + Nas + ")"
Syl = "((" + Cons + VowS + ")" + Nas + ")"
SylAny = "((" + Cons + VowS + "?" + ")" + Nas + ")"
if not showschwa:
vir = "्"
vir2 = "्"
else:
vir = "\u0954"
vir2 = "\u0954"
Strng = re.sub(
ISyl + Cons + Cons + SylAny + "(?!" + Char + ")",
r"\1\8" + vir + r"\9\10",
Strng,
)
Strng = re.sub(
ISyl + Cons + Syl + SylAny + "(?!" + Char + ")", r"\1\8" + vir + r"\9\15", Strng
)
Strng = re.sub(ISyl + Cons + Syl + "(?!" + Char + ")", r"\1\8" + vir + r"\9", Strng)
Strng = re.sub(ISyl + Cons + "(?!" + Char + ")", r"\1\8" + vir, Strng)
Cons_sss = "((" + Cons + vir + ")" + "([शषस]))"
Strng = re.sub(ISyl + Cons_sss + "(?!" + Char + ")", r"\1\8" + vir, Strng)
Target = "Devanagari"
ConUnAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [
0,
2,
5,
7,
10,
12,
15,
17,
20,
22,
4,
9,
14,
19,
24,
25,
26,
27,
28,
29,
30,
31,
32,
]
]
ConUnAsp = (
ConUnAsp
+ GM.CrunchList("SouthConsonantMap", Target)
+ GM.CrunchList("NuktaConsonantMap", Target)
)
ConAsp = [
GM.CrunchList("ConsonantMap", Target)[x]
for x in [1, 3, 6, 8, 11, 13, 16, 18, 21, 23]
]
Strng = re.sub(
ISyl
+ "("
+ "|".join(ConUnAsp)
+ ")"
+ "("
+ vir
+ ")("
+ r"\8"
+ ")(?!"
+ Char
+ ")",
r"\1\8\9\10" + vir,
Strng,
)
for i in range(len(ConAsp)):
Strng = re.sub(
ISyl
+ "("
+ ConUnAsp[i]
+ ")"
+ "("
+ vir
+ ")"
+ "("
+ ConAsp[i]
+ ")"
            + "(?!" + Char + ")",
r"\1\8\9\10" + vir,
Strng,
)
cons_pyramid = ["[यरलव]", "[नमण]", "[शषस]", "[कखपफगघबभ]", "[टठतथडढदध]", "[चछजझज़]"]
for c1, cons1 in enumerate(cons_pyramid):
for c2, cons2 in enumerate(cons_pyramid):
if c1 < c2:
Cons_pyr = "((" + cons1 + vir + ")" + "(" + cons2 + "))"
Strng = re.sub(
ISyl + Cons_pyr + "(?!" + Char + ")", r"\1\8" + vir, Strng
)
Strng = Strng.replace(vir, vir2)
return Strng
def RemoveFinal(Strng, Target):
if Target == "Bengali":
Strng = post_processing.KhandaTa(Strng, Target, True)
VowI = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, Target)) + ")"
VowS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target)) + ")"
Cons = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, Target)) + ")"
Char = "(" + "|".join(GM.CrunchSymbols(GM.Characters, Target)) + ")"
Nas = "([" + "|".join(GM.CrunchList("AyogavahaMap", Target)) + "]?)"
ISyl = "((" + VowI + "|" + "(" + Cons + VowS + "?" + ")" + Nas + "))"
Syl = "((" + Cons + VowS + ")" + Nas + ")"
SylAny = "((" + Cons + VowS + "?" + ")" + Nas + ")"
vir = GM.CrunchList("ViramaMap", Target)[0]
if Target != "Bengali":
Cons2 = "((" + Cons + vir + ")?" + Cons + ")"
else:
Cons2 = "(()?" + Cons + ")"
Strng = re.sub(ISyl + Cons2 + "(?!" + Char + ")", r"\1\8" + vir, Strng)
Strng = re.sub(ISyl + Cons2 + "(?!" + Char + ")", r"\1\8" + vir, Strng)
return Strng
def SchwaFinalGurmukhi(Strng):
Strng = RemoveFinal(Strng, "Gurmukhi")
return Strng
def SchwaFinalGujarati(Strng):
Strng = RemoveFinal(Strng, "Gujarati")
return Strng
def SchwaFinalBengali(Strng):
Strng = RemoveFinal(Strng, "Bengali")
return Strng
def SchwaFinalWarangCiti(Strng):
Target = "WarangCiti"
VowI = "(" + "|".join(GM.CrunchSymbols(GM.Vowels, Target)) + ")"
VowS = "(" + "|".join(GM.CrunchSymbols(GM.VowelSignsNV, Target)) + ")"
Cons = "(" + "|".join(GM.CrunchSymbols(GM.Consonants, Target)) + ")"
Char = "(" + "|".join(GM.CrunchSymbols(GM.Characters, Target)) + ")"
Nas = "([" + "|".join(GM.CrunchList("AyogavahaMap", Target)) + "]?)"
ISyl = "((" + VowI + "|" + "(" + Cons + VowS + "?" + ")" + Nas + "))"
Syl = "((" + Cons + VowS + ")" + Nas + ")"
SylAny = "((" + Cons + VowS + "?" + ")" + Nas + ")"
vir = "\u02BB"
Cons2 = "((" + Cons + vir + ")?" + Cons + ")"
Strng = re.sub(ISyl + Cons2 + "(?!" + Char + ")", r"\1\8" + vir, Strng)
return Strng
def siddhamUnicode(Strng):
return Strng
def ThaiOrthography(Strng):
Strng += "\u02BB\u02BB"
return Strng
def LaoTranscription(Strng):
Strng += "\u02BB\u02BB"
return Strng
def LimbuDevanagariConvention(Strng):
Strng = Strng.replace("ए़", "ऎ")
Strng = Strng.replace("ओ़", "ऒ")
Strng = Strng.replace("े़", "ॆ")
Strng = Strng.replace("ो़", "ॊ")
Strng = Strng.replace("ः", "꞉")
return Strng
def LimbuSpellingSaI(Strng):
vir = Limbu.ViramaMap[0]
FCons = [
x + vir for x in [Limbu.ConsonantMap[x] for x in [0, 4, 15, 19, 20, 24, 26, 27]]
]
FinalCons = [
"\u1930",
"\u1931",
"\u1933",
"\u1934",
"\u1935",
"\u1936",
"\u1937",
"\u1938",
]
for x, y in zip(FCons, FinalCons):
Strng = Strng.replace(x, "\u193A" + y)
return Strng
def removeChillus(Strng):
Chillus = ["\u0D7A", "\u0D7B", "\u0D7C", "\u0D7D", "\u0D7E"]
vir = Malayalam.ViramaMap[0]
ConVir = [
Malayalam.ConsonantMap[14] + vir,
Malayalam.ConsonantMap[19] + vir,
Malayalam.ConsonantMap[26] + vir,
Malayalam.ConsonantMap[27] + vir,
Malayalam.SouthConsonantMap[0] + vir,
]
for x, y in zip(Chillus, ConVir):
Strng = Strng.replace(x, y)
return Strng
def SinhalaPali(Strng):
Strng = post_processing.SinhalaPali(Strng, reverse=True)
return Strng
def IASTPali(Strng):
Strng = Strng.replace("ḷ", "l̤")
return Strng
def CyrillicPali(Strng):
Strng = Strng.replace(
"л̣",
"л̤",
)
return Strng
def MalayalamPrakrit(Strng):
Strng = post_processing.ReverseGeminationSign(Strng, "Malayalam")
Strng = Strng.replace("ഀ", "ം")
return Strng
def GranthaPrakrit(Strng):
Strng = post_processing.ReverseGeminationSign(Strng, "Grantha")
Strng = Strng.replace("𑌀", "𑌂")
return Strng
def RomanPreFix(Strng, Source):
DepV = "\u1E7F"
Asp = "\u02B0"
Vir = GM.CrunchList("ViramaMap", Source)[0]
Nuk = GM.CrunchList("NuktaMap", Source)[0]
VowelA = GM.CrunchSymbols(["VowelMap"], Source)[0]
ListV = "|".join(GM.CrunchSymbols(GM.VowelSigns, Source))
ListC = "|".join(GM.CrunchSymbols(GM.Consonants, Source))
Strng = re.sub(
"(" + ListC + ")" + "(?!" + ListV + "|" + VowelA + ")",
r"\1" + DepV + Vir,
Strng,
)
Strng = re.sub(
"(" + ListC + "|" + Nuk + ")" + "(" + ListV + ")", r"\1" + DepV + r"\2", Strng
)
Strng = re.sub("(?<=" + ListC + ")" + "(" + VowelA + ")", r"", Strng)
Strng = Strng.replace(DepV + Vir + Nuk + VowelA, Nuk)
Strng = re.sub(DepV + Vir + Nuk + "(?=[" + DepV + "])", Nuk, Strng)
Strng = Strng.replace(DepV + Vir + Nuk, Nuk + DepV + Vir)
return Strng
def joinVowelCons(Strng, script):
consonantsAll = (
"("
+ "|".join(
sorted(GM.CrunchSymbols(GM.Consonants, script), key=len, reverse=True)
)
+ ")"
)
vowelsAll = (
"("
+ "|".join(sorted(GM.CrunchSymbols(GM.Vowels, script), key=len, reverse=True))
+ ")"
)
Strng = re.sub(consonantsAll + " " + vowelsAll, r"\1\2", Strng)
Strng = re.sub(consonantsAll + " " + consonantsAll, r"\1\2", Strng)
return Strng
def joinVowelConsIAST(Strng):
return joinVowelCons(Strng, "IAST")
def joinVowelConsISO(Strng):
return joinVowelCons(Strng, "ISO")
def PreProcess(Strng, Source, Target):
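    # Source/target-specific normalisation applied before the main conversion
    # pass: case folding for roman schemes, danda handling, ITRANS alternate
    # forms, Baraha conventions, vedic accent markers, and a final pass
    # through normalize() below.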
if Source in GM.RomanDiacritic or Source == "Latn":
Strng = Strng.lower()
pipeScripts = ["HK", "IASTPali", "IAST", "ISO"]
if Source in pipeScripts:
Strng = Strng.replace("|", ".").replace("||", "..")
if "Arab" in Source:
Strng = re.sub(
"([وي])(?=[\u064E\u0650\u064F\u0651\u064B\u064C\u064D])",
"\u02DE" + r"\1",
Strng,
)
if Source in ["Syrj", "Syrn"]:
Strng = Strng.replace("\u0323", "\u0742")
if Source == "Itrans":
sOm = "OM"
tOm = "oM"
punc = (
"("
+ "|".join(
["\u005C" + x for x in list(string.punctuation)]
+ ["\s"]
+ [
x.replace(".", "\.")
for x in GM.CrunchSymbols(GM.Signs, Source)[1:3]
]
)
+ ")"
)
Strng = re.sub(punc + sOm + punc, r"\1" + tOm + r"\2", Strng)
Strng = re.sub("^" + sOm + punc, tOm + r"\1", Strng)
Strng = re.sub(punc + sOm + "$", r"\1" + tOm, Strng)
Strng = re.sub("^" + sOm + "$", tOm, Strng)
punc = "(\s)"
Strng = re.sub(punc + sOm + punc, r"\1" + tOm + r"\2", Strng)
Strng = re.sub("^" + sOm + punc, tOm + r"\1", Strng)
Strng = re.sub(punc + sOm + "$", r"\1" + tOm, Strng)
Strng = re.sub("^" + sOm + "$", tOm, Strng)
AltForm = [
"O",
"aa",
"ii",
"uu",
"RRi",
"RRI",
"LLi",
"LLI",
"N^",
"JN",
"chh",
"shh",
"x",
"GY",
".n",
".m",
".h",
"AUM",
"E",
"J",
"c.o",
"c.e",
]
NormForm = [
"^o",
"A",
"I",
"U",
"R^i",
"R^I",
"L^i",
"L^I",
"~N",
"~n",
"Ch",
"Sh",
"kSh",
"j~n",
"M",
"M",
"",
"oM",
"^e",
"z",
"A.c",
"e.c",
]
for x, y in zip(AltForm, NormForm):
Strng = Strng.replace(x, y)
AltForms = [
("ee", "I"),
("dny", "j~n"),
("oo", "U"),
("kS", "kSh"),
("w", "v"),
("|", "."),
("kShh", "kSh"),
]
for x, y in AltForms:
Strng = Strng.replace(x, y)
Strng = Strng.replace("OM", "oM")
if Source == "BarahaNorth" or Source == "BarahaSouth":
Strng = Strng.replace("A", "aa")
Strng = Strng.replace("I", "ee")
Strng = Strng.replace("U", "oo")
Strng = Strng.replace("ou", "au")
Strng = Strng.replace("K", "kh")
Strng = Strng.replace("G", "gh")
Strng = Strng.replace("ch", "c")
Strng = Strng.replace("Ch", "C")
Strng = Strng.replace("J", "jh")
Strng = Strng.replace("w", "v")
Strng = Strng.replace("sh", "S")
Strng = Strng.replace("~h", "_h")
Strng = Strng.replace("^", "()")
Strng = Strng.replace("^^", "{}")
Strng = Strng.replace("tx", "rx")
Strng = Strng.replace("zh", "Lx")
Strng = Strng.replace("q", "\_")
Strng = Strng.replace("#", "\\'")
Strng = Strng.replace("$", '\\"')
if Source == "IAST":
Strng = Strng.replace("aï", "a_i")
Strng = Strng.replace("aü", "a_u")
Strng = Strng.replace("\u0303", "ṃ")
if Source == "ISO":
Strng = Strng.replace("a:i", "a_i")
Strng = Strng.replace("a:u", "a_u")
Strng = Strng.replace("\u0303", "ṁ")
if Source == "Titus":
Strng = Strng
if Source == "ISO" or Source == "IAST" or Source == "Titus" or "RussianCyrillic":
Strng = CF.VedicSvarasNonDiacritic(Strng)
if Source == "Latn" and "Syr" in Target:
Strng = (
Strng.replace("ḇ", "v")
.replace("ḡ", "ḡ")
.replace("ḵ", "ḫ")
.replace("p̄", "f")
)
if ("↓" in Strng or "↑" in Strng) and Target in GM.IndicScripts:
Strng = Strng.replace("↓", "॒")
Strng = Strng.replace("↑↑", "᳚")
Strng = Strng.replace("↑", "॑")
if ("↓" in Strng or "↑" in Strng) and Target in GM.LatinScripts:
Strng = Strng.replace("↓", "\\_")
Strng = Strng.replace("↑↑", '\\"')
Strng = Strng.replace("↑", "\\'")
if Source == "WarangCiti":
Strng = Strng.replace("\u200D", "\u00D7")
if Source == "Hebr-Ar":
dot_var = [("עׄ", "ג"), ("תׄ", "ת֒"), ("ת", "ת̈"), ("ק", "ק̈")]
for char, char_var in dot_var:
Strng = Strng.replace(char_var, char)
Strng = normalize(Strng, Source)
return Strng
def ISO259Target(Strng):
Strng = Strng.replace("א", "ʾ").replace("׳", "’")
return Strng
def ISO233Target(Strng):
replacements = [("أ", "ˈʾ"), ("ء", "¦"), ("إ", "ˌʾ")]
for x, y in replacements:
Strng = Strng.replace(x, y)
return Strng
def PersianDMGTarget(Strng):
replacements = [("ا", "ʾ")]
for x, y in replacements:
Strng = Strng.replace(x, y)
return Strng
def ISO233Source(Strng):
replacements = [("أ", "ˈʾ"), ("ء", "¦"), ("إ", "ˌʾ")]
for x, y in replacements:
Strng = Strng.replace(y, x)
replacements = [
("j", "ǧ"),
("g", "ǧ"),
("ḧ", "ẗ"),
("ḫ", "ẖ"),
("a̮", "ỳ"),
("aⁿ", "á"),
("iⁿ", "í"),
("uⁿ", "ú"),
("ā̂", "ʾâ"),
("ˀ", "ˈ"),
]
for x, y in replacements:
Strng = Strng.replace(y, x)
return Strng
def HebrewSBLTarget(Strng):
Strng = Strng.replace("א", "ʾ").replace("׳", "’")
return Strng
def HebrewSBLSource(Strng):
Strng = Strng.replace(
"ʾ",
"א",
).replace("’", "׳")
Strng = Strng.replace("\u0307\u00B0", "\u00B0\u0307")
replacements = [
("v", "ḇ"),
("f", "p̄"),
("d꞉", "d"),
("d", "ḏ"),
("g꞉", "g"),
("g", "ḡ"),
("t꞉", "t"),
("t", "ṯ"),
("š̮", "š"),
("š̪", "ś"),
("o", "ō"),
("ō", "ô"),
("ū", "û"),
("\u033D", "ĕ"),
]
for x, y in replacements:
Strng = Strng.replace(y, x)
return Strng
def ISO259Source(Strng):
Strng = Strng.replace(
"ʾ",
"א",
).replace("’", "׳")
Strng = Strng.replace("\u0307\u00B0", "\u00B0\u0307")
replacements = [
("ḵ", "k"),
("v", "b"),
("f", "p"),
("b", "ḃ"),
("p", "ṗ"),
("k", "k̇"),
("꞉", "\u0307"),
("š̮", "š"),
("š", "s̀"),
("š̪", "ś"),
("ā", "å"),
("e", "ȩ"),
("ō", "ŵ"),
("ū", "ẇ"),
("\u033D", "°"),
("ĕ", "ḝ"),
]
for x, y in replacements:
Strng = Strng.replace(y, x)
import unicodedata
Strng = unicodedata.normalize("NFD", Strng)
Strng = Strng.replace("\u0307", "꞉")
Strng = unicodedata.normalize("NFC", Strng)
return Strng
def UnSupThaana(Strng):
return Strng
def RemoveJoiners(Strng):
Strng = Strng.replace("\u200D", "")
Strng = Strng.replace("\u200C", "")
return Strng
def ArabicGimelJa(Strng):
Strng = Strng.replace("ج", "ڨ")
return Strng
def normalize(Strng, Source):
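    # Canonicalise equivalent encodings: precomposed nukta consonants,
    # Malayalam chillus, Tamil numeral/superscript variants, Tibetan vowel
    # stacks, and two-part dependent vowels in several southern scripts.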
nuktaDecom = [
"\u0915\u093C",
"\u0916\u093C",
"\u0917\u093C",
"\u091C\u093C",
"\u0921\u093C",
"\u0922\u093C",
"\u092B\u093C",
"\u092F\u093C",
"\u0A32\u0A3C",
"\u0A38\u0A3C",
"\u0A16\u0A3C",
"\u0A17\u0A3C",
"\u0A1C\u0A3C",
"\u0A2B\u0A3C",
"\u09A1\u09BC",
"\u09A2\u09BC",
"\u09AF\u09BC",
"\u0B21\u0B3C",
"\u0B22\u0B3C",
]
nuktaPrecom = [
"\u0958",
"\u0959",
"\u095A",
"\u095B",
"\u095C",
"\u095D",
"\u095E",
"\u095F",
"\u0A33",
"\u0A36",
"\u0A59",
"\u0A5A",
"\u0A5B",
"\u0A5E",
"\u09DC",
"\u09DD",
"\u09DF",
"\u0B5C",
"\u0B5D",
]
if Source not in ["Grantha", "TamilGrantha"]:
for x, y in zip(nuktaDecom, nuktaPrecom):
Strng = Strng.replace(x, y)
if Source in ["IAST", "ISO", "ISOPali", "Titus"]:
Strng = (
Strng.replace("ü", "uʼ")
.replace("ǖ", "ūʼ")
.replace(
"ö",
"aʼ",
)
.replace("ȫ", "āʼ")
)
if Source == "Arab-Ur" or Source == "Arab-Pa":
Strng = Strng.replace("ك", "ک")
Strng = Strng.replace("ي", "ی")
if Source == "Hebr":
vowels = ["ְ", "ֱ", "ֲ", "ֳ", "ִ", "ֵ", "ֶ", "ַ", "ָ", "ֹ", "ֺ", "ֻ", "ׇ"]
vowelsR = "(" + "|".join(vowels + ["וֹ", "וּ"]) + ")"
Strng = re.sub(vowelsR + "([ּׁׂ])", r"\2\1", Strng)
Strng = Strng.replace("\u05BC\u05B0\u05C1", "\u05C1\u05BC\u05B0")
chilluZwj = ["ണ്", "ന്", "ര്", "ല്", "ള്", "ക്"]
chilluAtom = ["ൺ", "ൻ", "ർ", "ൽ", "ൾ", "ൿ"]
for x, y in zip(chilluZwj, chilluAtom):
Strng = Strng.replace(x, y)
Strng = Strng.replace("ൌ", "ൗ")
Strng = Strng.replace("ൟ", "ഈ")
Strng = Strng.replace("ൎ", "ര്")
Strng = Strng.replace("ൻ്റ", "ന്റ")
Strng = Strng.replace("ೝ", "ನ್")
Strng = Strng.replace("ౝ", "న్")
tamAlt = ["ஸ்ரீ", "க்ஷ", "ரி", "ரீ"]
tamNorm = ["ஶ்ரீ", "க்ஷ", "ரி", "ரீ"]
Strng = Strng.replace("ဿ", "သ္သ")
Strng.replace("ஸ²", "ஶ")
subNum = ["¹", "₁", "₂", "₃", "₄"]
supNum = ["", "", "²", "³", "⁴"]
for x, y in zip(tamAlt + subNum, tamNorm + supNum):
Strng = Strng.replace(x, y)
oldVow = ["ྲྀ", "ཷ", "ླྀ", "ཹ", "ཱི", "ཱུ", "ཀྵ", "ྐྵ"]
newVow = ["ྲྀ", "ྲཱྀ", "ླྀ", "ླཱྀ", "ཱི", "ཱུ", "ཀྵ", "ྐྵ"]
for x, y in zip(oldVow, newVow):
Strng = Strng.replace(x, y)
Strng = Strng.replace("ཅ", "ཙ")
Strng = Strng.replace("ཆ", "ཚ")
latinDecom = [
"ā",
"ī",
"ū",
"ē",
"ō",
"ṃ",
"ṁ",
"ḥ",
"ś",
"ṣ",
"ṇ",
"ṛ",
"ṝ",
"ḷ",
"ḹ",
"ḻ",
"ṉ",
"ṟ",
]
latinPrecom = [
"ā",
"ī",
"ū",
"ē",
"ō",
"ṃ",
"ṁ",
"ḥ",
"ś",
"ṣ",
"ṇ",
"ṛ",
"ṝ",
"ḷ",
"ḹ",
"ḻ",
"ṉ",
"ṟ",
]
for x, y in zip(latinDecom, latinPrecom):
Strng = Strng.replace(x, y)
Strng = Strng.replace("ํา", "ำ")
Strng = Strng.replace("ໍາ", "ຳ")
Strng = Strng.replace("।।", "॥")
Strng = Strng.replace("ᤠ᤺ᤣ", "ᤠᤣ᤺")
Strng = Strng.replace("᤺ᤣ", "ᤣ᤺")
Strng = Strng.replace("ᤠᤣ", "ᤥ")
Strng = Strng.replace("ฎ", "ฏ")
Strng = Strng.replace("𑍌", "𑍗")
Strng = Strng.replace("\u0F82", "\u0F83")
Strng = Strng.replace("ॲ", "ऍ")
Strng = Strng.replace("ো", "ো")
Strng = Strng.replace("াে", "ো")
Strng = Strng.replace("ৌ", "ৌ")
Strng = Strng.replace("ৗে", "ৌ")
Strng = Strng.replace("ொ", "ொ")
Strng = Strng.replace("ாெ", "ொ")
Strng = Strng.replace("ோ", "ோ")
Strng = Strng.replace("ாே", "ோ")
Strng = Strng.replace("ௌ", "ௌ")
Strng = Strng.replace("ௗெ", "ௌ")
Strng = Strng.replace("ൊ", "ൊ")
Strng = Strng.replace("ാെ", "ൊ")
Strng = Strng.replace("ോ", "ോ")
Strng = Strng.replace("ാേ", "ോ")
Strng = Strng.replace("𑍋", "𑍋")
Strng = Strng.replace("𑌾𑍇", "𑍋")
return Strng
def removeZW(Strng):
Strng = Strng.replace("\u200C").replace("\u200D")
return Strng
def PhagsPaArrange(Strng, Source):
if Source in GM.IndicScripts:
ListC = "|".join(
sorted(GM.CrunchSymbols(GM.Consonants, Source), key=len, reverse=True)
)
ListV = "|".join(
sorted(GM.CrunchSymbols(GM.Vowels, Source), key=len, reverse=True)
)
ListVS = "|".join(
sorted(GM.CrunchSymbols(GM.VowelSignsNV, Source), key=len, reverse=True)
)
ListCS = "|".join(
sorted(GM.CrunchSymbols(GM.CombiningSigns, Source), key=len, reverse=True)
)
vir = GM.CrunchSymbols(GM.VowelSigns, Source)[0]
yrv = "|".join(
[GM.CrunchSymbols(GM.Consonants, Source)[i] for i in [25, 26, 28]]
)
Strng = re.sub(
"("
+ ListC
+ ")"
+ "("
+ vir
+ ")"
+ "("
+ yrv
+ ")"
+ "("
+ "("
+ ListVS
+ ")?"
+ "("
+ ListCS
+ ")?"
+ ")",
r" \1\2\3\4",
Strng,
)
Strng = re.sub(
"("
+ ListC
+ ListV
+ ")"
+ "("
+ "("
+ ListVS
+ ")?"
+ "("
+ ListCS
+ ")?"
+ ")"
+ "("
+ ListC
+ ")"
+ "("
+ vir
+ ")"
+ "(?!\s)",
r"\1\2\5\6 ",
Strng,
)
Strng = re.sub(
"("
+ ListC
+ ListV
+ ")"
+ "("
+ "("
+ ListVS
+ ")?"
+ "("
+ ListCS
+ ")?"
+ ")"
+ "("
+ ListC
+ ")"
+ "(?!"
+ vir
+ ")",
r"\1\2 \5",
Strng,
)
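        # The same substitution is applied twice on purpose: re.sub only
        # handles non-overlapping matches, so the second pass catches syllable
        # boundaries that overlap a replacement made in the first pass.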
Strng = re.sub(
"("
+ ListC
+ ListV
+ ")"
+ "("
+ "("
+ ListVS
+ ")?"
+ "("
+ ListCS
+ ")?"
+ ")"
+ "("
+ ListC
+ ")"
+ "(?!"
+ vir
+ ")",
r"\1\2 \5",
Strng,
)
elif Source in GM.LatinScripts:
pass
return Strng
def TamilTranscribeCommon(Strng, c=31):
script = "Tamil"
ListC = GM.CrunchList("ConsonantMap", script)
ListSC = GM.CrunchList("SouthConsonantMap", script)
vir = GM.CrunchSymbols(GM.VowelSigns, script)[0]
ConUnVoiced = [ListC[x] for x in [0, 5, 10, 15, 20]]
ConVoicedJ = [ListC[x] for x in [2, 7, 12, 17, 22]]
ConVoicedS = [ListC[x] for x in [2, 31, 12, 17, 22]]
ConNasalsAll = "|".join([ListC[x] for x in [4, 9, 14, 19, 24]])
conNasalCa = "|".join([ListC[x] for x in [9]])
ConNasalsGroup = [
ConNasalsAll,
conNasalCa,
ConNasalsAll,
ConNasalsAll,
ConNasalsAll,
]
ConMedials = "|".join(ListC[25:28] + ListSC[0:2] + ListSC[3:4])
Vowels = "|".join(GM.CrunchSymbols(GM.Vowels + GM.VowelSignsNV, script))
Aytham = GM.CrunchList("Aytham", script)[0]
Consonants = "|".join(GM.CrunchSymbols(GM.Consonants, script))
NRA = ListSC[3] + vir + ListSC[2]
NDRA = ListC[14] + vir + ListC[12] + vir + ListC[26]
for i in range(len(ConUnVoiced)):
pass
Strng = re.sub(
"("
+ Vowels
+ "|"
+ Consonants
+ "|"
+ Aytham
+ ")"
+ ConUnVoiced[i]
+ "(?!"
+ vir
+ ")",
r"\1" + ConVoicedS[i],
Strng,
)
Strng = re.sub(
"([³])" + ConUnVoiced[i] + "(?!" + vir + ")", r"\1" + ConVoicedS[i], Strng
)
Strng = re.sub("³+", "³", Strng)
Strng = re.sub(
"(" + ConNasalsGroup[i] + ")" + "(" + vir + ")" + ConUnVoiced[i],
r"\1\2" + ConVoicedJ[i],
Strng,
)
Strng = re.sub(
"("
+ ConMedials
+ ")"
+ "("
+ vir
+ ")"
+ ConUnVoiced[i]
+ "(?!"
+ vir
+ ")",
r"\1\2" + ConVoicedS[i],
Strng,
)
Strng = Strng.replace(NRA, NDRA)
Strng = re.sub(
"(?<!"
+ "("
+ ListC[5]
+ "|"
+ ListSC[2]
+ "|"
+ "ட"
+ ")"
+ vir
+ ")"
+ ListC[5]
+ "(?!"
+ vir
+ ")",
ListC[c],
Strng,
)
import string
punct = (
"|".join(
[
"\\" + x
for x in list(string.punctuation.replace(".", "").replace("?", ""))
]
)
+ "|\s"
)
Strng = re.sub(
"(" + ListC[5] + vir + ")" + "((" + punct + ")+)" + "(" + ListC[c] + ")",
r"\1\2" + ListC[5],
Strng,
)
Strng = re.sub(
"(" + ListC[9] + vir + ")" + "((" + punct + ")+)" + "(" + ListC[c] + ")",
r"\1\2" + ListC[7],
Strng,
)
Strng = re.sub(
"(" + ListC[4] + vir + ")" + "((" + punct + ")+)" + "(" + ListC[0] + ")",
r"\1\2" + ListC[2],
Strng,
)
Strng = re.sub(
"(" + ListC[14] + vir + ")" + "((" + punct + ")+)" + "(" + ListC[10] + ")",
r"\1\2" + ListC[12],
Strng,
)
Strng = re.sub(
"(" + ListC[19] + vir + ")" + "((" + punct + ")+)" + "(" + ListC[15] + ")",
r"\1\2" + ListC[17],
Strng,
)
Strng = Strng.replace(Tamil.Aytham[0] + ListC[0], ListC[32] + vir + ListC[32])
Strng = Strng.replace(Tamil.Aytham[0], ListC[32] + vir)
Strng = re.sub(ListSC[2] + vir + ListSC[2], ListC[10] + vir + ListC[26], Strng)
Strng = re.sub(
"("
+ "["
+ ListC[10]
+ ListSC[2]
+ "]"
+ vir
+ ")"
+ "(\s)"
+ "("
+ ListC[c]
+ ")",
r"\1\2" + ListC[5],
Strng,
)
Strng = Strng.replace(ListSC[3], ListC[19])
return Strng
def TamilTranscribe(Strng):
Strng = TamilTranscribeCommon(Strng)
return Strng
def TamilTranscribeDialect(Strng):
Strng = TamilTranscribeCommon(Strng, c=29)
return Strng
def IPAIndic(Strng):
Strng = Strng.replace("ʊ", "u")
Strng = Strng.replace("ɛ", "e")
    return Strng | Aksarantara | /Aksarantara-1.1.0-py3-none-any.whl/aksarantara/pre_processing.py | pre_processing.py
# AksharaJaana
<p align="center">
<img src="https://user-images.githubusercontent.com/63489382/173864136-118db121-fcf4-4c8a-9b7d-7c4e4c0e48f9.png" width=200px>
</p>
<p align="center">
An OCR for Kannada.
</p>
<p align="center">
<a href="https://www.npmjs.com/package/@swc/core">
<a href="https://pypi.org/project/AksharaJaana/"><img src="https://img.shields.io/badge/pypi-package-blue?labelColor=black&style=flat&logo=python&link=https://pypi.org/project/AksharaJaana/" alt="pypi" /></a>
</a>
</p>
AksharaJaana is a package that uses Tesseract OCR in the backend to convert read-only Kannada text into an editable format.
A special feature is that it can separate columns on a page, making the text easier to read and edit.
Do consider using this package if it fits your needs, and feel free to mail me with any clarifications.
- Email : [email protected]
- Twitter handle: https://twitter.com/navaneethakbh
Happy coding and installing.
To see the python package visit https://pypi.org/project/AksharaJaana/
## The Requirements
***A Conda environment is preferred for smooth use***
- AksharaJaana *(pip package)*, check out the latest version available
- Tesseract
- poppler
## Details for Installation
### Ubuntu
Open terminal and execute below commands.
1. **Installing tesseract-ocr in the system**
```bash
sudo apt-get update -y
sudo apt-get install -y tesseract-ocr
sudo apt-get install tesseract-ocr-kan
```
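
   You can verify that the Kannada language pack is available (assuming `tesseract` is now on your PATH):

   ```bash
   tesseract --list-langs   # the output should include "kan"
   ```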
2. **Installing poppler in the system**
```bash
sudo apt-get install -y poppler-utils
```
3. **Installing python and pip (if pip is not installed)**
```bash
sudo apt-get install -y python3 python3-pip
```
4. **Installing packages for AksharaJaana**
```bash
pip install --upgrade AksharaJaana
```
### Windows
1. Installing tesseract-ocr in the system
- **Download tesseract**
- go to the <a href="https://github.com/UB-Mannheim/tesseract/wiki">website</a>
- click on `tesseract-ocr-w64-setup-v5.0.0-alpha.20200328.exe (64 bit)`.
- **Install tesseract for Kannada Language and Script**
     - Open the downloaded file, click Next, and accept the agreement.
     - Next, you will be given the option to choose languages.
     - **Choose Kannada in both script and language**
- **Add tesseract to Path**
- Check if this folder `C:\Program Files\Tesseract-OCR\` is present. If yes, follow below procedure
- Add `C:\Program Files\Tesseract-OCR\` to your system PATH by doing the following
1. Click on the `Windows start button`, search for `Edit the system environment variables`, click on Environment Variables
2. Under System variables, look for and double-click on PATH, click on `New`.
3. then add `C:\Program Files\Tesseract-OCR\`, click OK.
     - If the folder is not present, manually copy the extracted `Tesseract-OCR` folder from your download location into `C:\Program Files\`, then follow the same procedure.
- See complete [docs](docs/tesseract_installation/README.md).
2. Installing poppler in the system
- **Download Poppler**
- go to <a href="http://blog.alivate.com.au/poppler-windows/">this</a> page
- click on `poppler-0.54_x86`
   - **Unzip** the file and copy the extracted files to `C:\Program Files\poppler-0.68.0_x86`
- **Add poppler to path**
- Add `C:\Program Files\poppler-0.68.0_x86\bin` to your system PATH by doing the following:
1. Click on the Windows start button, search for Edit the system environment variables, click on Environment Variables
2. under System variables, look for and double-click on PATH, click on New
        3. then add `C:\Program Files\poppler-0.68.0_x86\bin`, click OK.
3. Installing python and pip in the system (If pip is not installed)
- <a href="https://www.python.org/downloads/">Download python</a>
4. Installing packages for AksharaJaana
- open command prompt
```bash
pip install AksharaJaana
```
5. **Reboot** the system before you start using the package
### Python Script
```python
from AksharaJaana.main import OCREngine
ocr = OCREngine()
text = ocr.get_text_from_file("Your file Path")
print(text)
```
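
To keep the recognized text, write it out to a UTF-8 file with plain Python (no package-specific API is assumed here):

```python
with open("output.txt", "w", encoding="utf-8") as f:
    f.write(text)
```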
| AksharaJaana | /AksharaJaana-1.0.1.1.tar.gz/AksharaJaana-1.0.1.1/README.md | README.md |
from PIL import Image
class Asciiart:
def __init__(self, image) -> None:
self.image = Image.open(image)
self.w, self.h = self.image.size
self.ascii_chars = list('01')
self.ascii_text = ''
self.number = ""
self.ascii_html = ''
    def set_dim(self, width=0, height=0):
        # Resize the image, preserving the aspect ratio when only one
        # dimension is supplied.
        if width == 0 and height != 0:
            self.w, self.h = int(self.w/self.h * height), height
        elif width != 0 and height == 0:
            self.w, self.h = width, int(self.h/self.w * width)
        else:
            self.w, self.h = width, height
        self.image = self.image.resize((self.w, self.h))
def binary_to_decimal(self, binary):
decimal = 0
l = len(binary)
for x in binary:
l -= 1
decimal += pow(2, l) * int(x)
return int(decimal)
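    # Note: for a '0'/'1' string this is equivalent to the built-in
    # int(binary, 2); the explicit loop is kept for readability.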
def span(self, integer, integer_colour):
return f"<span style='color: rgb{integer_colour};'><b>{integer}</b></span>"
def asciify(self):
div = 255//(len(self.ascii_chars))
bwdata = self.image.convert('L').getdata()
for line_no in range(self.h):
for pixel in range(line_no*self.w, line_no*self.w + self.w):
self.ascii_text += self.ascii_chars[bwdata[pixel]//div -1]
self.ascii_text += '\n'
def numberize(self, first_char=1):
div, number = 255//len(self.ascii_chars), ''
bwdata = self.image.convert('L').getdata()
for line_no in range(self.h):
for pixel in range(line_no*self.w, line_no*self.w + self.w):
number += self.ascii_chars[bwdata[pixel]//div - 1]
self.ascii_text += self.ascii_chars[bwdata[pixel]//div - 1]
self.ascii_text += '\n'
if number[0] == "0":
number = str(first_char) + number[1:]
self.number = number
return self.number
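    # Typical end-to-end flow (a sketch; the image path and the prime are
    # illustrative and must be supplied by the caller):
    #   art = Asciiart("portrait.png")
    #   art.set_dim(width=40)
    #   n = art.numberize()        # digit string derived from pixel intensity
    #   art.primify(found_prime)   # found_prime: a prime with len(n) digits
    #   art.prime_asciify()
    #   art.colorify()
    #   art.color_output("prime_portrait.html")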
    def primify(self, prime, binary=False):
        # Swap the pixel-derived digit string for a same-length prime supplied
        # by the caller; in binary mode the "0b" prefix of bin() is excluded
        # from the length comparison.
        if binary and len(bin(int(prime))) - 2 == len(self.number):
            self.number = bin(int(prime))
        elif len(str(int(prime))) == len(str(self.number)):
            self.number = str(prime)
        else:
            print("not primified")
    def prime_asciify(self):
        # Rebuild the ASCII text from self.number, skipping any "0b" prefix
        # left by primify(..., binary=True), as colorify() below does.
        self.ascii_text = ""
        digits = self.number[2:] if self.number[:2] == "0b" else self.number
        for line in range(self.h):
            for dig in range(line*self.w, line*self.w + self.w):
                self.ascii_text += digits[dig]
            self.ascii_text += '\n'
def colorify(self):
color = self.image.getdata()
file = '<p>'
if self.number[:2] != "0b":
for line_no in range(self.h):
for pixel in range(line_no*self.w, line_no*self.w + self.w):
file += self.span(self.number[pixel], color[pixel])
file += '<br>'
else:
for line_no in range(self.h):
for pixel in range(line_no*self.w, line_no*self.w + self.w):
file += self.span(self.number[2+pixel], color[pixel])
file += '<br>'
file += "</p>"
self.ascii_html = file
def ascii_show(self):
print(self.ascii_text[:-1])
def text_output(self, fname):
with open(fname, "w") as file:
file.write(self.ascii_text)
def color_output(self, fname):
with open(fname, "w") as file:
file.write(self.ascii_html) | Aksharify | /Aksharify-2.1-py3-none-any.whl/Aksharify.py | Aksharify.py |
# The MIT License (MIT)
Copyright © 2023 Prime Patel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | Aksharify | /Aksharify-2.1-py3-none-any.whl/Aksharify-2.1.dist-info/LICENSE.md | LICENSE.md |
def new_grp():
import math
import time
from turtle import *
import turtle
import random
ws = turtle.Screen()
ws.setup(900, 800)
txt=turtle.textinput("2K23 QUOTES","Enter your Name :")
hideturtle()
quot=["The only one who can tell you “you can’t win” is you and you don’t have to listen","Set your goals high, and don’t stop till you get there.","Life is like riding a bicycle. To keep your balance you must keep moving","I have never let my schooling interfere with my education.","If you can’t yet do great things, do small things in a great way.","Be sure you put your feet in the right place, then stand firm.","Do not wait for the perfect time and place to enter, for you are already onstage.","The greater the difficulty, the more the glory in surmounting it.","I never look back, darling. It distracts from the now.","A year from now you will wish you had started today.","I never dreamed about success. I worked for it","Success is getting what you want, happiness is wanting what you get.","Don’t let yesterday take up too much of today.","Goal setting is the secret to a compelling future.","Either you run the day or the day runs you.","Make sure your worst enemy doesn’t live between your own two ears.","Hustle beats talent when talent doesn’t hustle","Start where you are. Use what you have. Do what you can.","We are what we repeatedly do. Excellence, then, is not an act, but a habit.","Setting goals is the first step in turning the invisible into the visible."]
q=random.choice(quot)
speed(0)
pensize(10)
colormode(255)
while True:
turtle.bgcolor("yellow")
turtle.color("red")
write(txt + " Happy new Year" , align="center", font=("Cooper Black", 25, "italic"))
time.sleep(2)
turtle.clear()
turtle.color("blue")
write( "Quote for you : \n"+q, align="center", font=("Cooper Black", 15, "italic"))
time.sleep(3)
turtle.clear()
def hearta(k):
return 15*math.sin(k)**3
    def heartb(k):
        return 12*math.cos(k) - 5*math.cos(2*k) - 2*math.cos(3*k) - math.cos(4*k)
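    # Classic parametric heart curve: x = 15*sin(k)**3,
    # y = 12*cos(k) - 5*cos(2*k) - 2*cos(3*k) - cos(4*k); scaled by 20 below.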
speed(0)
bgcolor("black")
for i in range(6000):
goto(hearta(i)*20,heartb(i)*20)
for j in range(5):
color("#f73487")
goto(0,0)
done() | Akshay-grpi | /Akshay_grpi-1.0.tar.gz/Akshay_grpi-1.0/src/Akshay_grpi.py | Akshay_grpi.py |
def plotLogo(ax):
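    # Traces the word "Akvo" from sampled sine/cosine envelopes: each letter
    # is drawn over an index range of the curves (a, k, v, o below), with
    # maroon fills for the closed strokes.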
#import matplotlib.pyplot as ax
import numpy as np
x = np.arange(-np.pi/2, 7.0*np.pi, .01)
y = np.sin(x)
y2 = np.cos(x)
y3 = np.sin(.5*x)
y4 = np.cos(.5*x)
ax.plot(x, y, linewidth=1, color='lightgrey')
ax.plot(x, -y, linewidth=1, color='lightgrey')
ax.plot(x, y2, linewidth=1, color='lightgrey')
ax.plot(x,-y2, linewidth=1, color='lightgrey')
ax.plot(x, y3, linewidth=1, color='lightgrey')
ax.plot(x, -y3, linewidth=1, color='lightgrey')
ax.plot(x, y4, linewidth=1, color='lightgrey')
ax.plot(x, -y4, linewidth=1, color='lightgrey')
a = np.arange(10,615)
ax.plot(x[a],y[a], linewidth=3, color='black')
ax.fill_between(x[a],y[a], y2=-1, where=y[a]>=-1, interpolate=True, linewidth=0, alpha=.95, color='maroon')
k = np.arange(615, 785)
k2 = np.arange(785, 1105)
ax.plot(x[k], y[k ], linewidth=3, color='black')
ax.plot(x[k], -y[k ], linewidth=3, color='black')
ax.plot(x[k2], y3[k2], linewidth=3, color='black')
ax.plot(x[k2], -y3[k2], linewidth=3, color='black')
#v = np.arange(1265,1865) # y
v = np.arange(1105, 1720)
ax.plot(x[v], -y2[v], linewidth=3, color='black')
o = np.arange(1728,2357)
ax.plot(x[o], y4[o], linewidth=3, color='black')
ax.plot(x[o], -y4[o], linewidth=3, color='black')
ax.fill_between(x[o], y4[o], y2=-y4[o], where=y4[o]<=1, interpolate=True, linewidth=0, alpha=.95, color='maroon')
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure( figsize=(6,3) )
#ax = fig.add_axes([.1,.1,.8,.8])
ax = fig.add_subplot(211)
fig.patch.set_facecolor( None )
fig.patch.set_alpha( .0 )
ax.axis('off')
plotLogo(ax)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
subplot2 = fig.add_subplot(212)
subplot2.text(0.5, 1.,'surface NMR workbench',
horizontalalignment='center',
verticalalignment='center',
size=22,
transform = subplot2.transAxes)
subplot2.xaxis.set_major_locator(plt.NullLocator())
subplot2.yaxis.set_major_locator(plt.NullLocator())
subplot2.axis('off')
plt.savefig("logo.pdf")
plt.show()
#ax.fill_between(x[o], -y4[o], y2=0, where=-y4[o]<=1, interpolate=True, linewidth=0, alpha=.5, color='black')
#ax.plot(x[o], y2[o], linewidth=3, color='black')
#ax.plot(x[o],-y2[o], linewidth=3, color='black')
#ax.fill_between(x[a], y[a], y2=-1, where=y[a]>=-1, interpolate=True, linewidth=0, alpha=.5, color='black')
#ax.show() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/gui/logo2.py | logo2.py |
from PyQt5 import uic
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QMainWindow, QPushButton, QApplication, QTextEdit, QApplication, QDialog
def p(x):
print (x)
class callScript(QDialog):
#def __init__(self):
# super().__init__()
def setupCB(self, akvoData, kernelParams, SaveStr):
#QtGui.QWidget.__init__(self)
#uic.loadUi('redirect.ui', self)
#print ('Connecting process')
self.process = QtCore.QProcess(self)
self.process.readyReadStandardOutput.connect(self.stdoutReady)
self.process.readyReadStandardError.connect(self.stderrReady)
self.process.started.connect(lambda: p('Started!'))
self.process.finished.connect(lambda: p('Finished!'))
#print ('Starting process')
#self.process.start('python', ['calcAkvoKernel.py', akvoData, TxCoil, SaveStr])
self.process.start('akvoK0', [ akvoData, kernelParams, SaveStr])
def setupQTInv(self, params):
#QtGui.QWidget.__init__(self)
#uic.loadUi('redirect.ui', self)
#print ('Connecting process')
self.process = QtCore.QProcess(self)
self.process.readyReadStandardOutput.connect(self.stdoutReady)
self.process.readyReadStandardError.connect(self.stderrReady)
self.process.started.connect(lambda: p('Started!'))
self.process.finished.connect(lambda: p('Finished!'))
#print ('Starting process')
#self.process.start('python', ['calcAkvoKernel.py', akvoData, TxCoil, SaveStr])
self.process.start('akvoQT', [params])
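    # Usage sketch (the YAML file names are illustrative; the dialog is
    # expected to expose self.ui.textEdit, e.g. via the commented-out
    # loadUi('redirect.ui') call above, for append() below to work):
    #   dlg = callScript()
    #   dlg.setupCB("survey.yaml", "kernelParams.yaml", "K0.yaml")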
def append(self, text):
cursor = self.ui.textEdit.textCursor()
cursor.movePosition(cursor.End)
cursor.insertText(text)
self.ui.textEdit.ensureCursorVisible()
#MyTextEdit.verticalScrollBar()->setValue(MyTextEdit.verticalScrollBar()->maximum());
def stdoutReady(self):
text = str(self.process.readAllStandardOutput(), encoding='utf-8')
#print (text)
self.append(text)
def stderrReady(self):
text = str(self.process.readAllStandardError())
#print (text) #.strip())
self.append(text)
#def main():
# import sys
# app = QApplication(sys.argv)
# win = MainWindow()
# win.show()
# sys.exit(app.exec_())
#if __name__ == '__main__':
# main() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/gui/callScript.py | callScript.py |
def plotLogo(ax):
#import matplotlib.pyplot as ax
import numpy as np
x = np.arange(-np.pi/2, 7.0*np.pi, .01)
y = np.sin(x)
y2 = np.cos(x)
y3 = np.sin(.5*x)
y4 = np.cos(.5*x)
ax.plot(x, y, linewidth=1, color='lightgrey')
ax.plot(x, -y, linewidth=1, color='lightgrey')
ax.plot(x, y2, linewidth=1, color='lightgrey')
ax.plot(x,-y2, linewidth=1, color='lightgrey')
ax.plot(x, y3, linewidth=1, color='lightgrey')
ax.plot(x, -y3, linewidth=1, color='lightgrey')
ax.plot(x, y4, linewidth=1, color='lightgrey')
ax.plot(x, -y4, linewidth=1, color='lightgrey')
a = np.arange(10,615)
ax.plot(x[a],y[a], linewidth=3, color='black')
ax.fill_between(x[a],y[a], y2=-1, where=y[a]>=-1, interpolate=True, linewidth=0, alpha=.95, color='maroon')
k = np.arange(615, 785)
k2 = np.arange(785, 1105)
ax.plot(x[k], y[k ], linewidth=3, color='black')
ax.plot(x[k], -y[k ], linewidth=3, color='black')
ax.plot(x[k2], y3[k2], linewidth=3, color='black')
ax.plot(x[k2], -y3[k2], linewidth=3, color='black')
#v = np.arange(1265,1865) # y
v = np.arange(1105, 1720)
ax.plot(x[v], -y2[v], linewidth=3, color='black')
o = np.arange(1728,2357)
ax.plot(x[o], y4[o], linewidth=3, color='black')
ax.plot(x[o], -y4[o], linewidth=3, color='black')
ax.fill_between(x[o], y4[o], y2=-y4[o], where=y4[o]<=1, interpolate=True, linewidth=0, alpha=.95, color='maroon')
if __name__ == "__main__":
import matplotlib.pyplot as plt
fig = plt.figure( figsize=(6,3) )
#ax = fig.add_axes([.1,.1,.8,.8])
ax = fig.add_subplot(211)
fig.patch.set_facecolor( None )
fig.patch.set_alpha( .0 )
ax.axis('off')
plotLogo(ax)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
subplot2 = fig.add_subplot(212)
subplot2.text(0.5, 1.,'surface NMR workbench',
horizontalalignment='center',
verticalalignment='center',
size=22,
transform = subplot2.transAxes)
subplot2.xaxis.set_major_locator(plt.NullLocator())
subplot2.yaxis.set_major_locator(plt.NullLocator())
subplot2.axis('off')
plt.savefig("logo.pdf")
plt.show()
#ax.fill_between(x[o], -y4[o], y2=0, where=-y4[o]<=1, interpolate=True, linewidth=0, alpha=.5, color='black')
#ax.plot(x[o], y2[o], linewidth=3, color='black')
#ax.plot(x[o],-y2[o], linewidth=3, color='black')
#ax.fill_between(x[a], y[a], y2=-1, where=y[a]>=-1, interpolate=True, linewidth=0, alpha=.5, color='black')
#ax.show() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/gui/logo.py | logo.py |
import sys, os, random
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
class AppForm(QMainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
self.setWindowTitle('Demo: PyQt with matplotlib')
self.create_menu()
self.create_main_frame()
self.create_status_bar()
self.textbox.setText('1 2 3 4')
self.on_draw()
def save_plot(self):
file_choices = "PNG (*.png)|*.png"
        path = str(QFileDialog.getSaveFileName(self,
                        'Save file', '',
                        file_choices))
if path:
self.canvas.print_figure(path, dpi=self.dpi)
self.statusBar().showMessage('Saved to %s' % path, 2000)
def on_about(self):
msg = """ A demo of using PyQt with matplotlib:
* Use the matplotlib navigation bar
* Add values to the text box and press Enter (or click "Draw")
* Show or hide the grid
* Drag the slider to modify the width of the bars
* Save the plot to a file using the File menu
* Click on a bar to receive an informative message
"""
QMessageBox.about(self, "About the demo", msg.strip())
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a bar with coords:\n %s" % box_points
QMessageBox.information(self, "Click!", msg)
def on_draw(self):
""" Redraws the figure
"""
        text = str(self.textbox.text())
        self.data = list(map(int, text.split()))
x = range(len(self.data))
# clear the axes and redraw the plot anew
#
self.axes.clear()
self.axes.grid(self.grid_cb.isChecked())
self.axes.bar(
left=x,
height=self.data,
width=self.slider.value() / 100.0,
align='center',
alpha=0.44,
picker=5)
self.canvas.draw()
def create_main_frame(self):
self.main_frame = QWidget()
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((5.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.main_frame)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
# Create the navigation toolbar, tied to the canvas
#
self.mpl_toolbar = NavigationToolbar(self.canvas, self.main_frame)
# Other GUI controls
#
self.textbox = QLineEdit()
self.textbox.setMinimumWidth(200)
self.connect(self.textbox, SIGNAL('editingFinished ()'), self.on_draw)
self.draw_button = QPushButton("&Draw")
self.connect(self.draw_button, SIGNAL('clicked()'), self.on_draw)
self.grid_cb = QCheckBox("Show &Grid")
self.grid_cb.setChecked(False)
self.connect(self.grid_cb, SIGNAL('stateChanged(int)'), self.on_draw)
slider_label = QLabel('Bar width (%):')
self.slider = QSlider(Qt.Horizontal)
self.slider.setRange(1, 100)
self.slider.setValue(20)
self.slider.setTracking(True)
self.slider.setTickPosition(QSlider.TicksBothSides)
self.connect(self.slider, SIGNAL('valueChanged(int)'), self.on_draw)
#
# Layout with box sizers
#
hbox = QHBoxLayout()
for w in [ self.textbox, self.draw_button, self.grid_cb,
slider_label, self.slider]:
hbox.addWidget(w)
hbox.setAlignment(w, Qt.AlignVCenter)
vbox = QVBoxLayout()
vbox.addWidget(self.canvas)
vbox.addWidget(self.mpl_toolbar)
vbox.addLayout(hbox)
self.main_frame.setLayout(vbox)
self.setCentralWidget(self.main_frame)
def create_status_bar(self):
self.status_text = QLabel("This is a demo")
self.statusBar().addWidget(self.status_text, 1)
def create_menu(self):
self.file_menu = self.menuBar().addMenu("&File")
load_file_action = self.create_action("&Save plot",
shortcut="Ctrl+S", slot=self.save_plot,
tip="Save the plot")
quit_action = self.create_action("&Quit", slot=self.close,
shortcut="Ctrl+Q", tip="Close the application")
self.add_actions(self.file_menu,
(load_file_action, None, quit_action))
self.help_menu = self.menuBar().addMenu("&Help")
about_action = self.create_action("&About",
shortcut='F1', slot=self.on_about,
tip='About the demo')
self.add_actions(self.help_menu, (about_action,))
def add_actions(self, target, actions):
for action in actions:
if action is None:
target.addSeparator()
else:
target.addAction(action)
def create_action( self, text, slot=None, shortcut=None,
icon=None, tip=None, checkable=False,
signal="triggered()"):
action = QAction(text, self)
if icon is not None:
action.setIcon(QIcon(":/%s.png" % icon))
if shortcut is not None:
action.setShortcut(shortcut)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if slot is not None:
self.connect(action, SIGNAL(signal), slot)
if checkable:
action.setCheckable(True)
return action
def main():
app = QApplication(sys.argv)
form = AppForm()
form.show()
app.exec_()
if __name__ == "__main__":
    main()
import sys
import matplotlib
matplotlib.use("QT5Agg")
from PyQt5 import QtCore, QtGui, QtWidgets #, uic
import numpy as np
import time
import os
from copy import deepcopy
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT #as NavigationToolbar
import datetime
import pkg_resources # part of setuptools
from collections import OrderedDict
from ruamel import yaml
from akvo.gui.main_ui import Ui_MainWindow
from akvo.gui.addCircularLoop_ui import Ui_circularLoopAdd
from akvo.gui.addFigure8Loop_ui import Ui_figure8LoopAdd
from akvo.gui.addPolygonalLoop_ui import Ui_polygonalLoopAdd
from akvo.gui.redirect_ui import Ui_callScript
from akvo.gui.callScript import callScript
from akvo.tressel import mrsurvey
from pyLemma import LemmaCore
from pyLemma import FDEM1D
from pyLemma import Merlin
VERSION = pkg_resources.require("Akvo")[0].version
GAMMAH = 42.577478518 * 1e-3 # Hz nT
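# (proton gyromagnetic ratio / 2π ≈ 42.5775 MHz/T, expressed here in Hz/nT, so the
#  Larmor frequency in Hz is GAMMAH * |B0| with B0 given in nT;
#  e.g. |B0| = 48000 nT gives f ≈ 2043.7 Hz)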
# Writes out numpy arrays into Eigen vectors as serialized by Lemma
class MatrixXr(yaml.YAMLObject):
yaml_tag = u'MatrixXr'
def __init__(self, rows, cols, data):
self.rows = rows
self.cols = cols
self.data = np.zeros((rows,cols))
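        # NOTE: the data argument is currently ignored; the matrix is initialised to zeros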
def __repr__(self):
return "%s(rows=%r, cols=%r, data=%r)" % (self.__class__.__name__, self.rows, self.cols, self.data)
class VectorXr(yaml.YAMLObject):
yaml_tag = r'VectorXr'
def __init__(self, array):
self.size = np.shape(array)[0]
self.data = array.tolist()
def __repr__(self):
# Converts to numpy array on import
return "np.array(%r)" % (self.data)
def setup_yaml():
""" https://stackoverflow.com/a/8661021 """
represent_dict_order = lambda self, data: self.represent_mapping('tag:yaml.org,2002:map', data.items())
yaml.add_representer(OrderedDict, represent_dict_order)
setup_yaml()
class AkvoYamlNode(yaml.YAMLObject):
yaml_tag = u'AkvoData'
def __init__(self):
self.Akvo_VERSION = VERSION
self.Import = OrderedDict() # {}
self.Processing = [] # OrderedDict()
self.Stacking = OrderedDict()
self.META = OrderedDict()
def __repr__(self):
return "%s(name=%r, Akvo_VERSION=%r, Import=%r, Processing=%r, self.Stacking=%r, self.META=%r)" % (
self.__class__.__name__, self.Akvo_VERSION, self.Import, self.Processing, self.Stacking, self.META )
try:
import thread
except ImportError:
import _thread as thread #Py3K compatibility
class MyPopup(QtWidgets.QWidget):
def __init__(self, name):
super().__init__()
self.name = name
self.initUI()
def initUI(self):
lblName = QtWidgets.QLabel(self.name, self)
class ApplicationWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
#QtWidgets.QMainWindow.__init__(self)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
# alternative to calling pyuic
#self.ui = uic.loadUi('main.ui', self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.RAWDataProc = None
self.YamlNode = AkvoYamlNode()
# initialise some stuff
self.ui.lcdNumberTauPulse2.setEnabled(0)
self.ui.lcdNumberTauPulse1.setEnabled(0)
self.ui.lcdNumberNuTx.setEnabled(0)
self.ui.lcdNumberTuneuF.setEnabled(0)
self.ui.lcdNumberSampFreq.setEnabled(0)
self.ui.lcdNumberTauDelay.setEnabled(0)
self.ui.lcdNumberNQ.setEnabled(0)
self.logText = []
#######################
##################### #
## Make connections # #
##################### #
#######################
##############
# Menu items #
##############
self.ui.actionOpen_GMR.triggered.connect(self.openGMRRAWDataset)
self.ui.actionLoad_MIDI.triggered.connect(self.loadMIDI2Dataset)
self.ui.actionSave_Preprocessed_Dataset.triggered.connect(self.SavePreprocess)
self.ui.actionExport_Preprocessed_Dataset.triggered.connect(self.ExportPreprocess)
self.ui.actionExport_Preprocessed_Dataset.setEnabled(False)
self.ui.actionOpen_Preprocessed_Dataset.triggered.connect(self.OpenPreprocess)
self.ui.actionAboutAkvo.triggered.connect(self.about)
###########
# Buttons #
###########
#self.ui.loadDataPushButton.pressed.connect(self.loadRAW)
self.ui.sumDataGO.pressed.connect( self.sumDataChans )
self.ui.bandPassGO.pressed.connect( self.bandPassFilter )
self.ui.filterDesignPushButton.pressed.connect( self.designFilter )
self.ui.fdDesignPushButton.pressed.connect( self.designFDFilter )
self.ui.downSampleGO.pressed.connect( self.downsample )
self.ui.windowFilterGO.pressed.connect( self.windowFilter )
self.ui.adaptGO.pressed.connect( self.adaptFilter )
self.ui.adaptFDGO.pressed.connect( self.adaptFilterFD )
self.ui.qdGO.pressed.connect( self.quadDet )
self.ui.gateIntegrateGO.pressed.connect( self.gateIntegrate )
self.ui.calcQGO.pressed.connect( self.calcQ )
self.ui.FDSmartStackGO.pressed.connect( self.FDSmartStack )
self.ui.harmonicGO.pressed.connect( self.harmonicModel )
self.ui.K0Data.pressed.connect( self.K0DataSelect )
self.ui.invDataButton.pressed.connect( self.invDataSelect )
self.ui.invKernelButton.pressed.connect( self.invKernelSelect )
self.ui.f0K1Spin.valueChanged.connect( self.LCDHarmonics )
self.ui.f0KNSpin.valueChanged.connect( self.LCDHarmonics )
self.ui.f0KsSpin.valueChanged.connect( self.LCDHarmonics )
self.ui.f0Spin.valueChanged.connect( self.LCDHarmonics )
self.ui.NHarmonicsFreqsSpin.valueChanged.connect( self.LCDHarmonics2 )
self.ui.f1K1Spin.valueChanged.connect( self.LCDHarmonics2 )
self.ui.f1KNSpin.valueChanged.connect( self.LCDHarmonics2 )
self.ui.f1KsSpin.valueChanged.connect( self.LCDHarmonics2 )
self.ui.f1Spin.valueChanged.connect( self.LCDHarmonics2 )
self.ui.plotQD.setEnabled(False)
self.ui.plotQD.pressed.connect( self.plotQD )
self.ui.plotGI.setEnabled(False)
self.ui.plotGI.pressed.connect( self.plotGI )
# balance the Larmor frequency info and Tx off resonance info
self.ui.intensitySpinBox.valueChanged.connect( self.adjustLarmor )
self.ui.txv.valueChanged.connect( self.adjustB0 )
self.ui.larmorv.valueChanged.connect( self.adjustB02 )
# Kernel
self.ui.calcK0.pressed.connect( self.calcK0 )
# Inversion
self.ui.invertButton.pressed.connect( self.QTInv )
# META
self.ui.locEdit.editingFinished.connect( self.logSite )
self.ui.UTMzone.currentIndexChanged.connect( self.logSite )
self.ui.latBand.currentIndexChanged.connect( self.logSite )
self.ui.ellipsoid.currentIndexChanged.connect( self.logSite )
self.ui.incSpinBox.valueChanged.connect( self.logSite )
self.ui.decSpinBox.valueChanged.connect( self.logSite )
self.ui.intensitySpinBox.valueChanged.connect( self.logSite )
self.ui.tempSpinBox.valueChanged.connect( self.logSite )
self.ui.timeEdit.timeChanged.connect( self.logSite )
self.ui.dateEdit.dateChanged.connect( self.logSite )
# this may call the yaml stuff too often...
self.ui.txtComments.textChanged.connect( self.logSite )
self.ui.plotLoops.pressed.connect( self.plotLoops2 )
self.ui.removeLoopButton.pressed.connect( self.removeLoop )
# Loops
self.ui.addLoopButton.pressed.connect( self.loopAdd )
self.loops = {}
# hide header info box
#self.ui.headerFileBox.setVisible(False)
self.ui.headerFileBox.clicked.connect( self.headerBoxShrink )
self.ui.headerBox2.setVisible(False)
# Clean up the tab widget
self.ui.actionPreprocessing.triggered.connect(self.addPreProc)
self.ui.actionModelling.triggered.connect(self.addModelling)
self.ui.actionInversion.triggered.connect(self.addInversion)
# tabs
#self.ui.ProcTabs.tabCloseRequested.connect( self.closeTabs )
#self.ui.ProcTabs.tabBar().setTabButton(7, QtWidgets.QTabBar.RightSide,None)
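        # strip the trailing tabs; index 4 is removed four times because the
        # remaining tab indices shift down after each removal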
self.ui.ProcTabs.removeTab(4)
self.ui.ProcTabs.removeTab(4)
self.ui.ProcTabs.removeTab(4)
self.ui.ProcTabs.removeTab(4)
#self.ui.LoadTab.close( )
# Add progressbar to statusbar
self.ui.barProgress = QtWidgets.QProgressBar()
        self.ui.statusbar.addPermanentWidget(self.ui.barProgress, 0)
        self.ui.barProgress.setMaximumSize(100, 16777215)
        self.ui.barProgress.hide()
self.ui.mplwidget_navigator.setCanvas(self.ui.mplwidget)
#self.ui.mplwidget_navigator_2.setCanvas(self.ui.mplwidget)
self.ui.txRxTable.setColumnCount(4)
self.ui.txRxTable.setRowCount(0)
self.ui.txRxTable.setHorizontalHeaderLabels( ["Label", "Geom.","Turns","Tx/Rx"] )
##########################################################################
# layer Table
self.ui.layerTableWidget.setRowCount(80)
self.ui.layerTableWidget.setColumnCount(3)
self.ui.layerTableWidget.setHorizontalHeaderLabels( [r"top [m]", r"bottom [m]", "ρ [Ωm]" ] )
# do we want this
self.ui.layerTableWidget.setDragDropOverwriteMode(False)
self.ui.layerTableWidget.setDragEnabled(True)
self.ui.layerTableWidget.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
pCell0 = QtWidgets.QTableWidgetItem()
pCell0.setFlags(QtCore.Qt.NoItemFlags) # not selectable
pCell0.setBackground( QtGui.QColor("lightgrey").lighter(110) )
pCell0.setForeground( QtGui.QColor("black") )
pCell0.setText(str("0"))
self.ui.layerTableWidget.setItem(0, 0, pCell0)
pCell1 = QtWidgets.QTableWidgetItem()
#pCell1.setFlags(QtCore.Qt.NoItemFlags) # not selectable
pCell1.setBackground( QtGui.QColor("lightblue") ) #.lighter(110) )
pCell1.setForeground( QtGui.QColor("black") )
self.ui.layerTableWidget.setItem(0, 1, pCell1)
pCell2 = QtWidgets.QTableWidgetItem()
#pCell1.setFlags(QtCore.Qt.NoItemFlags) # not selectable
pCell2.setBackground( QtGui.QColor("white") ) #.lighter(110) )
pCell2.setForeground( QtGui.QColor("black") )
self.ui.layerTableWidget.setItem(0, 2, pCell2)
for ir in range(1, self.ui.layerTableWidget.rowCount() ):
for ic in range(0, self.ui.layerTableWidget.columnCount() ):
pCell = QtWidgets.QTableWidgetItem()
#pCell.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
pCell.setFlags(QtCore.Qt.NoItemFlags) # not selectable
pCell.setBackground( QtGui.QColor("lightgrey").lighter(110) )
pCell.setForeground( QtGui.QColor("black"))
self.ui.layerTableWidget.setItem(ir, ic, pCell)
self.ui.layerTableWidget.cellChanged.connect(self.sigmaCellChanged)
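    # The harmonic LCDs display the harmonic-noise model band: first harmonic f0*K1,
    # last harmonic f0*KN, and the number of modelled frequencies (KN + 1 - K1) * Ks.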
def LCDHarmonics(self):
self.ui.lcdH1F.setEnabled(True)
self.ui.lcdH1F.display( self.ui.f0Spin.value() * self.ui.f0K1Spin.value() )
self.ui.lcdHNF.setEnabled(True)
self.ui.lcdHNF.display( self.ui.f0Spin.value() * self.ui.f0KNSpin.value() )
self.ui.lcdf0NK.setEnabled(True)
self.ui.lcdf0NK.display( (self.ui.f0KNSpin.value()+1-self.ui.f0K1Spin.value()) * self.ui.f0KsSpin.value() )
def LCDHarmonics2(self):
if self.ui.NHarmonicsFreqsSpin.value() == 2:
self.ui.lcdH1F2.setEnabled(True)
self.ui.lcdH1F2.display( self.ui.f1Spin.value() * self.ui.f1K1Spin.value() )
self.ui.lcdHNF2.setEnabled(True)
self.ui.lcdHNF2.display( self.ui.f1Spin.value() * self.ui.f1KNSpin.value() )
self.ui.lcdf0NK2.setEnabled(True)
self.ui.lcdf0NK2.display( (self.ui.f1KNSpin.value()+1-self.ui.f1K1Spin.value()) * self.ui.f1KsSpin.value() )
else:
self.ui.lcdH1F2.setEnabled(False)
self.ui.lcdHNF2.setEnabled(False)
self.ui.lcdf0NK2.setEnabled(False)
def adjustLarmor(self):
""" Triggers when the B0 intensity spin box is cycled
"""
self.ui.larmorv.setValue( self.ui.intensitySpinBox.value() * GAMMAH )
self.ui.txv.setValue( self.RAWDataProc.transFreq - self.ui.larmorv.value() )
def adjustB0(self):
""" Triggers when tx frequency offset is cycled
"""
self.ui.intensitySpinBox.setValue( (self.RAWDataProc.transFreq - self.ui.txv.value()) / GAMMAH )
def adjustB02(self):
""" Triggers when Larmor frequency spin box is cycled o
"""
self.ui.intensitySpinBox.setValue( (self.ui.larmorv.value()) / GAMMAH )
def closeTabs(self):
#self.ui.ProcTabs.removeTab(idx)
self.ui.ProcTabs.clear( )
def addPreProc(self):
if self.ui.actionPreprocessing.isChecked():
self.ui.actionModelling.setChecked(False)
self.ui.actionInversion.setChecked(False)
self.ui.ProcTabs.clear( )
self.ui.ProcTabs.insertTab( 0, self.ui.LoadTab, "Load" )
self.ui.ProcTabs.insertTab( 1, self.ui.NCTab, "Noise removal" )
self.ui.ProcTabs.insertTab( 2, self.ui.QCTab, "QC" )
self.ui.ProcTabs.insertTab( 3, self.ui.METATab, "META" )
self.ui.ProcTabs.insertTab( 4, self.ui.LogTab, "Log" )
else:
self.ui.ProcTabs.removeTab(0)
self.ui.ProcTabs.removeTab(0)
self.ui.ProcTabs.removeTab(0)
self.ui.ProcTabs.removeTab(0)
def addModelling(self):
if self.ui.actionModelling.isChecked():
self.ui.actionPreprocessing.setChecked(False)
self.ui.actionInversion.setChecked(False)
self.ui.ProcTabs.clear( )
self.ui.ProcTabs.insertTab( 0, self.ui.KernTab, "Kernel" )
self.ui.ProcTabs.insertTab( 1, self.ui.ModelTab, "Modelling" )
#self.ui.ProcTabs.insertTab( 2, self.ui.LogTab, "Log" )
else:
self.ui.ProcTabs.removeTab(0)
self.ui.ProcTabs.removeTab(0)
    def addInversion(self):
if self.ui.actionInversion.isChecked():
self.ui.actionPreprocessing.setChecked(False)
self.ui.actionModelling.setChecked(False)
self.ui.ProcTabs.clear( )
self.ui.ProcTabs.insertTab( 0, self.ui.InvertTab, "QT Inversion" )
self.ui.ProcTabs.insertTab( 1, self.ui.AppraiseTab, "Appraisal" )
#self.ui.ProcTabs.insertTab( 2, self.ui.LogTab, "Log" )
else:
self.ui.ProcTabs.removeTab(0)
self.ui.ProcTabs.removeTab(0)
def invDataSelect(self):
try:
with open('.akvo.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
        self.akvoDataFile = QtWidgets.QFileDialog.getOpenFileName(self, 'Select Data File', fpath, r"Akvo datafiles (*.akvoProcData *.yaml)")[0]
self.ui.dataText.clear()
self.ui.dataText.append( self.akvoDataFile )
def K0DataSelect(self):
try:
with open('.akvo.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
        self.K0akvoDataFile = QtWidgets.QFileDialog.getOpenFileName(self, 'Select Data File', fpath, r"Akvo datafiles (*.akvoProcData *.yaml)")[0]
# populate combo box with loops
if os.path.isfile( self.K0akvoDataFile ):
self.ui.K0DataText.clear()
self.ui.K0DataText.append( self.K0akvoDataFile )
with open(self.K0akvoDataFile) as f:
parse = yaml.load( f, Loader=yaml.Loader )
self.ui.txListWidget.clear()
self.ui.rxListWidget.clear()
for loop in parse.META["Loops"]:
print(loop)
self.ui.txListWidget.addItem( parse.META["Loops"][loop] )
self.ui.rxListWidget.addItem( parse.META["Loops"][loop] )
# TODO, why are these necessary
self.ui.txListWidget.setCurrentRow(0)
self.ui.rxListWidget.setCurrentRow(0)
# else do nothing
def invKernelSelect(self):
try:
with open('.akvo.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
self.K0file = QtWidgets.QFileDialog.getOpenFileName(self, 'Select Kernel File', fpath, r"Akvo kernels (*.akvoK0)")[0]
self.ui.kernelText.clear()
self.ui.kernelText.append(self.K0file)
def QTInv(self):
print("Big RED INVERT BUTTON")
try:
with open('.akvo.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
#K0file = self.ui.kernelText.text()
#akvoDataFile = self.ui.dataText.text()
T2lo = self.ui.T2low.value()
T2hi = self.ui.T2hi.value()
NT2 = self.ui.NT2.value()
dataChan = self.ui.invChan.currentText()
t2Obj = self.ui.T2Objective.currentText()
depthObj = self.ui.depthObjective.currentText()
alpha_0 = self.ui.initialAlpha.value()
invDict = dict()
invDict["data"] = dict()
invDict["data"] = dict()
invDict["data"][self.akvoDataFile] = dict()
invDict["data"][self.akvoDataFile]["channels"] = [dataChan,]
invDict["K0"] = [self.K0file,]
invDict["T2Bins"] = dict()
invDict["T2Bins"]["low"] = T2lo
invDict["T2Bins"]["high"] = T2hi
invDict["T2Bins"]["number"] = NT2
invDict["NonLinearRefinement"] = self.ui.NLButton.isChecked()
invDict["CalcDOI"] = self.ui.DOIButton.isChecked()
node = yaml.YAML()
kpo = open( "invert.yml", 'w' )
node.dump(invDict, kpo)
callBox = callScript( ) #QtWidgets.QDialog()
callBox.ui = Ui_callScript()
callBox.ui.setupUi( callBox )
callBox.setupQTInv( "invert.yml" )
callBox.exec_()
callBox.show()
def calcK0(self):
try:
with open('.akvo.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
#self.K0akvoDataFile = QtWidgets.QFileDialog.getOpenFileName(self, 'Select Datafile File', fpath, r"Akvo datafiles (*.yaml)")[0]
#akvoData = QtWidgets.QFileDialog.getOpenFileName(self, 'Open Datafile File', fpath, r"Akvo datafiles (*.yaml)")[0]
txCoilList = self.ui.txListWidget.selectedItems() #currentItem().text()
txCoils = []
for txCoil in txCoilList:
print("txCoil", txCoil.text())
txCoils.append(txCoil.text())
        rxCoilList = self.ui.rxListWidget.selectedItems() #currentItem().text()
rxCoils = []
for rxCoil in rxCoilList:
print("rxCoil", rxCoil.text())
rxCoils.append(rxCoil.text())
saveStr = QtWidgets.QFileDialog.getSaveFileName(self, "Save kernel as", fpath, r"Merlin KernelV0 (*.akvoK0)")[0]
intDict = dict()
intDict["origin_n"] = self.ui.originN.value()
intDict["origin_e"] = self.ui.originE.value()
intDict["origin_d"] = self.ui.originD.value()
intDict["size_n"] = self.ui.sizeN.value()
intDict["size_e"] = self.ui.sizeE.value()
intDict["size_d"] = self.ui.sizeD.value()
intDict["nLay"] = self.ui.NLayers.value()
intDict["thick1"] = self.ui.thick1.value()
intDict["thickN"] = self.ui.thickN.value()
intDict["Lspacing"] = self.ui.layerSpacing.currentText()
intDict["minLevel"] = self.ui.minLevel.value()
intDict["maxLevel"] = self.ui.maxLevel.value()
intDict["branchTol"] = self.ui.branchTol.value()
intDict["txCoils"] = txCoils
intDict["rxCoils"] = rxCoils
# conductivity model...
#tops = self.ui.layerTableWidget.col(0)
#print("Tops", tops)
tops = []
itop = 0
while self.ui.layerTableWidget.item(itop, 0).text():
tops.append( float(self.ui.layerTableWidget.item(itop,0).text()) )
itop += 1
bots = []
ibot = 0
while self.ui.layerTableWidget.item(ibot, 1).text():
bots.append( float(self.ui.layerTableWidget.item(ibot, 1).text()) )
ibot += 1
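        # convert the table resistivities ρ [Ωm] to conductivities σ = 1/ρ [S/m]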
sigs = []
isig = 0
while self.ui.layerTableWidget.item(isig, 2).text():
sigs.append( 1./float(self.ui.layerTableWidget.item(isig, 2).text()) )
isig += 1
intDict["tops"] = tops
intDict["bots"] = bots
intDict["sigs"] = sigs
node = yaml.YAML()
kpo = open( "kparams.yml", 'w' )
node.dump(intDict, kpo)
callBox = callScript( ) #QtWidgets.QDialog()
callBox.ui = Ui_callScript()
callBox.ui.setupUi( callBox )
callBox.setupCB( self.K0akvoDataFile, "kparams.yml", saveStr )
callBox.exec_()
callBox.show()
def loopAdd(self):
#print(self.ui.loopLabel.text())
#print(self.ui.loopGeom.currentText())
#print(self.ui.loopType.currentText())
#print( "label len", len(self.ui.loopLabel.text()) )
if len(self.ui.loopLabel.text().strip()) == 0:
Error = QtWidgets.QMessageBox()
Error.setWindowTitle("Error!")
Error.setText("Loop label cannot be blank or repeated")
Error.setDetailedText("Each loop label must be unique and comprise at least one character. Leading and trailing whitespace will be trimmed.")
Error.exec_()
else:
### Circular loop
if self.ui.loopGeom.currentText() == "Circular":
dialog = QtWidgets.QDialog()
dialog.ui = Ui_circularLoopAdd()
dialog.ui.setupUi(dialog)
dialog.exec_()
dialog.show()
if dialog.result():
cn = dialog.ui.centreNorth.value()
ce = dialog.ui.centreEast.value()
ht = dialog.ui.loopHeight.value()
rad = dialog.ui.loopRadius.value()
turns = dialog.ui.loopTurns.value()
ns = dialog.ui.segments.value()
cwise = dialog.ui.cwiseBox.currentIndex()
#print("cwise", cwise)
#dip = dialog.ui.dip.value()
#azimuth = dialog.ui.az.value()
self.loops[self.ui.loopLabel.text()] = FDEM1D.PolygonalWireAntenna()
self.loops[self.ui.loopLabel.text()].SetNumberOfPoints( dialog.ui.segments.value() + 1 )
self.loops[self.ui.loopLabel.text()].SetNumberOfTurns( dialog.ui.loopTurns.value() )
points = np.linspace(0, 2*np.pi, dialog.ui.segments.value()+1)
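                    # parameterise the circle: northing = cn + r*sin(θ), easting = ce + r*cos(θ);
                    # the clockwise option flips the sign of the sin term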
for iseg, ipt in enumerate(points):
if cwise == 0:
self.loops[self.ui.loopLabel.text()].SetPoint(iseg, ( cn+rad*np.sin(ipt), ce+rad*np.cos(ipt), ht) )
else:
self.loops[self.ui.loopLabel.text()].SetPoint(iseg, ( cn-rad*np.sin(ipt), ce+rad*np.cos(ipt), ht) )
self.loops[self.ui.loopLabel.text()].SetNumberOfFrequencies(1)
self.loops[self.ui.loopLabel.text()].SetCurrent(1.)
if self.ui.loopGeom.currentText() == "figure-8":
dialog = QtWidgets.QDialog()
dialog.ui = Ui_figure8LoopAdd()
dialog.ui.setupUi(dialog)
dialog.exec_()
dialog.show()
if dialog.result():
cn1 = dialog.ui.centreNorth1.value()
ce1 = dialog.ui.centreEast1.value()
cn2 = dialog.ui.centreNorth2.value()
ce2 = dialog.ui.centreEast2.value()
ht = dialog.ui.loopHeight.value()
rad = dialog.ui.loopRadius.value()
turns = dialog.ui.loopTurns.value()
ns = dialog.ui.segments.value()
cwise = dialog.ui.cwiseBox.currentIndex()
self.loops[self.ui.loopLabel.text()] = FDEM1D.PolygonalWireAntenna()
self.loops[self.ui.loopLabel.text()].SetNumberOfPoints( 2*dialog.ui.segments.value() + 1 )
self.loops[self.ui.loopLabel.text()].SetNumberOfTurns( dialog.ui.loopTurns.value() )
# first loop
points = np.linspace(0, 2*np.pi, dialog.ui.segments.value())
ptsL = []
for iseg, ipt in enumerate(points):
ptsL.append( np.array( [cn1+rad*np.sin(ipt), ce1+rad*np.cos(ipt)] ))
lenP = len(points)
# search for closest point, ugly and not efficient, but it's not critical here
closest = 1e8
iclosest = -1
for iseg, ipt in enumerate(points):
p2 = np.array([cn2-rad*np.sin(ipt), ce2-rad*np.cos(ipt)])
for p1 in ptsL:
dist = np.linalg.norm(p1-p2)
if dist < closest:
closest = dist
iclosest = iseg
points = np.concatenate([points[iclosest::],points[0:iclosest]])
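                    # rotate the parameterisation so the first loop starts at the vertex
                    # nearest the second loop, keeping the cross-over segment short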
# Fill first loop
point1 = False
for iseg, ipt in enumerate(points):
if cwise == 0:
self.loops[self.ui.loopLabel.text()].SetPoint(iseg, ( cn1+rad*np.sin(ipt), ce1+rad*np.cos(ipt), ht) )
pointlast = ( cn1+rad*np.sin(ipt), ce1+rad*np.cos(ipt), ht)
if not point1:
point1 = ( cn1+rad*np.sin(ipt), ce1+rad*np.cos(ipt), ht)
else:
self.loops[self.ui.loopLabel.text()].SetPoint(iseg, ( cn1-rad*np.sin(ipt), ce1+rad*np.cos(ipt), ht) )
if not point1:
point1 = ( cn1-rad*np.sin(ipt), ce1+rad*np.cos(ipt), ht)
pointlast = ( cn1-rad*np.sin(ipt), ce1+rad*np.cos(ipt), ht)
lenP = len(points)
# reorder points again to find nearest point in second loop
closest = 99999
iclosest = -1
for iseg, ipt in enumerate(points):
if cwise == 0:
p2 = np.array([cn2-rad*np.sin(ipt), ce2+rad*np.cos(ipt)])
else:
p2 = np.array([cn2+rad*np.sin(ipt), ce2+rad*np.cos(ipt)])
                        dist = np.linalg.norm(np.array(pointlast[0:2]) - p2)
                        if dist < closest:
                            closest = dist
                            iclosest = iseg
points = np.concatenate([points[iclosest::],points[0:iclosest]])
# fill second loop
for iseg, ipt in enumerate(points):
if cwise == 0:
self.loops[self.ui.loopLabel.text()].SetPoint(lenP+iseg, ( cn2-rad*np.sin(ipt), ce2+rad*np.cos(ipt), ht) )
else:
self.loops[self.ui.loopLabel.text()].SetPoint(lenP+iseg, ( cn2+rad*np.sin(ipt), ce2+rad*np.cos(ipt), ht) )
# close loop
self.loops[self.ui.loopLabel.text()].SetPoint(lenP+iseg+1, point1)
self.loops[self.ui.loopLabel.text()].SetNumberOfFrequencies(1)
self.loops[self.ui.loopLabel.text()].SetCurrent(1.)
if self.ui.loopGeom.currentText() == "polygon":
dialog = QtWidgets.QDialog()
dialog.ui = Ui_polygonalLoopAdd()
dialog.ui.setupUi(dialog)
##########################################################################
# Loop Table
dialog.ui.loopTableWidget.setRowCount(80)
dialog.ui.loopTableWidget.setColumnCount(3)
#dialog.ui.loopTableWidget.horizontalHeader().setSectionResizeMode(0, QtGui.Qt.QHeaderView.Stretch)
dialog.ui.loopTableWidget.setHorizontalHeaderLabels( \
["Northing [m]","Easting [m]","Height [m]"])
for ir in range(0, dialog.ui.loopTableWidget.rowCount() ):
for ic in range(0, 3): #dialog.ui.loopTableWidget.columnCount() ):
pCell = QtWidgets.QTableWidgetItem()
#pCell.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable)
#pCell.setFlags(QtCore.Qt.NoItemFlags) # not selectable
#pCell.setBackground( QtGui.QColor("lightgrey").lighter(110) )
dialog.ui.loopTableWidget.setItem(ir, ic, pCell)
#dialog.ui.loopTableWidget.cellChanged.connect(self.loopCellChanged)
#dialog.ui.loopTableWidget.itemClicked.connect(self.loopCellClicked)
#self.ui.loopTableWidget.cellPressed.connect(self.loopCellChanged)
#self.ui.loopTableWidget.cellPressed.connect(self.loopCellClicked)
dialog.ui.loopTableWidget.setDragDropOverwriteMode(False)
dialog.ui.loopTableWidget.setDragEnabled(False)
#self.ui.loopTableWidget.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
dialog.ui.loopTableWidget.resizeColumnsToContents()
dialog.exec_()
dialog.show()
if dialog.result():
self.loops[self.ui.loopLabel.text()] = FDEM1D.PolygonalWireAntenna()
self.loops[self.ui.loopLabel.text()].SetNumberOfTurns( dialog.ui.loopTurns.value() )
npts = 0
for ir in range(0, dialog.ui.loopTableWidget.rowCount() ):
if len(dialog.ui.loopTableWidget.item(ir, 0).text()) == 0:
break
npts += 1
self.loops[self.ui.loopLabel.text()].SetNumberOfPoints( npts )
for ir in range( 0, npts ):
self.loops[self.ui.loopLabel.text()].SetPoint(ir, eval(dialog.ui.loopTableWidget.item(ir, 0).text()), \
eval(dialog.ui.loopTableWidget.item(ir, 1).text()), \
eval(dialog.ui.loopTableWidget.item(ir, 2).text()) \
)
self.loops[self.ui.loopLabel.text()].SetNumberOfFrequencies(1)
self.loops[self.ui.loopLabel.text()].SetCurrent(1.)
# general across all types
if dialog.result():
            with open( self.ui.loopLabel.text() + ".pwa", 'w' ) as yml:
                print( self.loops[self.ui.loopLabel.text()], file=yml)
# update the table
self.ui.txRxTable.setRowCount( len(self.loops.keys()) )
pCell = QtWidgets.QTableWidgetItem()
pCell.setText( self.ui.loopLabel.text() )
pCell.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.txRxTable.setItem( len(self.loops.keys())-1, 0, pCell)
gCell = QtWidgets.QTableWidgetItem()
gCell.setText( self.ui.loopGeom.currentText() )
gCell.setFlags( QtCore.Qt.ItemIsEnabled )
self.ui.txRxTable.setItem( len(self.loops.keys())-1, 1, gCell)
tCell = QtWidgets.QTableWidgetItem()
tCell.setText( str(dialog.ui.loopTurns.value()) )
tCell.setFlags( QtCore.Qt.ItemIsEnabled )
self.ui.txRxTable.setItem( len(self.loops.keys())-1, 2, tCell)
txCell = QtWidgets.QTableWidgetItem()
txCell.setText( str(self.ui.loopType.currentText()) )
txCell.setFlags( QtCore.Qt.ItemIsEnabled )
self.ui.txRxTable.setItem( len(self.loops.keys())-1, 3, txCell)
def headerBoxShrink(self):
#self.ui.headerFileBox.setVisible(False)
if self.ui.headerFileBox.isChecked( ):
#self.ui.headerFileBox.setMinimumSize(460,250)
self.ui.headerBox2.setVisible(True)
else:
#self.ui.headerFileBox.setMinimumSize(460,50)
self.ui.headerBox2.setVisible(False)
def sigmaCellChanged(self):
self.ui.layerTableWidget.cellChanged.disconnect(self.sigmaCellChanged)
        # TODO consider building the model whenever this is called. Would be nice to be able to
        # do that. Would require a distribution of T2 instead, I guess.
jj = self.ui.layerTableWidget.currentColumn()
ii = self.ui.layerTableWidget.currentRow()
val = "class 'NoneType'>"
try:
val = eval (str( self.ui.layerTableWidget.item(ii, jj).text() ))
except:
#if jj != 0:
# Error = QtWidgets.QMessageBox()
# Error.setWindowTitle("Error!")
# Error.setText("Non-numeric value encountered")
self.ui.layerTableWidget.cellChanged.connect(self.sigmaCellChanged)
return
if jj == 1:
#item.setFlags(QtCore.Qt.ItemIsEnabled)
pCell = self.ui.layerTableWidget.item(ii, jj)
pCell.setBackground( QtGui.QColor("white"))
pCell = self.ui.layerTableWidget.item(ii+1, jj-1)
            if pCell is None:
pCell = QtWidgets.QTableWidgetItem()
pCell.setFlags(QtCore.Qt.ItemIsEnabled)
self.ui.layerTableWidget.setItem(ii+1, jj-1, pCell)
if ii == 0:
pCell.setText(str(val))
#pCell3 = self.ui.layerTableWidget.item(ii+1, jj)
#print ("setting", ii, jj, type(pCell3))
#print ( "setting", ii, jj, type(pCell3))
#pCell3.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled )
#pCell3.setFlags( QtCore.Qt.ItemIsEditable )
elif ii > 0:
val2 = eval (str( self.ui.layerTableWidget.item(ii-1, jj).text() ))
#print ("val2", val2, val, type(val))
#if str(type(pCell)) == "<class 'NoneType'>":
if type(val) == str or val > val2:
pCell.setText(str(val))
else:
Error = QtWidgets.QMessageBox()
Error.setWindowTitle("Error!")
Error.setText("Non-increasing layer detected")
Error.setDetailedText("Each layer interface must be below the one above it.")
Error.exec_()
#err_msg = "Quadrature detection has already been done!"
#reply =QtWidgets.QMessageBox.critical(self, 'Error',
# err_msg)
pCell2 = self.ui.layerTableWidget.item(ii, jj)
pCell2.setText(str(""))
self.ui.layerTableWidget.cellChanged.connect(self.sigmaCellChanged)
return
# enable next layer
pCell4 = self.ui.layerTableWidget.item(ii+1, jj)
pCell4.setBackground( QtGui.QColor("lightblue") ) #.lighter(110))
pCell4.setForeground( QtGui.QColor("black"))
pCell4.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled )
pCell5 = self.ui.layerTableWidget.item(ii+1, jj+1)
pCell5.setBackground( QtGui.QColor("white"))
pCell5.setForeground( QtGui.QColor("black"))
pCell5.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled )
if ii == 0 and jj == 0:
pCell = self.ui.layerTableWidget.item(0, 1)
pCell.setBackground(QtGui.QColor("lightblue")) #.lighter(110) )
pCell.setForeground( QtGui.QColor("black"))
pCell.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled )
self.ui.layerTableWidget.cellChanged.connect(self.sigmaCellChanged)
def plotLoops2(self):
self.ui.mplwidget.reAxH(1)
for loop in self.loops:
POINTS = self.loops[loop].GetPoints().T
self.ui.mplwidget.ax1.plot( POINTS[:,1], POINTS[:,0], label=loop )
self.ui.mplwidget.ax1.spines['right'].set_visible(False)
self.ui.mplwidget.ax1.spines['top'].set_visible(False)
self.ui.mplwidget.ax1.set_xlabel("easting (m)")
self.ui.mplwidget.ax1.set_ylabel("northing (m)")
self.ui.mplwidget.ax1.legend()
self.ui.mplwidget.ax1.set_aspect('equal') #, adjustable='box')
self.ui.mplwidget.draw()
def removeLoop(self):
del self.loops[ self.ui.txRxTable.item( self.ui.txRxTable.currentRow(), 0).text() ]
self.ui.txRxTable.removeRow(self.ui.txRxTable.currentRow())
def about(self):
# TODO proper popup with info
#self.w = MyPopup("""About Akvo \n
# Akvo is an open source project developed primarily by Trevor Irons.
#""")
#self.w.setGeometry(100, 100, 400, 200)
#self.w.show()
#print("ABOUT")
# Just a splash screen for now
logo = pkg_resources.resource_filename(__name__, 'akvo-about.png')
pixmap = QtGui.QPixmap(logo)
self.splash = QtWidgets.QSplashScreen(pixmap, QtCore.Qt.WindowStaysOnTopHint)
self.splash.show()
def connectGMRDataProcessor(self):
self.RAWDataProc = mrsurvey.GMRDataProcessor()
self.RAWDataProc.progressTrigger.connect(self.updateProgressBar)
self.RAWDataProc.enableDSPTrigger.connect(self.enableDSP)
self.RAWDataProc.doneTrigger.connect(self.doneStatus)
self.RAWDataProc.updateProcTrigger.connect(self.updateProc)
def loadMIDI2Dataset (self):
""" Opens a MIDI file and extracts header info
"""
try:
with open('.midi2.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
self.headerstr = QtWidgets.QFileDialog.getExistingDirectory(self, 'Load MIDI 2 Directory', fpath)
self.ui.headerFileTextBrowser.clear()
self.ui.headerFileTextBrowser.append(self.headerstr)
if len(self.headerstr) == 0:
return
# TODO, should rename this class, use MIDI path
self.connectGMRDataProcessor()
self.RAWDataProc.readMIDI2Header(str(self.headerstr))
# look in the directory for all the files
self.ui.loadDataPushButton.pressed.connect(self.loadMIDI2)
# If we got this far, enable all the widgets
self.ui.lcdNumberTauPulse1.setEnabled(True)
self.ui.lcdNumberNuTx.setEnabled(True)
self.ui.lcdNumberTuneuF.setEnabled(True)
self.ui.lcdNumberSampFreq.setEnabled(True)
self.ui.lcdNumberNQ.setEnabled(True)
self.ui.headerFileBox.setEnabled(True)
self.ui.headerFileBox.setChecked( True )
self.ui.headerBox2.setVisible(True)
self.ui.inputRAWParametersBox.setEnabled(True)
self.ui.loadDataPushButton.setEnabled(True)
# make plots as you import the dataset
self.ui.plotImportCheckBox.setEnabled(True)
self.ui.plotImportCheckBox.setChecked(True)
# Update info from the header into the GUI
self.ui.pulseTypeTextBrowser.clear()
self.ui.pulseTypeTextBrowser.append(self.RAWDataProc.pulseType)
self.ui.lcdNumberNuTx.display(self.RAWDataProc.transFreq)
self.ui.lcdNumberTauPulse1.display(1e3*self.RAWDataProc.pulseLength[0])
self.ui.lcdNumberTuneuF.display(self.RAWDataProc.TuneCapacitance)
self.ui.lcdNumberSampFreq.display(self.RAWDataProc.samp)
self.ui.lcdNumberNQ.display(self.RAWDataProc.nPulseMoments)
self.ui.DeadTimeSpinBox.setValue(1e3*self.RAWDataProc.deadTime)
self.ui.CentralVSpinBox.setValue( self.RAWDataProc.transFreq )
# set the B0 field according to Tx as an initial guess
self.ui.intensitySpinBox.setValue( self.RAWDataProc.transFreq/GAMMAH )
if self.RAWDataProc.pulseType != "FID":
self.ui.lcdNumberTauPulse2.setEnabled(1)
self.ui.lcdNumberTauPulse2.display(1e3*self.RAWDataProc.pulseLength[1])
self.ui.lcdNumberTauDelay.setEnabled(1)
self.ui.lcdNumberTauDelay.display(1e3*self.RAWDataProc.interpulseDelay)
self.ui.FIDProcComboBox.clear()
if self.RAWDataProc.pulseType == "4PhaseT1" or self.RAWDataProc.pulseType == "T1":
self.ui.FIDProcComboBox.insertItem(0, "Pulse 1")
self.ui.FIDProcComboBox.insertItem(1, "Pulse 2")
self.ui.FIDProcComboBox.insertItem(2, "Both")
self.ui.FIDProcComboBox.setCurrentIndex (1)
elif self.RAWDataProc.pulseType == "FID":
self.ui.FIDProcComboBox.insertItem(0, "Pulse 1")
self.ui.FIDProcComboBox.setCurrentIndex (0)
def openGMRRAWDataset(self):
""" Opens a GMR header file
"""
try:
with open('.gmr.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
self.headerstr = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', fpath)[0] # arg2 = File Type 'All Files (*)'
self.ui.headerFileTextBrowser.clear()
self.ui.headerFileTextBrowser.append(self.headerstr)
if len(self.headerstr) == 0:
return
# clear the processing log
self.ui.logTextBrowser.clear()
self.logText = [] #MAK 20170126
        path, filen = os.path.split(str(self.headerstr))
        with open('.gmr.last.path', 'w') as f:
            f.write( str(self.headerstr) ) # remember the last opened file
self.connectGMRDataProcessor()
self.RAWDataProc.readHeaderFile(str(self.headerstr))
# make sure we will use GMR path
self.ui.loadDataPushButton.pressed.connect(self.loadRAW)
# If we got this far, enable all the widgets
self.ui.lcdNumberTauPulse1.setEnabled(True)
self.ui.lcdNumberNuTx.setEnabled(True)
self.ui.lcdNumberTuneuF.setEnabled(True)
self.ui.lcdNumberSampFreq.setEnabled(True)
self.ui.lcdNumberNQ.setEnabled(True)
self.ui.headerFileBox.setEnabled(True)
self.ui.headerFileBox.setChecked( True )
self.ui.headerBox2.setVisible(True)
self.ui.inputRAWParametersBox.setEnabled(True)
self.ui.loadDataPushButton.setEnabled(True)
# make plots as you import the dataset
self.ui.plotImportCheckBox.setEnabled(True)
self.ui.plotImportCheckBox.setChecked(True)
# Update info from the header into the GUI
self.ui.pulseTypeTextBrowser.clear()
self.ui.pulseTypeTextBrowser.append(self.RAWDataProc.pulseType)
self.ui.lcdNumberNuTx.display(self.RAWDataProc.transFreq)
self.ui.lcdNumberTauPulse1.display(1e3*self.RAWDataProc.pulseLength[0])
self.ui.lcdNumberTuneuF.display(self.RAWDataProc.TuneCapacitance)
self.ui.lcdNumberSampFreq.display(self.RAWDataProc.samp)
self.ui.lcdNumberNQ.display(self.RAWDataProc.nPulseMoments)
self.ui.DeadTimeSpinBox.setValue(1e3*self.RAWDataProc.deadTime)
self.ui.CentralVSpinBox.setValue( self.RAWDataProc.transFreq )
# set the B0 field according to Tx as an initial guess
self.ui.intensitySpinBox.setValue( self.RAWDataProc.transFreq/GAMMAH )
if self.RAWDataProc.pulseType != "FID":
self.ui.lcdNumberTauPulse2.setEnabled(1)
self.ui.lcdNumberTauPulse2.display(1e3*self.RAWDataProc.pulseLength[1])
self.ui.lcdNumberTauDelay.setEnabled(1)
self.ui.lcdNumberTauDelay.display(1e3*self.RAWDataProc.interpulseDelay)
self.ui.FIDProcComboBox.clear()
if self.RAWDataProc.pulseType == "4PhaseT1" or self.RAWDataProc.pulseType == "T1":
self.ui.FIDProcComboBox.insertItem(0, "Pulse 1")
self.ui.FIDProcComboBox.insertItem(1, "Pulse 2")
self.ui.FIDProcComboBox.insertItem(2, "Both")
self.ui.FIDProcComboBox.setCurrentIndex (1)
elif self.RAWDataProc.pulseType == "FID":
self.ui.FIDProcComboBox.insertItem(0, "Pulse 1")
self.ui.FIDProcComboBox.setCurrentIndex (0)
def ExportPreprocess(self):
""" This method exports to YAML
"""
try:
with open('.akvo.last.yaml.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
fdir = os.path.dirname(fpath)
# Pickle the preprocessed data dictionary
SaveStr = QtWidgets.QFileDialog.getSaveFileName(self, "Save as", fdir, r"Processed data (*.akvoProcData)")[0]
spath,filen=os.path.split(str(SaveStr))
        with open('.akvo.last.yaml.path', 'w') as f:
            f.write( str(spath) ) # remember the last save directory
INFO = {}
INFO["headerstr"] = str(self.headerstr)
INFO["pulseType"] = self.RAWDataProc.pulseType
INFO["transFreq"] = np.array(self.RAWDataProc.transFreq).tolist() # numpy is for MIDI datasets
INFO["pulseLength"] = np.array(self.RAWDataProc.pulseLength).tolist()
INFO["TuneCapacitance"] = np.array(self.RAWDataProc.TuneCapacitance).tolist()
#INFO["samp"] = self.RAWDataProc.samp
INFO["nPulseMoments"] = self.RAWDataProc.nPulseMoments
#INFO["deadTime"] = self.RAWDataProc.deadTime
INFO["processed"] = "Akvo v" + VERSION + ", on " + time.strftime("%d/%m/%Y")
# Pulse current info
ip = 0
INFO["Pulses"] = {}
for pulse in self.RAWDataProc.DATADICT["PULSES"]:
qq = []
qv = []
for ipm in range(self.RAWDataProc.DATADICT["nPulseMoments"]):
#for ipm in self.pulseMoments:
#for istack in self.RAWDataProc.DATADICT["stacks"]:
# print ("stack q", self.RAWDataProc.DATADICT[pulse]["Q"][ipm,istack-1])
qq.append(np.mean( self.RAWDataProc.DATADICT[pulse]["Q"][ipm,:]) )
qv.append(np.std( self.RAWDataProc.DATADICT[pulse]["Q"][ipm,:]/self.RAWDataProc.pulseLength[ip] ))
qq = np.array(qq)
qv = np.array(qv)
            iQ = np.argsort(qq)
            qq = qq[iQ]
            qv = qv[iQ]
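            # pulse current I = Q / τ_p: the recorded pulse moment divided by the pulse length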
INFO["Pulses"][pulse] = {}
INFO["Pulses"][pulse]["units"] = "A"
INFO["Pulses"][pulse]["current"] = VectorXr(qq/self.RAWDataProc.pulseLength[ip])
INFO["Pulses"][pulse]["variance"] = VectorXr(qv)
ip += 1
# Data
        if self.RAWDataProc.gated:
INFO["Gated"] = {}
INFO["Gated"]["abscissa units"] = "ms"
INFO["Gated"]["data units"] = "nT"
for pulse in self.RAWDataProc.DATADICT["PULSES"]:
INFO["Gated"][pulse] = {}
INFO["Gated"][pulse]["abscissa"] = VectorXr( self.RAWDataProc.GATEDABSCISSA )
INFO["Gated"][pulse]["windows"] = VectorXr( self.RAWDataProc.GATEDWINDOW )
for ichan in self.RAWDataProc.DATADICT[pulse]["chan"]:
INFO["Gated"][pulse]["Chan. " + str(ichan)] = {}
#INFO["Gated"][pulse]["Chan. " + str(ichan)]["STD"] = VectorXr( np.std(self.RAWDataProc.GATED[ichan]["NR"], axis=0) )
INFO["Gated"][pulse]["Chan. " + str(ichan)]["STD"] = VectorXr( np.average(self.RAWDataProc.GATED[ichan]["BN"], axis=0) )
for ipm in range(self.RAWDataProc.DATADICT["nPulseMoments"]):
#for ipm in self.pulseMoments:
INFO["Gated"][pulse]["Chan. " + str(ichan)]["Q-"+str(ipm) + " CA"] = VectorXr(self.RAWDataProc.GATED[ichan]["CA"][ipm])
INFO["Gated"][pulse]["Chan. " + str(ichan)]["Q-"+str(ipm) + " RE"] = VectorXr(self.RAWDataProc.GATED[ichan]["RE"][ipm])
INFO["Gated"][pulse]["Chan. " + str(ichan)]["Q-"+str(ipm) + " IM"] = VectorXr(self.RAWDataProc.GATED[ichan]["IM"][ipm])
#INFO["Gated"][pulse]["Chan. " + str(ichan)]["Q-"+str(ipm) + " IP"] = VectorXr(self.RAWDataProc.GATED[ichan]["IP"][ipm])
#INFO["Gated"][pulse]["Chan. " + str(ichan)]["Q-"+str(ipm) + " NR"] = VectorXr(self.RAWDataProc.GATED[ichan]["NR"][ipm])
#INFO["Gated"][pulse]["Chan. " + str(ichan)]["Q-"+str(ipm) + " STD" ] = VectorXr(self.RAWDataProc.GATED[ichan]["SIGMA"][ipm])
# we have gated data
# Window edges
# Window centres
with open(SaveStr, 'w') as outfile:
#for line in self.logText:
# outfile.write(line+"\n")
yaml.dump(self.YamlNode, outfile)
yaml.dump(INFO, outfile) #, default_flow_style=False)
def SavePreprocess(self):
#if "Saved" not in self.YamlNode.Processing.keys():
# self.YamlNode.Processing["Saved"] = []
#self.YamlNode.Processing["Saved"].append(datetime.datetime.now().isoformat())
#self.Log()
        import pickle
try:
with open('.akvo.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
fdir = os.path.dirname(fpath)
# Pickle the preprocessed data dictionary
SaveStr = QtWidgets.QFileDialog.getSaveFileName(self, "Save as", fdir, r"Pickle (*.dmp)")
spath,filen=os.path.split(str(SaveStr[0]))
        with open('.akvo.last.path', 'w') as f:
            f.write( str(spath) ) # remember the last save directory
save = open(SaveStr[0], 'wb')
# Add some extra info
INFO = {}
INFO["pulseType"] = self.RAWDataProc.pulseType
INFO["prePulseDelay"] = self.RAWDataProc.prePulseDelay
INFO["interpulseDelay"] = self.RAWDataProc.interpulseDelay
INFO["transFreq"] = self.RAWDataProc.transFreq
INFO["pulseLength"] = self.RAWDataProc.pulseLength
INFO["TuneCapacitance"] = self.RAWDataProc.TuneCapacitance
INFO["samp"] = self.RAWDataProc.samp
INFO["nPulseMoments"] = self.RAWDataProc.nPulseMoments
INFO["deadTime"] = self.RAWDataProc.deadTime
INFO["transFreq"] = self.RAWDataProc.transFreq
INFO["headerstr"] = str(self.headerstr)
INFO["nDAQVersion"] = self.RAWDataProc.nDAQVersion
INFO["log"] = yaml.dump( self.YamlNode )
# 1.6.4 and on
INFO["Instrument"] = self.RAWDataProc.Instrument
if self.RAWDataProc.Instrument == "MIDI 2":
INFO["MIDIGain"] = self.RAWDataProc.MIDIGain
INFO["datadir"] = self.RAWDataProc.datadir
TXRX = []
for ir in range(0, self.ui.txRxTable.rowCount() ):
txrx = []
for ic in range(0, self.ui.txRxTable.columnCount() ):
txrx.append( self.ui.txRxTable.item(ir, ic).text() )
TXRX.append(txrx)
INFO["TXRX"] = TXRX
if "Stacking" in self.YamlNode.Stacking.keys():
INFO["sigma"] = self.RawDataProc.sigma
if "Gate integrate" in self.YamlNode.Stacking.keys():
INFO["GATED"] = self.RAWDataProc.GATED
#print("META SAVE")
#print("INFO log", INFO["log"])
self.RAWDataProc.DATADICT["INFO"] = INFO
pickle.dump(self.RAWDataProc.DATADICT, save)
#pickle.dump(self.RAWDataProc, save) # doesn't work :-(
save.close()
# Export XML file suitable for USGS ScienceBase Data Release
def ExportXML(self):
""" This is a filler function for use by USGS collaborators
"""
return 42
def OpenPreprocess(self):
import pickle
try:
with open('.akvo.last.path') as f:
fpath = f.readline()
pass
except IOError as e:
fpath = '.'
#filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', '.')
fpath = QtWidgets.QFileDialog.getOpenFileName(self, 'Open preprocessed file', fpath, r"Pickle Files (*.dmp)")[0]
        with open('.akvo.last.path', 'w') as f:
            f.write( str(fpath) ) # remember the last opened file
self.ui.logTextBrowser.clear()
self.logText = []
if len(fpath) == 0:
return
pfile = open(fpath,'rb')
unpickle = pickle.Unpickler(pfile)
self.connectGMRDataProcessor()
self.RAWDataProc.DATADICT = unpickle.load()
# This line causes Akvo to crash, if the header file is no longer there. We don't need to load the
# file. TODO, need to disable "Load Data" in Load command though, as that is no longer possible.
#self.RAWDataProc.readHeaderFile(self.RAWDataProc.DATADICT["INFO"]["headerstr"])
self.headerstr = self.RAWDataProc.DATADICT["INFO"]["headerstr"]
self.RAWDataProc.pulseType = self.RAWDataProc.DATADICT["INFO"]["pulseType"]
self.RAWDataProc.transFreq = self.RAWDataProc.DATADICT["INFO"]["transFreq"]
self.RAWDataProc.pulseLength = self.RAWDataProc.DATADICT["INFO"]["pulseLength"]
self.RAWDataProc.TuneCapacitance = self.RAWDataProc.DATADICT["INFO"]["TuneCapacitance"]
self.RAWDataProc.samp = self.RAWDataProc.DATADICT["INFO"]["samp"]
self.RAWDataProc.nPulseMoments = self.RAWDataProc.DATADICT["INFO"]["nPulseMoments"]
self.RAWDataProc.deadTime = self.RAWDataProc.DATADICT["INFO"]["deadTime"]
self.RAWDataProc.transFreq = self.RAWDataProc.DATADICT["INFO"]["transFreq"]
self.RAWDataProc.nDAQVersion = self.RAWDataProc.DATADICT["INFO"]["nDAQVersion"]
#self.RAWDataProc.prePulseDelay = self.RAWDataProc.DATADICT["INFO"]["prePulseDelay"]
self.RAWDataProc.dt = 1./self.RAWDataProc.samp
self.RAWDataProc.Instrument = self.RAWDataProc.DATADICT["INFO"]["Instrument"]
if self.RAWDataProc.DATADICT["INFO"]["Instrument"] == "MIDI 2":
self.RAWDataProc.Instrument = "MIDI 2"
self.RAWDataProc.MIDIGain = self.RAWDataProc.DATADICT["INFO"]["MIDIGain"]
self.RAWDataProc.datadir = self.RAWDataProc.DATADICT["INFO"]["datadir"]
self.dataChan = self.RAWDataProc.DATADICT[ self.RAWDataProc.DATADICT["PULSES"][0] ]["chan"]
# Keep backwards compatibility with prior saved pickles???
#self.ui.logTextBrowser.clear()
#self.ui.logTextBrowser.append( yaml.dump(self.YamlNode)) #, default_flow_style=False) )
#for a in self.logText:
# self.ui.logTextBrowser.append(str(a))
#self.ui.logTextBrowser
#self.ui.logTextBrowser.clear()
#print ( self.RAWDataProc.DATADICT["INFO"]["log"] )
if "TXRX" in self.RAWDataProc.DATADICT["INFO"].keys():
TXRX = self.RAWDataProc.DATADICT["INFO"]["TXRX"]
self.ui.txRxTable.setRowCount( len(TXRX))
for irow, row in enumerate(TXRX):
for icol, val in enumerate(row):
pCell = QtWidgets.QTableWidgetItem()
pCell.setText( val )
pCell.setFlags( QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled )
self.ui.txRxTable.setItem(irow, icol, pCell)
self.logText = self.RAWDataProc.DATADICT["INFO"]["log"] # YAML
parse = yaml.load( self.logText, Loader=yaml.Loader )
self.YamlNode = AkvoYamlNode( ) #self.logText )
        self.YamlNode.Akvo_VERSION = parse.Akvo_VERSION
AKVO_VERSION = np.array(self.YamlNode.Akvo_VERSION.split("."), dtype=int)
        if tuple(AKVO_VERSION) >= (1, 2, 3): # version 1.2.3 or newer (lexicographic compare)
self.RAWDataProc.interpulseDelay = self.RAWDataProc.DATADICT["INFO"]["interpulseDelay"]
self.YamlNode.Import = OrderedDict(parse.Import)
self.YamlNode.Processing = list(parse.Processing)
self.YamlNode.Stacking = OrderedDict(parse.Stacking)
self.YamlNode.META = OrderedDict(parse.META)
self.logGUI()
self.Log()
#self.ui.logTextBrowser.append( yaml.dump(self.YamlNode)) #, default_flow_style=False) )
#except KeyError:
# pass
# Remove "Saved" and "Loaded" from processing flow
#if "Loaded" not in self.YamlNode.Processing.keys():
# self.YamlNode.Processing["Loaded"] = []
#self.YamlNode.Processing["Loaded"].append(datetime.datetime.now().isoformat())
#self.Log()
if "Stacking" in self.YamlNode.Stacking.keys():
self.RAWDataProc.sigma = self.RAWDataProc.DATADICT["sigma"]
# check to see if gate integration has been done, and it's ready to send to Merlin
if "Gate integrate" in self.YamlNode.Stacking.keys():
self.RAWDataProc.GATED = self.RAWDataProc.DATADICT["GATED"]
self.ui.actionExport_Preprocessed_Dataset.setEnabled(True)
#self.ui.plotGI.setEnabled(True)
# If we got this far, enable all the widgets
self.ui.lcdNumberTauPulse1.setEnabled(True)
self.ui.lcdNumberNuTx.setEnabled(True)
self.ui.lcdNumberTuneuF.setEnabled(True)
self.ui.lcdNumberSampFreq.setEnabled(True)
self.ui.lcdNumberNQ.setEnabled(True)
self.ui.headerFileBox.setEnabled(True)
self.ui.headerFileBox.setChecked( True )
self.headerBoxShrink()
#self.ui.headerBox2.setVisible(True)
self.ui.inputRAWParametersBox.setEnabled(False)
self.ui.loadDataPushButton.setEnabled(True)
        # make plots as you import the dataset
self.ui.plotImportCheckBox.setEnabled(True)
self.ui.plotImportCheckBox.setChecked(True)
# enable the LCDs
self.ui.lcdNumberFID1Length.setEnabled(1)
self.ui.lcdNumberFID2Length.setEnabled(1)
self.ui.lcdNumberResampFreq.setEnabled(1)
self.ui.lcdTotalDeadTime.setEnabled(1)
# enable META tab
self.ui.METATab.setEnabled(1)
self.ui.siteBox.setEnabled(1)
#self.ui.lcdTotalDeadTime.display( 1e3*self.RAWDataProc.DATADICT["INFO"]["deadTime"] )
self.ui.headerFileTextBrowser.clear( )
self.ui.headerFileTextBrowser.append( self.RAWDataProc.DATADICT["INFO"]["headerstr"] )
if u"Pulse 1" in self.RAWDataProc.DATADICT.keys():
self.ui.lcdNumberFID1Length.display(self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][-1]- self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][0])
self.ui.lcdTotalDeadTime.display( round(1e3*(self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][0]-self.RAWDataProc.DATADICT["Pulse 1"]["PULSE_TIMES"][-1]), 3) )
#print("CALC DEAD", (1e3*(self.RAWDataProc.prePulseDelay))) # - (self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][0]-self.RAWDataProc.DATADICT["Pulse 1"]["PULSE_TIMES"][-1])), 3) )
if u"Pulse 2" in self.RAWDataProc.DATADICT.keys():
self.ui.lcdNumberFID1Length.display(self.RAWDataProc.DATADICT["Pulse 2"]["TIMES"][-1]- self.RAWDataProc.DATADICT["Pulse 2"]["TIMES"][0])
self.ui.lcdTotalDeadTime.display( 1e3 * (self.RAWDataProc.DATADICT["Pulse 2"]["TIMES"][0]-self.RAWDataProc.DATADICT["Pulse 2"]["PULSE_TIMES"][-1]) )
# Update info from the header into the GUI
self.ui.pulseTypeTextBrowser.clear()
self.ui.pulseTypeTextBrowser.append(self.RAWDataProc.pulseType)
self.ui.lcdNumberNuTx.display(self.RAWDataProc.transFreq)
self.ui.lcdNumberTauPulse1.display(1e3*self.RAWDataProc.pulseLength[0])
self.ui.lcdNumberTuneuF.display(self.RAWDataProc.TuneCapacitance)
self.ui.lcdNumberResampFreq.display(self.RAWDataProc.samp)
self.ui.lcdNumberSampFreq.display(50000) # TODO, if non GMR is supported, query
self.ui.lcdNumberNQ.display(self.RAWDataProc.nPulseMoments)
self.ui.DeadTimeSpinBox.setValue(1e3*self.RAWDataProc.deadTime)
self.ui.CentralVSpinBox.setValue( self.RAWDataProc.transFreq )
if self.RAWDataProc.pulseType != "FID":
self.ui.lcdNumberTauPulse2.setEnabled(1)
self.ui.lcdNumberTauPulse2.display(1e3*self.RAWDataProc.pulseLength[1])
self.ui.lcdNumberTauDelay.setEnabled(1)
self.ui.lcdNumberTauDelay.display(1e3*self.RAWDataProc.interpulseDelay)
self.ui.FIDProcComboBox.clear()
if self.RAWDataProc.pulseType == "4PhaseT1" or self.RAWDataProc.pulseType == "T1":
self.ui.FIDProcComboBox.insertItem(0, "Pulse 1") #, const QVariant & userData = QVariant() )
self.ui.FIDProcComboBox.insertItem(1, "Pulse 2") #, const QVariant & userData = QVariant() )
self.ui.FIDProcComboBox.insertItem(2, "Both") #, const QVariant & userData = QVariant() )
if len( self.RAWDataProc.DATADICT["PULSES"]) == 2:
self.ui.FIDProcComboBox.setCurrentIndex (2)
elif self.RAWDataProc.DATADICT["PULSES"][0] == "Pulse 1":
self.ui.FIDProcComboBox.setCurrentIndex (0)
else:
self.ui.FIDProcComboBox.setCurrentIndex (1)
elif self.RAWDataProc.pulseType == "FID":
self.ui.FIDProcComboBox.insertItem(0, "Pulse 1") #, const QVariant & userData = QVariant() )
self.ui.FIDProcComboBox.setCurrentIndex (0)
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("updateProgress(int)"), self.updateProgressBar)
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("enableDSP()"), self.enableDSP)
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("doneStatus()"), self.doneStatus)
self.RAWDataProc.progressTrigger.connect(self.updateProgressBar)
self.RAWDataProc.enableDSPTrigger.connect(self.enableDSP)
self.RAWDataProc.doneTrigger.connect(self.doneStatus)
self.enableAll()
def loadMIDI2(self):
#################################################
# Check to make sure we are ready to process
# Header
        if self.RAWDataProc is None:
err_msg = "You need to load a header first."
reply = QtWidgets.QMessageBox.critical(self, 'Error',
err_msg) #, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
return
# Stacks
try:
self.procStacks = np.array(eval(str("np.r_["+self.ui.stacksLineEdit.text())+"]"))
except:
err_msg = "You need to set your stacks correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of stack indices. For example 1:24 or 1:4,8:24"
QtWidgets.QMessageBox.critical(self, 'Error', err_msg)
return
######################
# Qs
#####################
#print("text", self.ui.QLineEdit.text())
if self.ui.QLineEdit.text() == "":
self.pulseMoments = [-1]
print("Setting pulse moments to [-1]")
else:
try:
self.pulseMoments = np.array(eval(str("np.r_["+self.ui.QLineEdit.text())+"]"))
except:
err_msg = "You need to set your pulse moments correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of stack indices. For example 1:24 or 1:4,8:24"
                QtWidgets.QMessageBox.critical(self, 'Error', err_msg)
                return
###################
# Data Channels
###################
try:
self.dataChan = np.array(eval(str("np.r_["+self.ui.dataChanLineEdit.text())+"]"))
except:
#QMessageBox messageBox;
#messageBox.critical(0,"Error","An error has occured !");
#messageBox.setFixedSize(500,200);
#quit_msg = "Are you sure you want to exit the program?"
#reply = QtWidgets.QMessageBox.question(self, 'Message',
# quit_msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
err_msg = "You need to set your data channels correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of indices. For example 1 or 1:3 or 1:3 5\n\n" + \
"valid GMR data channels fall between 1 and 8. Note that\n" +\
"1:3 is not inclusive of 3 and is the same as 1,2 "
reply = QtWidgets.QMessageBox.critical(self, 'Error',
err_msg) #, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
return
#############################
# Reference Channels
# TODO make sure no overlap between data and ref channels
self.refChan = np.array( () )
if str(self.ui.refChanLineEdit.text()): # != "none":
try:
self.refChan = np.array(eval(str("np.r_["+self.ui.refChanLineEdit.text())+"]"))
except:
err_msg = "You need to set your reference channels correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of indices. For example 1 or 1:3 or 1:3 5\n\n" + \
"valid GMR data channels fall between 1 and 8. Note that\n" +\
"1:3 is not inclusive of 3 and is the same as 1,2 "
QtWidgets.QMessageBox.critical(self, 'Error', err_msg)
return
#####################################################
# Load data
self.lock("loading MIDI 2 dataset")
self.procThread = thread.start_new_thread(self.RAWDataProc.loadMIDI2, \
(str(self.headerstr), self.procStacks, self.dataChan, self.refChan, \
str(self.ui.FIDProcComboBox.currentText()), self.ui.mplwidget, \
1e-3 * self.ui.DeadTimeSpinBox.value( ), self.pulseMoments,
self.ui.plotImportCheckBox.isChecked() )) #, self))
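        # loading runs in a worker thread so the Qt event loop (progress bar, plots) stays responsive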
self.YamlNode.Import["MIDI Header"] = self.headerstr
self.YamlNode.Import["opened"] = datetime.datetime.now().isoformat()
self.YamlNode.Import["pulse Type"] = str(self.RAWDataProc.pulseType)
self.YamlNode.Import["stacks"] = self.procStacks.tolist()
self.YamlNode.Import["data channels"] = self.dataChan.tolist()
self.YamlNode.Import["reference channels"] = self.refChan.tolist()
self.YamlNode.Import["pulse records"] = str(self.ui.FIDProcComboBox.currentText())
self.YamlNode.Import["instrument dead time"] = (1e-3 * self.ui.DeadTimeSpinBox.value( ))
self.Log ( )
# enable META tab
self.ui.METATab.setEnabled(1)
self.ui.siteBox.setEnabled(1)
# should be already done
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("updateProgress(int)"), self.updateProgressBar)
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("enableDSP()"), self.enableDSP)
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("doneStatus()"), self.doneStatus)
#self.ui.ProcessedBox.setEnabled(True)
self.ui.lcdNumberFID1Length.setEnabled(1)
self.ui.lcdNumberFID2Length.setEnabled(1)
self.ui.lcdNumberResampFreq.setEnabled(1)
self.ui.lcdTotalDeadTime.setEnabled(1)
self.ui.lcdTotalDeadTime.display( self.ui.DeadTimeSpinBox.value( ) )
#self.ui.lcdTotalDeadTime.display( round(1e3*(self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][0]-self.RAWDataProc.DATADICT["Pulse 1"]["PULSE_TIMES"][-1]), 3) )
#self.ui.lcdNumberFID1Length.display(0)
#self.ui.lcdNumberFID2Length.display(0)
#self.ui.lcdNumberResampFreq.display( self.RAWDataProc.samp )
self.mpl_toolbar = NavigationToolbar2QT(self.ui.mplwidget, self.ui.mplwidget)
self.ui.mplwidget.draw()
def loadRAW(self):
#################################################
# Check to make sure we are ready to process
# Header
        if self.RAWDataProc is None:
err_msg = "You need to load a header first."
reply = QtWidgets.QMessageBox.critical(self, 'Error',
err_msg) #, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
return
# Stacks
try:
self.procStacks = np.array(eval(str("np.r_["+self.ui.stacksLineEdit.text())+"]"))
except:
err_msg = "You need to set your stacks correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of stack indices. For example 1:24 or 1:4,8:24"
QtWidgets.QMessageBox.critical(self, 'Error', err_msg)
return
# Qs
#print("pulse moment text", len(self.ui.QLineEdit.text()))
if self.ui.QLineEdit.text() == "":
self.pulseMoments = [-1]
else:
try:
self.pulseMoments = np.array(eval(str("np.r_["+self.ui.QLineEdit.text())+"]"))
except:
err_msg = "You need to set your pulse moments correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of stack indices. For example 1:24 or 1:4,8:25"
QtWidgets.QMessageBox.critical(self, 'Error', err_msg)
# Data Channels
#Chan = np.arange(0,9,1)
try:
self.dataChan = np.array(eval(str("np.r_["+self.ui.dataChanLineEdit.text())+"]"))
except:
#QMessageBox messageBox;
#messageBox.critical(0,"Error","An error has occured !");
#messageBox.setFixedSize(500,200);
#quit_msg = "Are you sure you want to exit the program?"
#reply = QtWidgets.QMessageBox.question(self, 'Message',
# quit_msg, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
err_msg = "You need to set your data channels correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of indices. For example 1 or 1:3 or 1:3 5\n\n" + \
"valid GMR data channels fall between 1 and 8. Note that\n" +\
"1:3 is not inclusive of 3 and is the same as 1,2 "
reply = QtWidgets.QMessageBox.critical(self, 'Error',
err_msg) #, QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
return
#############################
# Reference Channels
# TODO make sure no overlap between data and ref channels
self.refChan = np.array( () )
if str(self.ui.refChanLineEdit.text()): # != "none":
try:
self.refChan = np.array(eval(str("np.r_["+self.ui.refChanLineEdit.text())+"]"))
except:
err_msg = "You need to set your reference channels correctly.\n" + \
"This should be a Python Numpy interpretable list\n" + \
"of indices. For example 1 or 1:3 or 1:3 5\n\n" + \
"valid GMR data channels fall between 1 and 8. Note that\n" +\
"1:3 is not inclusive of 3 and is the same as 1,2 "
QtWidgets.QMessageBox.critical(self, 'Error', err_msg)
return
#####################################################
# Load data
self.lock("loading RAW GMR dataset")
if self.RAWDataProc.pulseType == "FID":
self.procThread = thread.start_new_thread(self.RAWDataProc.loadFIDData, \
(str(self.headerstr), self.procStacks, self.dataChan, self.refChan, \
str(self.ui.FIDProcComboBox.currentText()), self.ui.mplwidget, \
1e-3 * self.ui.DeadTimeSpinBox.value( ),
self.ui.plotImportCheckBox.isChecked() )) #, self))
elif self.RAWDataProc.pulseType == "4PhaseT1":
self.procThread = thread.start_new_thread(self.RAWDataProc.load4PhaseT1Data, \
(str(self.headerstr), self.procStacks, self.dataChan, self.refChan, \
str(self.ui.FIDProcComboBox.currentText()), self.ui.mplwidget, \
1e-3 * self.ui.DeadTimeSpinBox.value( ), self.ui.plotImportCheckBox.isChecked() )) #, self))
elif self.RAWDataProc.pulseType == "T1":
self.procThread = thread.start_new_thread(self.RAWDataProc.loadT1Data, \
(str(self.headerstr), self.procStacks, self.dataChan, self.refChan, \
str(self.ui.FIDProcComboBox.currentText()), self.ui.mplwidget, \
1e-3 * self.ui.DeadTimeSpinBox.value( ), self.ui.plotImportCheckBox.isChecked() )) #, self))
#self.procThread = thread.start_new_thread(self.RAWDataProc.load4PhaseT1Data, \
# (str(self.headerstr), self.procStacks, self.dataChan, self.refChan, \
# str(self.ui.FIDProcComboBox.currentText()), self.ui.mplwidget, \
# 1e-3 * self.ui.DeadTimeSpinBox.value( ), self.ui.plotImportCheckBox.isChecked() )) #, self))
self.YamlNode.Import["GMR Header"] = self.headerstr
self.YamlNode.Import["opened"] = datetime.datetime.now().isoformat()
self.YamlNode.Import["pulse Type"] = str(self.RAWDataProc.pulseType)
self.YamlNode.Import["stacks"] = self.procStacks.tolist()
self.YamlNode.Import["data channels"] = self.dataChan.tolist()
self.YamlNode.Import["reference channels"] = self.refChan.tolist()
self.YamlNode.Import["pulse records"] = str(self.ui.FIDProcComboBox.currentText())
self.YamlNode.Import["instrument dead time"] = (1e-3 * self.ui.DeadTimeSpinBox.value( ))
self.Log ( )
# enable META tab
self.ui.METATab.setEnabled(1)
self.ui.siteBox.setEnabled(1)
# should be already done
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("updateProgress(int)"), self.updateProgressBar)
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("enableDSP()"), self.enableDSP)
# QtCore.QObject.connect(self.RAWDataProc, QtCore.SIGNAL("doneStatus()"), self.doneStatus)
#self.ui.ProcessedBox.setEnabled(True)
self.ui.lcdNumberFID1Length.setEnabled(1)
self.ui.lcdNumberFID2Length.setEnabled(1)
self.ui.lcdNumberResampFreq.setEnabled(1)
self.ui.lcdTotalDeadTime.setEnabled(1)
self.ui.lcdTotalDeadTime.display( self.ui.DeadTimeSpinBox.value( ) )
#self.ui.lcdTotalDeadTime.display( round(1e3*(self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][0]-self.RAWDataProc.DATADICT["Pulse 1"]["PULSE_TIMES"][-1]), 3) )
#self.ui.lcdNumberFID1Length.display(0)
#self.ui.lcdNumberFID2Length.display(0)
#self.ui.lcdNumberResampFreq.display( self.RAWDataProc.samp )
self.mpl_toolbar = NavigationToolbar2QT(self.ui.mplwidget, self.ui.mplwidget)
self.ui.mplwidget.draw()
def Log(self):
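"""Refresh the log text browser with a YAML dump of the current processing node."""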
#for line in yaml.dump(self.YamlNode, default_flow_style=False):
#for line in nlogText:
# self.ui.logTextBrowser.append( line )
# self.logText.append( line )
self.ui.logTextBrowser.clear()
self.ui.logTextBrowser.append( yaml.dump(self.YamlNode ))
def disable(self):
self.ui.inputRAWParametersBox.setEnabled(False)
self.ui.BandPassBox.setEnabled(False)
self.ui.downSampleGroupBox.setEnabled(False)
self.ui.windowFilterGroupBox.setEnabled(False)
self.ui.harmonicBox.setEnabled(False)
# self.ui.despikeGroupBox.setEnabled(False)
self.ui.adaptBox.setEnabled(False)
self.ui.adaptFDBox.setEnabled(False)
self.ui.qCalcGroupBox.setEnabled(False)
self.ui.FDSmartStackGroupBox.setEnabled(False)
self.ui.sumDataBox.setEnabled(False)
self.ui.qdGroupBox.setEnabled(False)
self.ui.gateBox.setEnabled(False)
def enableAll(self):
self.enableDSP()
self.enableQC()
def enableDSP(self):
# Bandpass filter
self.ui.BandPassBox.setEnabled(True)
self.ui.BandPassBox.setChecked(True)
self.ui.bandPassGO.setEnabled(False) # need to design first
self.ui.plotBP.setEnabled(True)
self.ui.plotBP.setChecked(True)
# downsample
self.ui.downSampleGroupBox.setEnabled(True)
self.ui.downSampleGroupBox.setChecked(True)
# window
self.ui.windowFilterGroupBox.setEnabled(True)
self.ui.windowFilterGroupBox.setChecked(True)
# Despike
# self.ui.despikeGroupBox.setEnabled(True)
# self.ui.despikeGroupBox.setChecked(False)
# Adaptive filtering
self.ui.adaptBox.setEnabled(True)
self.ui.adaptBox.setChecked(True)
self.ui.plotRLS.setEnabled(True)
# FD Adaptive filtering
self.ui.adaptFDBox.setEnabled(True)
self.ui.adaptFDBox.setChecked(False)
# Harmonic
self.ui.harmonicBox.setEnabled(True)
self.ui.harmonicBox.setChecked(True)
self.LCDHarmonics()
self.LCDHarmonics2()
# sum group box
try:
if len(self.dataChan) > 1:
self.ui.sumDataBox.setEnabled(True)
self.ui.sumDataBox.setChecked(False)
except:
pass
# Quadrature Detect
self.ui.qdGroupBox.setEnabled(True)
self.ui.qdGroupBox.setChecked(True)
self.enableQC()
def enableQC(self):
# Q calc
self.ui.qCalcGroupBox.setEnabled(True)
self.ui.qCalcGroupBox.setChecked(True)
# FD SmartStack
self.ui.FDSmartStackGroupBox.setEnabled(True)
self.ui.FDSmartStackGroupBox.setChecked(True)
# Quadrature detect
try:
for pulse in self.RAWDataProc.DATADICT["PULSES"]:
np.shape(self.RAWDataProc.DATADICT[pulse]["Q"])
self.RAWDataProc.DATADICT["stack"]
self.ui.qdGroupBox.setEnabled(True)
self.ui.qdGroupBox.setChecked(True)
except:
self.ui.qdGroupBox.setEnabled(False)
self.ui.qdGroupBox.setChecked(False)
# Gating
try:
self.RAWDataProc.DATADICT["CA"]
self.ui.gateBox.setEnabled(True)
self.ui.gateBox.setChecked(True)
except:
self.ui.gateBox.setEnabled(False)
self.ui.gateBox.setChecked(False)
def despikeFilter(self):
self.lock("despike filter")
thread.start_new_thread(self.RAWDataProc.despike, \
(self.ui.windowSpinBox.value(), \
self.ui.thresholdSpinBox.value(), \
str(self.ui.replComboBox.currentText()), \
self.ui.rollOnSpinBox.value(), \
self.ui.despikeInterpWinSpinBox.value(),
self.ui.mplwidget))
def calcQ(self):
if "Calc Q" not in self.YamlNode.Stacking.keys():
#print("In CalcQ", yaml.dump(self.YamlNode.Processing) )
self.YamlNode.Stacking["Calc Q"] = True
#print( yaml.dump(self.YamlNode.Processing) )
self.Log()
else:
err_msg = "Q values have already been calculated"
reply = QtWidgets.QMessageBox.critical(self, 'Error',
err_msg)
return
self.lock("pulse moment calculation")
thread.start_new_thread(self.RAWDataProc.effectivePulseMoment, \
(self.ui.CentralVSpinBox.value(), \
self.ui.mplwidget))
def harmonicModel(self):
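"""
Record the harmonic-modelling parameters (number of harmonics, segments,
search window, bounds, and frequency grids) in the YAML processing log, then
launch RAWDataProc.harmonicModel on a worker thread.
"""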
self.lock("harmonic noise modelling")
Harm = OrderedDict()
Harm["STEP " + str(len(self.YamlNode.Processing))] = "Harmonic modelling"
Harm["NF"] = str( self.ui.NHarmonicsFreqsSpin.value() )
Harm["Segments"] = str( self.ui.NSegments.value() )
Harm["Proc. ref."] = self.ui.harmRef.isChecked()
if self.ui.searchAll.currentText() == "All":
Harm["search"] = self.ui.searchAll.currentText()
Search = False
else:
#Harm["search"] = self.ui.searchAll.currentText()
Harm["search"] = str(self.ui.Nsearch.value())
Search = self.ui.Nsearch.value()
if self.ui.boundsCheck.isChecked():
Harm["Bounds"] = str(self.ui.bounds.value())
Bounds = self.ui.bounds.value()
else:
Harm["Bounds"] = self.ui.boundsCheck.isChecked()
Bounds = 0
Harm["f0K1"] = str( self.ui.f0K1Spin.value() )
Harm["f0KN"] = str( self.ui.f0KNSpin.value() )
Harm["f0Ks"] = str( self.ui.f0KsSpin.value() )
Harm["f0"] = str( self.ui.f0Spin.value() )
if self.ui.NHarmonicsFreqsSpin.value() > 1:
Harm["f1K1"] = str( self.ui.f1K1Spin.value() )
Harm["f1KN"] = str( self.ui.f1KNSpin.value() )
Harm["f1Ks"] = str( self.ui.f1KsSpin.value() )
Harm["f1"] = str( self.ui.f1Spin.value() )
self.YamlNode.Processing.append(Harm)
self.Log()
thread.start_new_thread(self.RAWDataProc.harmonicModel, \
( \
self.ui.NHarmonicsFreqsSpin.value(), \
self.ui.f0Spin.value(), \
self.ui.f0K1Spin.value(), \
self.ui.f0KNSpin.value(), \
self.ui.f0KsSpin.value(), \
self.ui.NSegments.value(), \
self.ui.f1Spin.value(), \
self.ui.f1K1Spin.value(), \
self.ui.f1KNSpin.value(), \
self.ui.f1KsSpin.value(), \
Search, \
Bounds, \
self.ui.harmRef.isChecked(), \
self.ui.plotHarmonic.isChecked(), \
self.ui.mplwidget \
) \
)
def FDSmartStack(self):
if "TD stack" not in self.YamlNode.Stacking.keys():
self.YamlNode.Stacking["TD stack"] = {}
self.YamlNode.Stacking["TD stack"]["outlier"] = str( self.ui.outlierTestCB.currentText() )
self.YamlNode.Stacking["TD stack"]["cutoff"] = str( self.ui.MADCutoff.value() )
self.Log()
else:
err_msg = "TD noise cancellation has already been applied!"
reply = QtWidgets.QMessageBox.critical(self, 'Error',
err_msg)
return
self.lock("time-domain smart stack")
thread.start_new_thread(self.RAWDataProc.TDSmartStack, \
(str(self.ui.outlierTestCB.currentText()), \
self.ui.MADCutoff.value(),
self.ui.mplwidget))
def adaptFilter(self):
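"""Log the RLS adaptive-filter parameters and start time-domain noise cancellation on a worker thread."""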
self.lock("TD noise cancellation filter")
# Log processing
Adapt = OrderedDict()
Adapt["STEP " + str(len(self.YamlNode.Processing)) ] = "TD noise cancellation"
#print(Adapt) # this locks STEP in as first...
Adapt["n_Taps"] = self.ui.MTapsSpinBox.value()
Adapt["lambda"] = self.ui.adaptLambdaSpinBox.value()
Adapt["truncate"] = self.ui.adaptTruncateSpinBox.value()
Adapt["mu"] = self.ui.adaptMuSpinBox.value()
Adapt["PCA"] = self.ui.PCAComboBox.currentText()
#Adapt # this locks in the dict ordering...
#print(Adapt) # this locks in the dict...
self.YamlNode.Processing.append(Adapt)
self.Log( )
thread.start_new_thread(self.RAWDataProc.adaptiveFilter, \
(self.ui.MTapsSpinBox.value(), \
self.ui.adaptLambdaSpinBox.value(), \
self.ui.adaptTruncateSpinBox.value(), \
self.ui.adaptMuSpinBox.value(), \
str(self.ui.PCAComboBox.currentText()), \
self.ui.plotRLS.isChecked(), \
self.ui.mplwidget))
def sumDataChans(self):
self.lock("Summing data channels")
Sum = OrderedDict()
Sum["STEP " + str(len(self.YamlNode.Processing))] = "Channel sum"
self.YamlNode.Processing.append(Sum)
self.Log( )
self.dataChan = [self.dataChan[0]]
self.ui.sumDataBox.setEnabled(False)
thread.start_new_thread( self.RAWDataProc.sumData, ( self.ui.mplwidget, self.ui.sumType.currentText(), self.ui.sumAll.isChecked() ) )
def adaptFilterFD(self):
self.lock("FD noise cancellation filter")
thread.start_new_thread(self.RAWDataProc.adaptiveFilterFD, \
(str(self.ui.windowTypeComboBox.currentText()), \
self.ui.windowBandwidthSpinBox.value(), \
self.ui.CentralVSpinBox.value(), \
self.ui.mplwidget))
def logGUI(self):
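"""Populate the META tab widgets from the values stored in the YAML META node."""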
# The GUI can modify the YAML node while it is being read here (a race
# condition), so we cache these values first. More elegant solutions exist.
if "B_0" in self.YamlNode.META:
B0 = self.YamlNode.META["B_0"]["intensity"]
Bdec = self.YamlNode.META["B_0"]["dec"]
Binc = self.YamlNode.META["B_0"]["inc"]
if "DateTime" in self.YamlNode.META:
[Date,Time] = self.YamlNode.META["DateTime"].split("T")
year,month,day = Date.split("-")
if "Temp" in self.YamlNode.META:
temp = float(self.YamlNode.META["Temp"])
if "Coordinates" in self.YamlNode.META:
UTM = self.YamlNode.META["Coordinates"]["UTM"]
LatBand = self.YamlNode.META["Coordinates"]["LatBand"]
Ellipsoid = self.YamlNode.META["Coordinates"]["ellipsoid"]
# and set
if "Location" in self.YamlNode.META:
self.ui.locEdit.setText( self.YamlNode.META["Location"] )
if "Field Notes" in self.YamlNode.META:
self.ui.txtComments.setText( self.YamlNode.META["Field Notes"] )
if "B_0" in self.YamlNode.META:
self.ui.incSpinBox.setValue( Binc )
self.ui.decSpinBox.setValue( Bdec )
self.ui.intensitySpinBox.setValue( B0 )
if "DateTime" in self.YamlNode.META:
self.ui.dateEdit.setDate( datetime.date(int(year), int(month), int(day)) )
self.ui.timeEdit.setTime( datetime.time.fromisoformat( Time ) )
if "Temp" in self.YamlNode.META:
self.ui.tempSpinBox.setValue(temp)
if "Coordinates" in self.YamlNode.META:
self.ui.UTMzone.setCurrentText(UTM)
self.ui.latBand.setCurrentText(LatBand)
self.ui.ellipsoid.setCurrentText(Ellipsoid)
def logSite(self):
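"""Write the site metadata (location, coordinates, date/time, B_0 field, notes, and loops) from the GUI into the YAML META node."""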
self.YamlNode.META["Location"] = self.ui.locEdit.text()
self.YamlNode.META["Coordinates"] = OrderedDict()
self.YamlNode.META["Coordinates"]["UTM"] = self.ui.UTMzone.currentText()
self.YamlNode.META["Coordinates"]["LatBand"] = self.ui.latBand.currentText()
self.YamlNode.META["Coordinates"]["ellipsoid"] = self.ui.ellipsoid.currentText()
self.YamlNode.META["DateTime"] = self.ui.dateEdit.date().toString("yyyy-MM-dd") + "T" + str( self.ui.timeEdit.time().toString("hh:mm") )
self.YamlNode.META["Temp"] = self.ui.tempSpinBox.value()
self.YamlNode.META["B_0"] = OrderedDict()
self.YamlNode.META["B_0"]["inc"] = self.ui.incSpinBox.value()
self.YamlNode.META["B_0"]["dec"] = self.ui.decSpinBox.value()
self.YamlNode.META["B_0"]["intensity"] = self.ui.intensitySpinBox.value()
self.YamlNode.META["Field Notes"] = self.ui.txtComments.toPlainText()
self.YamlNode.META["Loops"] = OrderedDict()
for loop in self.loops:
print(self.loops[loop])
self.YamlNode.META["Loops"][loop] = loop + ".pwa"
self.Log()
def bandPassFilter(self):
self.lock("bandpass filter")
# Log processing
Band = OrderedDict()
Band["STEP " + str(len(self.YamlNode.Processing))] = "Bandpass filter"
Band["central_nu"] = str(self.ui.CentralVSpinBox.value())
Band["passband"] = str(self.ui.passBandSpinBox.value())
Band["stopband"] = str(self.ui.stopBandSpinBox.value())
Band["gpass"] = str(self.ui.gpassSpinBox.value())
Band["gstop"] = str(self.ui.gstopSpinBox.value())
Band["type"] = str(self.ui.fTypeComboBox.currentText())
self.YamlNode.Processing.append(Band)
self.Log( )
nv = self.ui.lcdTotalDeadTime.value( ) + self.ui.lcdNumberFTauDead.value()
self.ui.lcdTotalDeadTime.display( nv )
thread.start_new_thread(self.RAWDataProc.bandpassFilter, \
(self.ui.mplwidget, 0, self.ui.plotBP.isChecked() ))
def downsample(self):
self.lock("resampling")
# Log processing
Resample = OrderedDict()
Resample["STEP "+ str(len(self.YamlNode.Processing))] = "Resample"
Resample["downsample factor"] = str(self.ui.downSampleSpinBox.value())
Resample["truncate length"] = str(self.ui.truncateSpinBox.value())
self.YamlNode.Processing.append(Resample)
self.Log( )
thread.start_new_thread(self.RAWDataProc.downsample, \
(self.ui.truncateSpinBox.value(), \
self.ui.downSampleSpinBox.value(), \
self.ui.dsPlot.isChecked(), \
self.ui.mplwidget))
def quadDet(self):
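"""Run quadrature detection with the solver method and loss function chosen in the GUI (the names follow scipy.optimize.least_squares conventions)."""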
method = ['trf','dogbox','lm'][int(self.ui.QDMethod.currentIndex())]
loss = ['linear','soft_l1','cauchy','huber'][int(self.ui.QDLoss.currentIndex())]
# allow overwrite of Quad Det.
self.YamlNode.Stacking["Quadrature detection"] = {}
self.YamlNode.Stacking["Quadrature detection"]["trim"] = str( self.ui.trimSpin.value() )
self.YamlNode.Stacking["Quadrature detection"]["method"] = method
self.YamlNode.Stacking["Quadrature detection"]["loss"] = loss
self.Log()
#if "Quadrature detection" not in self.YamlNode.Processing.keys():
# self.YamlNode.Processing["Quadrature detection"] = {}
# self.YamlNode.Processing["Quadrature detection"]["trim"] = str( self.ui.trimSpin.value() )
# self.Log()
#else:
# self.YamlNode.Processing["Quadrature detection"] = {}
# self.YamlNode.Processing["Quadrature detection"]["trim"] = str( self.ui.trimSpin.value() )
# self.Log()
#err_msg = "Quadrature detection has already been done!"
#reply =QtWidgets.QMessageBox.critical(self, 'Error',
# err_msg)
#return
self.lock("quadrature detection")
thread.start_new_thread(self.RAWDataProc.quadDet, \
(self.ui.trimSpin.value(), method, loss, self.ui.mplwidget))
self.ui.plotQD.setEnabled(True)
def plotQD(self):
self.lock("plot QD")
thread.start_new_thread(self.RAWDataProc.plotQuadDet, \
(self.ui.trimSpin.value(), int(self.ui.QDType.currentIndex()), self.ui.mplwidget))
def gateIntegrate(self):
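"""Log the gates-per-decade setting and start gate integration on a worker thread."""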
if "Gate integrate" not in self.YamlNode.Stacking.keys():
self.YamlNode.Stacking["Gate integrate"] = {}
self.YamlNode.Stacking["Gate integrate"]["gpd"] = str(self.ui.GPDspinBox.value( ) )
self.Log()
self.lock("gate integration")
thread.start_new_thread(self.RAWDataProc.gateIntegrate, \
(self.ui.GPDspinBox.value(), self.ui.trimSpin.value(), self.ui.mplwidget))
self.ui.actionExport_Preprocessed_Dataset.setEnabled(True)
self.ui.plotGI.setEnabled(True)
def plotGI(self):
self.lock("plot gate integrate")
thread.start_new_thread(self.RAWDataProc.plotGateIntegrate, \
(self.ui.GPDspinBox.value(), self.ui.trimSpin.value(), \
self.ui.QDType_2.currentIndex(), self.ui.mplwidget))
def designFilter(self):
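"""Design the bandpass filter from the GUI settings, display the resulting filter order and added dead time, and enable the filter GO button."""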
[bord, fe] = self.RAWDataProc.designFilter( \
self.ui.CentralVSpinBox.value(), \
self.ui.passBandSpinBox.value(), \
self.ui.stopBandSpinBox.value(), \
self.ui.gpassSpinBox.value(), \
self.ui.gstopSpinBox.value(), \
str(self.ui.fTypeComboBox.currentText()),
self.ui.mplwidget
)
self.ui.lcdNumberFilterOrder.display(bord)
self.ui.lcdNumberFTauDead.display(1e3*fe)
self.ui.bandPassGO.setEnabled(1)
################################################################
# Hack for MacOS to force refresh of group box and plot
# this has an undesirable effect that it causes the groupbox to 'jump' up
# TODO come up with a better solution
self.ui.mplwidget.hide()
self.ui.mplwidget.show()
self.ui.BandPassBox.hide()
self.ui.BandPassBox.show()
def windowFilter(self):
self.lock("window filter")
# Log processing
Window = OrderedDict()
Window["STEP " + str(len(self.YamlNode.Processing))] = "Window filter"
Window["type"] = str(self.ui.windowTypeComboBox.currentText())
Window["width"] = str(self.ui.windowBandwidthSpinBox.value())
Window["centre"] = str(self.ui.CentralVSpinBox.value() )
Window["trim"] = str(self.ui.windowTrim.isChecked())
self.YamlNode.Processing.append(Window)
self.Log( )
if self.ui.windowTrim.isChecked():
nv = self.ui.lcdTotalDeadTime.value( ) + self.ui.lcdWinDead.value()
self.ui.lcdTotalDeadTime.display( nv )
thread.start_new_thread(self.RAWDataProc.windowFilter, \
(str(self.ui.windowTypeComboBox.currentText()), \
self.ui.windowBandwidthSpinBox.value(), \
self.ui.CentralVSpinBox.value(), \
self.ui.windowTrim.isChecked(), \
self.ui.mplwidget))
def designFDFilter(self):
mPulse = "None"
if u"Pulse 1" in self.RAWDataProc.DATADICT.keys():
mPulse = u"Pulse 1"
elif u"Pulse 2" in self.RAWDataProc.DATADICT.keys():
mPulse = u"Pulse 2"
a,b,c,d,dead,ndead = self.RAWDataProc.computeWindow( \
mPulse,
self.ui.windowBandwidthSpinBox.value(), \
self.ui.CentralVSpinBox.value(), \
str(self.ui.windowTypeComboBox.currentText()), \
self.ui.mplwidget )
self.ui.lcdWinDead.display(dead)
################################################################
# Hack for MacOS to force refresh of group box and plot
# this has an undesirable effect that it causes the groupbox to 'jump' up
# TODO come up with a better solution
self.ui.mplwidget.hide()
self.ui.mplwidget.show()
self.ui.windowFilterGroupBox.hide()
self.ui.windowFilterGroupBox.show()
def updateProgressBar(self, percent):
self.ui.barProgress.setValue(percent)
def updateProc(self):
if str(self.ui.FIDProcComboBox.currentText()) == "Pulse 1":
self.ui.lcdNumberFID1Length.display(self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][-1]- self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][0])
elif str(self.ui.FIDProcComboBox.currentText()) == "Pulse 2":
self.ui.lcdNumberFID2Length.display(self.RAWDataProc.DATADICT["Pulse 2"]["TIMES"][-1]- self.RAWDataProc.DATADICT["Pulse 2"]["TIMES"][0])
else:
self.ui.lcdNumberFID1Length.display(self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][-1]- self.RAWDataProc.DATADICT["Pulse 1"]["TIMES"][0])
self.ui.lcdNumberFID2Length.display(self.RAWDataProc.DATADICT["Pulse 2"]["TIMES"][-1]- self.RAWDataProc.DATADICT["Pulse 2"]["TIMES"][0])
self.ui.lcdNumberResampFreq.display( self.RAWDataProc.samp )
def doneStatus(self): # unlocks GUI
self.ui.statusbar.clearMessage ( )
self.ui.barProgress.hide()
self.updateProc()
self.enableAll()
def lock(self, string):
self.ui.statusbar.showMessage ( string )
self.ui.barProgress.show()
self.ui.barProgress.setValue(0)
self.disable()
def unlock(self):
self.ui.statusbar.clearMessage ( )
self.ui.barProgress.hide()
self.enableAll()
def done(self):
self.ui.statusbar.showMessage ( "" )
################################################################
################################################################
# Boiler plate main function
import pkg_resources
from pkg_resources import resource_string
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from akvo.gui.logo import plotLogo
def main():
# splash screen logo
logo = pkg_resources.resource_filename(__name__, 'akvo.png')
logo2 = pkg_resources.resource_filename(__name__, 'akvo2.png')
qApp = QtWidgets.QApplication(sys.argv)
ssplash = True
if ssplash:
pixmap = QtGui.QPixmap(logo)
splash = QtWidgets.QSplashScreen(pixmap, QtCore.Qt.WindowStaysOnTopHint)
splash.show()
aw = ApplicationWindow()
#img=mpimg.imread(logo)
for ax in [ aw.ui.mplwidget ]:
ax.fig.clear()
subplot = ax.fig.add_subplot(211)
# old logo plot
ax.fig.patch.set_facecolor( None )
ax.fig.patch.set_alpha( .0 )
#subplot.imshow(img)
#ax.fig.patch.set_visible(False)
subplot.axis('off')
plotLogo(subplot)
subplot.xaxis.set_major_locator(plt.NullLocator())
subplot.yaxis.set_major_locator(plt.NullLocator())
subplot2 = ax.fig.add_subplot(212)
subplot2.text(0.5, 1.,'surface NMR workbench',
horizontalalignment='center',
verticalalignment='center',
size=22,
transform = subplot2.transAxes)
subplot2.xaxis.set_major_locator(plt.NullLocator())
subplot2.yaxis.set_major_locator(plt.NullLocator())
subplot2.axis('off')
ax.draw()
if ssplash:
splash.showMessage("Loading modules")
splash.finish(aw)
#time.sleep(1)
aw.setWindowTitle("Akvo v"+str(VERSION))
aw.show()
qApp.setWindowIcon(QtGui.QIcon(logo2))
sys.exit(qApp.exec_())
if __name__ == "__main__":
main() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/gui/akvoGUI.py | akvoGUI.py |
from __future__ import unicode_literals
import sys
import os
import random
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from PyQt5 import QtCore, QtWidgets
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=3, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
def clicked(self):
print ("Clicked")
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
self.ax1 = self.fig.add_axes([.125,.1,.725,.8])
self.ax2 = self.ax1.twinx() # fig.add_axes([.125,.1,.725,.8])
self.compute_initial_figure()
def reAxH(self, num, shx=True, shy=True):
try:
self.fig.clear()
except:
pass
for n in range(num):
if n == 0:
self.ax1 = self.fig.add_subplot( 1, num, 1 )
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(8)
if n == 1:
self.ax2 = self.fig.add_subplot( 1, num, 2 )
self.ax2.tick_params(axis='both', which='major', labelsize=8)
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.yaxis.get_offset_text().set_size(8)
if n == 2:
self.ax3 = self.fig.add_subplot( 1, num, 3 )
self.ax3.tick_params(axis='both', which='major', labelsize=8)
self.ax3.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax3.yaxis.get_offset_text().set_size(8)
if n == 3:
self.ax4 = self.fig.add_subplot( 1, num, 4 )
self.ax4.tick_params(axis='both', which='major', labelsize=8)
self.ax4.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax4.yaxis.get_offset_text().set_size(8)
def reAxH2(self, num, shx=True, shy=True):
try:
for ax in self.fig.axes:
self.fig.delaxes(ax)
except:
pass
try:
self.fig.clear()
except:
pass
for n in range(num):
if n == 0:
self.ax1 = self.fig.add_subplot( 2, num, 1 )
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(8)
self.ax21 = self.fig.add_subplot( 2, num, num+1 )
self.ax21.tick_params(axis='both', which='major', labelsize=8)
self.ax21.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax21.yaxis.get_offset_text().set_size(8)
if n == 1:
self.ax2 = self.fig.add_subplot( 2, num, 2, sharex=self.ax1, sharey=self.ax1 )
self.ax2.tick_params(axis='both', which='major', labelsize=8)
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.yaxis.get_offset_text().set_size(8)
self.ax22 = self.fig.add_subplot( 2, num, num+2, sharex=self.ax21, sharey=self.ax21 )
self.ax22.tick_params(axis='both', which='major', labelsize=8)
self.ax22.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax22.yaxis.get_offset_text().set_size(8)
if n == 2:
self.ax3 = self.fig.add_subplot( 2, num, 3, sharex=self.ax1, sharey=self.ax1 )
self.ax3.tick_params(axis='both', which='major', labelsize=8)
self.ax3.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax3.yaxis.get_offset_text().set_size(8)
self.ax23 = self.fig.add_subplot( 2, num, num+3, sharex=self.ax21, sharey=self.ax21 )
self.ax23.tick_params(axis='both', which='major', labelsize=8)
self.ax23.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax23.yaxis.get_offset_text().set_size(8)
if n == 3:
self.ax4 = self.fig.add_subplot( 2, num, 4, sharex=self.ax1, sharey=self.ax1 )
self.ax4.tick_params(axis='both', which='major', labelsize=8)
self.ax4.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax4.yaxis.get_offset_text().set_size(8)
self.ax24 = self.fig.add_subplot( 2, num, num+4, sharex=self.ax21, sharey=self.ax21 )
self.ax24.tick_params(axis='both', which='major', labelsize=8)
self.ax24.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax24.yaxis.get_offset_text().set_size(8)
if n == 4:
self.ax5 = self.fig.add_subplot( 2, num, 5, sharex=self.ax1, sharey=self.ax1 )
self.ax5.tick_params(axis='both', which='major', labelsize=8)
self.ax5.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax5.yaxis.get_offset_text().set_size(8)
self.ax25 = self.fig.add_subplot( 2, num, num+5, sharex=self.ax21, sharey=self.ax21 )
self.ax25.tick_params(axis='both', which='major', labelsize=8)
self.ax25.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax25.yaxis.get_offset_text().set_size(8)
if n == 5:
self.ax6 = self.fig.add_subplot( 2, num, 6, sharex=self.ax1, sharey=self.ax1 )
self.ax6.tick_params(axis='both', which='major', labelsize=8)
self.ax6.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax6.yaxis.get_offset_text().set_size(8)
self.ax26 = self.fig.add_subplot( 2, num, num+6, sharex=self.ax21, sharey=self.ax21 )
self.ax26.tick_params(axis='both', which='major', labelsize=8)
self.ax26.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax26.yaxis.get_offset_text().set_size(8)
if n == 6:
self.ax7 = self.fig.add_subplot( 2, num, 7, sharex=self.ax1, sharey=self.ax1 )
self.ax7.tick_params(axis='both', which='major', labelsize=8)
self.ax7.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax7.yaxis.get_offset_text().set_size(8)
self.ax27 = self.fig.add_subplot( 2, num, num+7, sharex=self.ax21, sharey=self.ax21 )
self.ax27.tick_params(axis='both', which='major', labelsize=8)
self.ax27.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax27.yaxis.get_offset_text().set_size(8)
if n == 7:
self.ax8 = self.fig.add_subplot( 2, num, 8, sharex=self.ax1, sharey=self.ax1 )
self.ax8.tick_params(axis='both', which='major', labelsize=8)
self.ax8.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax8.yaxis.get_offset_text().set_size(8)
self.ax28 = self.fig.add_subplot( 2, num, num+8, sharex=self.ax21, sharey=self.ax21 )
self.ax28.tick_params(axis='both', which='major', labelsize=8)
self.ax28.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax28.yaxis.get_offset_text().set_size(8)
def reAx2(self, shx=True, shy=True):
try:
self.fig.clear()
except:
pass
try:
self.ax1.clear()
self.fig.delaxes(self.ax1)
except:
pass
try:
self.fig.delaxes(self.ax3)
except:
pass
try:
self.ax2.clear()
self.fig.delaxes(self.ax2)
except:
pass
self.ax1 = self.fig.add_subplot(211)
if shx and shy:
self.ax2 = self.fig.add_subplot(212, sharex=self.ax1, sharey=self.ax1)
elif shx == True:
self.ax2 = self.fig.add_subplot(212, sharex=self.ax1)
elif shy == True:
self.ax2 = self.fig.add_subplot(212, sharey=self.ax1)
else:
self.ax2 = self.fig.add_subplot(212)
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax2.tick_params(axis='both', which='major', labelsize=8)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(8)
self.ax2.yaxis.get_offset_text().set_size(8)
def reAx3(self, shx=True, shy=True):
try:
self.fig.clear()
except:
pass
try:
self.ax1.clear()
self.fig.delaxes(self.ax1)
except:
pass
try:
self.ax2.clear()
self.fig.delaxes(self.ax2)
except:
pass
try:
self.ax3.clear()
self.fig.delaxes(self.ax3)
except:
pass
self.ax1 = self.fig.add_subplot(211)
if shx and shy:
self.ax2 = self.fig.add_subplot(212, sharex=self.ax1, sharey=self.ax1)
elif shx:
self.ax2 = self.fig.add_subplot(212, sharex=self.ax1)
elif shy:
self.ax2 = self.fig.add_subplot(212, sharey=self.ax1)
else:
self.ax2 = self.fig.add_subplot(212)
self.ax3 = self.ax1.twinx()
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax2.tick_params(axis='both', which='major', labelsize=8)
self.ax3.tick_params(axis='both', which='major', labelsize=8)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax3.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(8)
self.ax2.yaxis.get_offset_text().set_size(8)
self.ax3.yaxis.get_offset_text().set_size(8)
def reAx4(self):
try:
self.fig.clear()
except:
pass
# two main axes
self.ax1 = self.fig.add_axes([0.15, 0.55, 0.625, 0.3672])
self.ax2 = self.fig.add_axes([0.15, 0.135, 0.625, 0.3672])
# for colourbars
self.cax1 = self.fig.add_axes([0.8, 0.55, 0.025, 0.3672])
self.cax2 = self.fig.add_axes([0.8, 0.135, 0.025, 0.3672])
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax2.tick_params(axis='both', which='major', labelsize=8)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(8)
self.ax2.yaxis.get_offset_text().set_size(8)
self.cax1.tick_params(axis='both', which='major', labelsize=8)
self.cax2.tick_params(axis='both', which='major', labelsize=8)
self.cax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.cax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.cax1.yaxis.get_offset_text().set_size(8) #.get_text()
self.cax2.yaxis.get_offset_text().set_size(8) #.get_text()
self.cax1.tick_params(labelsize=8)
self.cax2.tick_params(labelsize=8)
def compute_initial_figure(self):
t = np.arange(0,.3,1e-4)
x = np.cos(t*2000.*np.pi*2)*np.exp(-t/.07)
x2 = np.exp(-t/.07)
dp = self.ax1.plot(t, x, 'r',label='test function')
dp2 = self.ax2.plot(t, x2, 'r',label='test function2')
self.ax1.set_xlabel("Time [s]", fontsize=8)
self.ax1.set_ylabel("Signal [nV]", fontsize=8)
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax1.tick_params(axis='both', which='minor', labelsize=6)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.legend(prop={'size':6}) | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/gui/mydynamicmplcanvas-broke.py | mydynamicmplcanvas-broke.py |
from __future__ import unicode_literals
import sys
import os
import random
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from PyQt5 import QtCore, QtWidgets
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import numpy as np
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=3, height=4, dpi=100):
self.fig = Figure(figsize=(width, height), dpi=dpi, facecolor='darkgrey')
#self.fig.patch.set_facecolor('blue')
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
FigureCanvas.updateGeometry(self)
def compute_initial_figure(self):
pass
def clicked(self):
print ("Clicked")
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
self.ax1 = self.fig.add_axes([.125,.1,.725,.8])
self.ax2 = self.ax1.twinx() # fig.add_axes([.125,.1,.725,.8])
self.compute_initial_figure()
def reAxH(self, num, shx=True, shy=True):
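"""Rebuild the figure as a single row of num side-by-side axes (self.ax1..self.ax4)."""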
fs = 10
try:
for ax in self.fig.axes:
self.fig.delaxes(ax)
except:
pass
try:
self.fig.clear()
except:
pass
for n in range(num):
if n == 0:
self.ax1 = self.fig.add_subplot( 1, num, 1)
self.ax1.tick_params(axis='both', which='major', labelsize=fs)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='x')
self.ax1.yaxis.get_offset_text().set_size(fs)
self.ax1.xaxis.get_offset_text().set_size(fs)
if n == 1:
self.ax2 = self.fig.add_subplot( 1, num, 2)
self.ax2.tick_params(axis='both', which='major', labelsize=fs)
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.yaxis.get_offset_text().set_size(fs)
if n == 2:
self.ax3 = self.fig.add_subplot( 1, num, 3)
self.ax3.tick_params(axis='both', which='major', labelsize=fs)
self.ax3.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax3.yaxis.get_offset_text().set_size(fs)
if n == 3:
self.ax4 = self.fig.add_subplot( 1, num, 4)
self.ax4.tick_params(axis='both', which='major', labelsize=fs)
self.ax4.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax4.yaxis.get_offset_text().set_size(fs)
def reAxH2(self, num, shx=True, shy=True):
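"""Rebuild the figure as a 2 x num grid; the top row shares axes with self.ax1 and the bottom row with self.ax21."""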
fs = 10
try:
for ax in self.fig.axes:
self.fig.delaxes(ax)
except:
pass
try:
self.fig.clear()
except:
pass
for n in range(num):
if n == 0:
self.ax1 = self.fig.add_subplot( 2, num, 1)
self.ax1.tick_params(axis='both', which='major', labelsize=fs)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(fs)
self.ax21 = self.fig.add_subplot( 2, num, num+1)
self.ax21.tick_params(axis='both', which='major', labelsize=fs)
self.ax21.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax21.yaxis.get_offset_text().set_size(fs)
if n == 1:
self.ax2 = self.fig.add_subplot( 2, num, 2, sharex=self.ax1, sharey=self.ax1)
self.ax2.tick_params(axis='both', which='major', labelsize=fs)
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.yaxis.get_offset_text().set_size(fs)
self.ax22 = self.fig.add_subplot( 2, num, num+2, sharex=self.ax21, sharey=self.ax21)
self.ax22.tick_params(axis='both', which='major', labelsize=fs)
self.ax22.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax22.yaxis.get_offset_text().set_size(fs)
if n == 2:
self.ax3 = self.fig.add_subplot( 2, num, 3, sharex=self.ax1, sharey=self.ax1)
self.ax3.tick_params(axis='both', which='major', labelsize=fs)
self.ax3.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax3.yaxis.get_offset_text().set_size(fs)
self.ax23 = self.fig.add_subplot( 2, num, num+3, sharex=self.ax21, sharey=self.ax21)
self.ax23.tick_params(axis='both', which='major', labelsize=fs)
self.ax23.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax23.yaxis.get_offset_text().set_size(fs)
if n == 3:
self.ax4 = self.fig.add_subplot( 2, num, 4, sharex=self.ax1, sharey=self.ax1 )
self.ax4.tick_params(axis='both', which='major', labelsize=fs)
self.ax4.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax4.yaxis.get_offset_text().set_size(fs)
self.ax24 = self.fig.add_subplot( 2, num, num+4, sharex=self.ax21, sharey=self.ax21 )
self.ax24.tick_params(axis='both', which='major', labelsize=fs)
self.ax24.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax24.yaxis.get_offset_text().set_size(fs)
if n == 4:
self.ax5 = self.fig.add_subplot( 2, num, 5, sharex=self.ax1, sharey=self.ax1 )
self.ax5.tick_params(axis='both', which='major', labelsize=fs)
self.ax5.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax5.yaxis.get_offset_text().set_size(fs)
self.ax25 = self.fig.add_subplot( 2, num, num+5, sharex=self.ax21, sharey=self.ax21 )
self.ax25.tick_params(axis='both', which='major', labelsize=fs)
self.ax25.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax25.yaxis.get_offset_text().set_size(fs)
if n == 5:
self.ax6 = self.fig.add_subplot( 2, num, 6, sharex=self.ax1, sharey=self.ax1 )
self.ax6.tick_params(axis='both', which='major', labelsize=fs)
self.ax6.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax6.yaxis.get_offset_text().set_size(fs)
self.ax26 = self.fig.add_subplot( 2, num, num+6, sharex=self.ax21, sharey=self.ax21 )
self.ax26.tick_params(axis='both', which='major', labelsize=fs)
self.ax26.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax26.yaxis.get_offset_text().set_size(fs)
if n == 6:
self.ax7 = self.fig.add_subplot( 2, num, 7, sharex=self.ax1, sharey=self.ax1 )
self.ax7.tick_params(axis='both', which='major', labelsize=fs)
self.ax7.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax7.yaxis.get_offset_text().set_size(fs)
self.ax27 = self.fig.add_subplot( 2, num, num+7, sharex=self.ax21, sharey=self.ax21 )
self.ax27.tick_params(axis='both', which='major', labelsize=fs)
self.ax27.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax27.yaxis.get_offset_text().set_size(fs)
if n == 7:
self.ax8 = self.fig.add_subplot( 2, num, 8, sharex=self.ax1, sharey=self.ax1 )
self.ax8.tick_params(axis='both', which='major', labelsize=fs)
self.ax8.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax8.yaxis.get_offset_text().set_size(fs)
self.ax28 = self.fig.add_subplot( 2, num, num+8, sharex=self.ax21, sharey=self.ax21 )
self.ax28.tick_params(axis='both', which='major', labelsize=fs)
self.ax28.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax28.yaxis.get_offset_text().set_size(fs)
def reAx2(self, shx=True, shy=True):
try:
self.fig.clear()
except:
pass
try:
self.ax1.clear()
self.fig.delaxes(self.ax1)
except:
pass
try:
self.fig.delaxes(self.ax3)
except:
pass
try:
self.ax2.clear()
self.fig.delaxes(self.ax2)
except:
pass
#self.fig.patch.set_facecolor('red')
self.ax1 = self.fig.add_subplot(211)
if shx and shy:
self.ax2 = self.fig.add_subplot(212, sharex=self.ax1, sharey=self.ax1)
elif shx == True:
self.ax2 = self.fig.add_subplot(212, sharex=self.ax1)
elif shy == True:
self.ax2 = self.fig.add_subplot(212, sharey=self.ax1)
else:
self.ax2 = self.fig.add_subplot(212)
self.ax1.tick_params(axis='both', which='major', labelsize=10)
self.ax2.tick_params(axis='both', which='major', labelsize=10)
self.ax1.ticklabel_format(style='scientific', scilimits=(0,0), axis='y')
self.ax2.ticklabel_format(style='scientific', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(10)
self.ax2.yaxis.get_offset_text().set_size(10)
def softClear(self):
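"""Remove plotted lines and collections from every axis without destroying the axes, and reset the colour cycle."""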
for ax in self.fig.get_axes():
for artist in ax.lines + ax.collections:
artist.remove()
ax.set_prop_cycle(None)
def reAx3(self, shx=True, shy=True):
fs = 10
try:
self.fig.clear()
except:
pass
try:
self.ax1.clear()
self.fig.delaxes(self.ax1)
except:
pass
try:
self.ax2.clear()
self.fig.delaxes(self.ax2)
except:
pass
try:
self.ax3.clear()
self.fig.delaxes(self.ax3)
except:
pass
self.ax2 = self.fig.add_subplot(211)
if shx and shy:
self.ax1 = self.fig.add_subplot(212, sharex=self.ax2, sharey=self.ax2)
elif shx:
self.ax1 = self.fig.add_subplot(212, sharex=self.ax2)
elif shy:
self.ax1 = self.fig.add_subplot(212, sharey=self.ax2)
else:
self.ax1 = self.fig.add_subplot(212)
self.ax3 = self.ax1.twinx()
self.ax2.yaxis.set_label_position("right")
self.ax2.yaxis.set_ticks_position("right")
#self.ax2.yaxis.tick_right()
#self.ax1.set_facecolor('red')
#self.ax2.set_facecolor('red')
#self.ax3.set_facecolor('red')
#self.fig.set_facecolor('red')
#self.fig.set_edgecolor('red')
#self.ax1.set_axis_bgcolor('green')
self.ax1.tick_params(axis='both', which='major', labelsize=fs)
self.ax2.tick_params(axis='both', which='major', labelsize=fs)
self.ax3.tick_params(axis='both', which='major', labelsize=fs)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax3.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(fs)
self.ax2.yaxis.get_offset_text().set_size(fs)
self.ax3.yaxis.get_offset_text().set_size(fs)
def reAx4(self):
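"""Two stacked main axes (ax1, ax2) with dedicated colourbar axes (cax1, cax2) to their right."""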
try:
self.fig.clear()
except:
pass
# two main axes
self.ax1 = self.fig.add_axes([0.15, 0.55, 0.625, 0.3672])
self.ax2 = self.fig.add_axes([0.15, 0.135, 0.625, 0.3672])
# for colourbars
self.cax1 = self.fig.add_axes([0.8, 0.55, 0.025, 0.3672])
self.cax2 = self.fig.add_axes([0.8, 0.135, 0.025, 0.3672])
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax2.tick_params(axis='both', which='major', labelsize=8)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.yaxis.get_offset_text().set_size(8)
self.ax2.yaxis.get_offset_text().set_size(8)
self.cax1.tick_params(axis='both', which='major', labelsize=8)
self.cax2.tick_params(axis='both', which='major', labelsize=8)
self.cax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.cax2.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.cax1.yaxis.get_offset_text().set_size(8) #.get_text()
self.cax2.yaxis.get_offset_text().set_size(8) #.get_text()
self.cax1.tick_params(labelsize=8)
self.cax2.tick_params(labelsize=8)
def compute_initial_figure(self):
t = np.arange(0,.3,1e-4)
x = np.cos(t*2000.*np.pi*2)*np.exp(-t/.07)
x2 = np.exp(-t/.07)
dp = self.ax1.plot(t, x, 'r',label='test function')
dp2 = self.ax2.plot(t, x2, 'r',label='test function2')
self.ax1.set_xlabel("Time [s]", fontsize=8)
self.ax1.set_ylabel("Signal [nV]", fontsize=8)
self.ax1.tick_params(axis='both', which='major', labelsize=8)
self.ax1.tick_params(axis='both', which='minor', labelsize=6)
self.ax1.ticklabel_format(style='sci', scilimits=(0,0), axis='y')
self.ax1.legend(prop={'size':6}) | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/gui/mydynamicmplcanvas.py | mydynamicmplcanvas.py |
import sys
from PyQt5.QtWidgets import (QPushButton, QDialog, QTreeWidget,
QTreeWidgetItem, QVBoxLayout,
QHBoxLayout, QFrame, QLabel,
QApplication)
class SectionExpandButton(QPushButton):
"""a QPushbutton that can expand or collapse its section
"""
def __init__(self, item, text = "", parent = None):
super().__init__(text, parent)
self.section = item
self.clicked.connect(self.on_clicked)
def on_clicked(self):
"""toggle expand/collapse of section by clicking
"""
if self.section.isExpanded():
self.section.setExpanded(False)
else:
self.section.setExpanded(True)
class CollapsibleDialog(QDialog):
"""a dialog to which collapsible sections can be added;
subclass and reimplement define_sections() to define sections and
add them as (title, widget) tuples to self.sections
"""
def __init__(self):
super().__init__()
self.tree = QTreeWidget()
self.tree.setHeaderHidden(True)
layout = QVBoxLayout()
layout.addWidget(self.tree)
self.setLayout(layout)
self.tree.setIndentation(0)
self.sections = []
self.define_sections()
self.add_sections()
def add_sections(self):
"""adds a collapsible sections for every
(title, widget) tuple in self.sections
"""
for (title, widget) in self.sections:
button1 = self.add_button(title)
section1 = self.add_widget(button1, widget)
button1.addChild(section1)
def define_sections(self):
"""reimplement this to define all your sections
and add them as (title, widget) tuples to self.sections
"""
widget = QFrame(self.tree)
layout = QHBoxLayout(widget)
layout.addWidget(QLabel("Bla"))
layout.addWidget(QLabel("Blubb"))
title = "Section 1"
self.sections.append((title, widget))
def add_button(self, title):
"""creates a QTreeWidgetItem containing a button
to expand or collapse its section
"""
item = QTreeWidgetItem()
self.tree.addTopLevelItem(item)
self.tree.setItemWidget(item, 0, SectionExpandButton(item, text = title))
return item
def add_widget(self, button, widget):
"""creates a QWidgetItem containing the widget,
as child of the button-QWidgetItem
"""
section = QTreeWidgetItem(button)
section.setDisabled(True)
self.tree.setItemWidget(section, 0, widget)
return section
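# Minimal subclass sketch (names hypothetical):
# class MyDialog(CollapsibleDialog):
#     def define_sections(self):
#         widget = QLabel("content")
#         self.sections.append(("My section", widget))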
if __name__ == "__main__":
app = QApplication(sys.argv)
window = CollapsibleDialog()
window.show()
sys.exit(app.exec_()) | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/gui/Spoiler.py | Spoiler.py |
import os, sys
import numpy as np
from ruamel import yaml
import pyLemma.LemmaCore as lc
import pyLemma.Merlin as mrln
import pyLemma.FDEM1D as em1d
import numpy as np
#import matplotlib.pyplot as plt
#import seaborn as sns
#sns.set(style="ticks")
#import cmocean
#from SEGPlot import *
#from matplotlib.ticker import FormatStrFormatter
#import matplotlib.ticker as plticker
class VectorXr(yaml.YAMLObject):
"""
Converts Lemma/Merlin/Akvo serialized Eigen arrays into numpy ones for use by Python
"""
yaml_tag = u'VectorXr'
def __init__(self, array):
self.size = np.shape(array)[0]
self.data = array.tolist()
def __repr__(self):
# Converts to numpy array on import
return "np.array(%r)" % (self.data)
class AkvoData(yaml.YAMLObject):
"""
Reads an Akvo serialized dataset into a standard python dictionary
"""
yaml_tag = u'AkvoData'
def __init__(self, array):
pass
#self.size = np.shape(array)[0]
#self.Imp = array.tolist()
def __repr__(self):
# Converts to a dictionary with Eigen vectors represented as Numpy arrays
return self
def loadAkvoData(fnamein):
""" Loads data from an Akvo YAML file. The 0.02 is hard coded as the pulse length. This needs to be
corrected in future kernel calculations. The current was reported but not the pulse length.
"""
fname = (os.path.splitext(fnamein)[0])
with open(fnamein, 'r') as stream:
try:
AKVO = (yaml.load(stream, Loader=yaml.Loader))
except yaml.YAMLError as exc:
print(exc)
return AKVO
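# Minimal usage sketch (file name hypothetical):
#   AKVO = loadAkvoData("MySite.yaml")
#   print(AKVO.META["B_0"]["intensity"], AKVO.transFreq)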
def main():
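"""
Build a Merlin V0 kernel: read the Akvo dataset and coil geometry given on
the command line, set up a layered-earth model, and write the computed
kernel to a YAML file.
"""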
if len(sys.argv) < 3:
print("usage: python calcAkvoKernel.py AkvoDataset.yaml Coil1.yaml")
exit()
AKVO = loadAkvoData(sys.argv[1])
B_inc = AKVO.META["B_0"]["inc"]
B_dec = AKVO.META["B_0"]["dec"]
B0 = AKVO.META["B_0"]["intensity"]
gamma = 2.67518e8
fT = AKVO.transFreq
#B0 = (fL*2.*np.pi) /gamma * 1e9
Coil1 = em1d.PolygonalWireAntenna.DeSerialize( sys.argv[2] )
Coil1.SetNumberOfFrequencies(1)
Coil1.SetFrequency(0, fT)
Coil1.SetCurrent(1.)
lmod = em1d.LayeredEarthEM()
lmod.SetNumberOfLayers(4)
lmod.SetLayerThickness([15.49, 28.18])
lmod.SetLayerConductivity([0.0, 1./16.91, 1./24.06, 1./33.23])
lmod.SetMagneticFieldIncDecMag( B_inc, B_dec, B0, lc.NANOTESLA )
Kern = mrln.KernelV0()
Kern.PushCoil( "Coil 1", Coil1 )
Kern.SetLayeredEarthEM( lmod );
Kern.SetIntegrationSize( (200,200,200) )
Kern.SetIntegrationOrigin( (0,0,0) )
Kern.SetTolerance( 1e-9 )
Kern.SetMinLevel( 3 )
Kern.SetHankelTransformType( lc.FHTKEY201 )
Kern.AlignWithAkvoDataset( sys.argv[1] )
thick = np.geomspace(.5, 10,num=40)
iface = np.cumsum(thick)
Kern.SetDepthLayerInterfaces(iface)
#Kern.SetDepthLayerInterfaces(np.geomspace(1, 110, num=40))
#Kern.SetDepthLayerInterfaces(np.linspace(1, 110, num=50))
#Kern.SetDepthLayerInterfaces(np.geomspace(1, 110, num=40))
# autAkvoDataNode = YAML::LoadFile(argv[4]);
# Kern->AlignWithAkvoDataset( AkvoDataNode );
#Kern.SetPulseDuration(0.040)
#Kern.SetPulseCurrent( [1.6108818092452406, 1.7549935078885168, 1.7666319459646016, 1.9270787752430283,
# 1.9455431806179229, 2.111931346726564, 2.1466747256211747, 2.3218217392379588,
# 2.358359967649008, 2.5495654202189058, 2.5957289164577992, 2.8168532605800802,
# 2.85505242699187, 3.1599429539069774, 3.2263673040205068, 3.6334182368296544,
# 3.827985200119751, 4.265671313014058, 4.582237014873297, 5.116839616183394,
# 5.515173073160611, 6.143620383280934, 6.647972282096122, 7.392577402979211,
# 8.020737177449933, 8.904435233295793, 9.701975105606063, 10.74508217792577,
# 11.743887525923592, 12.995985956061467, 14.23723766879807, 15.733870137824457,
# 17.290155933625808, 19.07016662950366, 21.013341340455703, 23.134181634845618,
# 25.570925414182238, 28.100862178905476, 31.13848909847073, 34.16791099558486,
# 37.95775984680512, 41.589619321873165, 46.327607251605286, 50.667786337299205,
# 56.60102493062895, 61.81174065797068, 69.23049946198458, 75.47409803238031,
# 84.71658869065816, 92.1855007134236, 103.77129947551164, 112.84577430578537,
# 127.55127257092909, 138.70199812969176, 157.7443764728878, 171.39653462998626]
#)
Kern.CalculateK0( ["Coil 1"], ["Coil 1"], False )
yml = open('akvoK3-' + str(Kern.GetTolerance()) + '.yaml', 'w')
print(Kern, file=yml)
K0 = Kern.GetKernel()
#plt.matshow(np.abs(K0))
#plt.show()
if __name__ == "__main__":
main() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/calcAkvoKernel--save.py | calcAkvoKernel--save.py |
from numpy import vstack, hstack, eye, ones, zeros, linalg, \
newaxis, r_, flipud, convolve, matrix, array
from scipy.signal import lfilter
def lfilter_zi(b,a):
#compute the zi state from the filter parameters. see [Gust96].
#Based on:
# [Gust96] Fredrik Gustafsson, Determining the initial states in forward-backward
# filtering, IEEE Transactions on Signal Processing, pp. 988--992, April 1996,
# Volume 44, Issue 4
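# zi is chosen so that lfilter, started from state zi*x[0], behaves as if the
# constant signal x[0] had been applied forever; this suppresses the startup
# transient when filtfilt runs the filter forward and backward.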
n=max(len(a),len(b))
zin = ( eye(n-1) - hstack( (-a[1:n,newaxis],
vstack((eye(n-2), zeros(n-2))))))
zid= b[1:n] - a[1:n]*b[0]
zi_matrix=linalg.inv(zin)*(matrix(zid).transpose())
zi_return=[]
#convert the result into a regular array (not a matrix)
for i in range(len(zi_matrix)):
zi_return.append(float(zi_matrix[i][0]))
return array(zi_return)
def filtfilt(b,a,x):
#For now only accepting 1d arrays
ntaps=max(len(a),len(b))
edge=ntaps*3
if x.ndim != 1:
raise ValueError("filtfilt only accepts 1-dimensional arrays.")
#x must be bigger than edge
if x.size < edge:
raise ValueError("Input vector needs to be bigger than 3 * max(len(a), len(b)).")
if len(a) < ntaps:
a=r_[a,zeros(len(b)-len(a))]
if len(b) < ntaps:
b=r_[b,zeros(len(a)-len(b))]
zi=lfilter_zi(b,a)
#Grow the signal to have edges for stabilizing
#the filter with inverted replicas of the signal
s=r_[2*x[0]-x[edge:1:-1],x,2*x[-1]-x[-1:-edge:-1]]
#in the case of one go we only need one of the extrems
# both are needed for filtfilt
(y,zf)=lfilter(b,a,s,-1,zi*s[0])
(y,zf)=lfilter(b,a,flipud(y),-1,zi*y[-1])
return flipud(y[edge-1:-edge+1])
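# Note: recent SciPy versions provide scipy.signal.filtfilt and
# scipy.signal.lfilter_zi; this standalone implementation predates them.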
if __name__=='__main__':
from scipy.signal import butter
from numpy import sin, arange, pi
from numpy.random import randn
from pylab import plot, legend, show
t=arange(-1,1,.01)
x=sin(2*pi*t*.5+2)
#xn=x + sin(2*pi*t*10)*.1
xn=x+randn(len(t))*0.05
[b,a]=butter(3,0.05)
z=lfilter(b,a,xn)
y=filtfilt(b,a,xn)
plot(x,'c')
plot(xn,'k')
plot(z,'r')
plot(y,'g')
legend(('original','noisy signal','lfilter - butter 3 order','filtfilt - butter 3 order'))
show() | Akvo | /Akvo-1.7.2-py3-none-any.whl/akvo/tressel/filtfilt.py | filtfilt.py |