code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
from pysiriproxy.objects.baseObject import SiriObject
class _RequestCompleted(SiriObject):
    '''Notify the iPhone that the current request can be completed.
    '''

    def __init__(self, callbacks=None):
        '''
        * callbacks -- The list of callbacks
        '''
        SiriObject.__init__(self, "RequestCompleted", "com.apple.ace.system")
        # Use a fresh list when no callbacks were supplied so instances
        # never share a mutable default.
        if callbacks is None:
            self.callbacks = []
        else:
            self.callbacks = callbacks
class _StartRequest(SiriObject):
    '''Signal to Siri that a new request is being started.
    '''

    def __init__(self, utterance="Testing", handsFree=False, proxyOnly=False):
        '''
        * utterance -- The utterance to perform
        * handsFree -- True if in hands free mode, False otherwise
        * proxyOnly -- True if proxy only mode, False otherwise
        '''
        SiriObject.__init__(self, "StartRequest", "com.apple.ace.system")
        self.utterance = utterance
        self.handsFree = handsFree
        # The proxyOnly property is non-standard: only attach it when it is
        # enabled so it is never sent in the False case.
        if proxyOnly:
            self.proxyOnly = proxyOnly
class _GetRequestOrigin(SiriObject):
    '''Request object that asks for the origin of the current request.
    '''

    def __init__(self, desiredAccuracy="HundredMeters", searchTimeout=8.0,
                 maxAge=1800):
        '''
        * desiredAccuracy -- The desired accuracy for the result
        * searchTimeout -- The timeout for the result
        * maxAge -- The maximum age for the result
        '''
        SiriObject.__init__(self, "GetRequestOrigin", "com.apple.ace.system")
        self.maxAge = maxAge
        self.searchTimeout = searchTimeout
        self.desiredAccuracy = desiredAccuracy
class _SetRequestOrigin(SiriObject):
    '''Request object that sets the geographic origin of a request.
    '''

    def __init__(self, longitude=-122.030089795589, latitude=37.3317031860352,
                 desiredAccuracy="HundredMeters", altitude=0.0, speed=1.0,
                 direction=1.0, age=0, horizontalAccuracy=50.0,
                 verticalAccuracy=10.0):
        '''
        * longitude -- The longitude for the request
        * latitude -- The latitude for the request
        * desiredAccuracy -- The desired accuracy for the request
        * altitude -- The altitude for the request
        * speed -- The speed for the request
        * direction -- The direction for the request
        * age -- The age for the request
        * horizontalAccuracy -- The horizontal accuracy for the request
        * verticalAccuracy -- The vertical accuracy for the request
        '''
        SiriObject.__init__(self, "SetRequestOrigin", "com.apple.ace.system")
        # Store all positional properties on the object; attribute order is
        # not significant.
        self.longitude = longitude
        self.latitude = latitude
        self.desiredAccuracy = desiredAccuracy
        self.altitude = altitude
        self.speed = speed
        self.direction = direction
        self.age = age
        self.horizontalAccuracy = horizontalAccuracy
        self.verticalAccuracy = verticalAccuracy
class Requests:
    '''Registry of the supported Request types.

    Provides a factory method for creating Request objects of a
    specific type.
    '''

    GetRequestOrigin = "GetRequestOrigin"
    '''The GetRequestOrigin object type.'''

    RequestCompleted = "Completed"
    '''The RequestCompleted object type.'''

    SetRequestOrigin = "SetRequestOrigin"
    '''The SetRequestOrigin object type.'''

    StartRequest = "StartRequest"
    '''The StartRequest object type.'''

    # Map each request type name onto its implementation class
    __TypeMap = {
        GetRequestOrigin: _GetRequestOrigin,
        RequestCompleted: _RequestCompleted,
        SetRequestOrigin: _SetRequestOrigin,
        StartRequest: _StartRequest
        }

    @classmethod
    def create(cls, requestType, *args, **kwargs):
        '''Create a Request of the given type.

        * requestType -- The request type
        * args -- The arguments
        * kwargs -- The keyword arguments

        Returns None for an unknown request type.
        '''
        requestClass = cls.__TypeMap.get(requestType)
        if requestClass is None:
            return None
        return requestClass(*args, **kwargs)
from pysiriproxy.objects.commands import Commands
from pysiriproxy.objects.baseObject import SiriObject
class _CustomCommand(SiriObject):
    '''A StartRequest carrying a custom text command to be issued.
    '''

    def __init__(self, command):
        '''
        * command -- The text command to issue
        '''
        SiriObject.__init__(self, "StartRequest", "com.apple.ace.system")
        self.handsFree = False
        self.utterance = self.__createCommand(command)

    def __createCommand(self, command):
        '''Return the command serialized as an utterance string.

        * command -- The command to execute
        '''
        return "%s" % command
class _WebSearch(SiriObject):
    '''A StartRequest command that performs a web search for a given
    search phrase.
    '''

    def __init__(self, query):
        '''
        * query -- The text to query on the web
        '''
        SiriObject.__init__(self, "StartRequest", "com.apple.ace.system")
        self.handsFree = False
        self.utterance = self.__createQuery(query)

    def __createQuery(self, query):
        '''Wrap the query text in the web-search utterance markers.

        * query -- The query
        '''
        prefix = "^webSearchQuery^=^"
        suffix = "^^webSearchConfirmation^=^Yes^"
        return prefix + query + suffix
class Actions:
    '''Registry of the supported Action types.

    Provides a factory function for creating Actions of a specific type.
    '''

    CustomCommand = "CustomCommand"
    '''The CustomCommand action type.'''

    WebSearch = "WebSearch"
    '''The WebSearch action type.'''

    # Map each action type name onto its implementation class
    __TypeMap = {
        CustomCommand: _CustomCommand,
        WebSearch: _WebSearch
        }

    @classmethod
    def create(cls, actionType, *args, **kwargs):
        '''Return a specific Action wrapped in a SendCommands object so
        it can be sent to Siri as a command.

        * actionType -- The type of Action to create
        * args -- The arguments
        * kwargs -- The keyword arguments

        Returns None for an unknown action type.
        '''
        actionClass = cls.__TypeMap.get(actionType)
        if actionClass is None:
            return None
        action = actionClass(*args, **kwargs)
        return Commands.create(Commands.SendCommands, [action])
from pysiriproxy.constants import DirectionTypes
from pysiriproxy.objects.baseObject import SiriObject
# Include all the various types of objects we can create
from pysiriproxy.objects.views import Views
from pysiriproxy.objects.actions import Actions
from pysiriproxy.objects.buttons import Buttons
from pysiriproxy.objects.commands import Commands
from pysiriproxy.objects.requests import Requests
from pysiriproxy.objects.dataObjects import DataObjects
class ObjectFactory:
    '''The ObjectFactory provides factory methods for constructing concrete
    :class:`.SiriObject` objects of specific types.
    '''

    @classmethod
    def action(cls, actionType, *args, **kwargs):
        '''Create a :class:`.SiriObject` :class:`.Action` of the specific type.

        * actionType -- The type of action to create
        * args -- The arguments
        * kwargs -- The keyword arguments
        '''
        return Actions.create(actionType, *args, **kwargs)

    @classmethod
    def button(cls, buttonType, buttonText, *args, **kwargs):
        '''Create a :class:`.SiriObject` :class:`.Button` of the specific type.

        * buttonType -- The type of Button to create
        * buttonText -- The button text
        * args -- The arguments
        * kwargs -- The keyword arguments
        '''
        return Buttons.create(buttonType, buttonText, *args, **kwargs)

    @classmethod
    def utterance(cls, displayText, spokenText=None, listenAfterSpeaking=False,
                  identifier="Misc#ident"):
        '''Create a :class:`.SiriObject` utterance.

        * displayText -- The text to display
        * spokenText -- The text that Siri will speak
        * listenAfterSpeaking -- True for Siri to listen for a response
        * identifier -- The identifier for the utterance
        '''
        return Views.create(Views.Utterance, displayText=displayText,
                            spokenText=spokenText,
                            listenAfterSpeaking=listenAfterSpeaking,
                            dialogIdentifier=identifier)

    @classmethod
    def location(cls, street=None, city=None, stateCode=None, countryCode=None,
                 postalCode=None, latitude=None, longitude=None):
        '''Create a :class:`.SiriObject` location.

        * street -- The string containing the street for the location
        * city -- The string containing the city for the location
        * stateCode -- The string containing the state code for the location
        * countryCode -- The string containing the country code for the
                         location
        * postalCode -- The string containing the postal code for the location
        * latitude -- The string containing the latitude for the location
        * longitude -- The string containing the longitude for the location
        '''
        return DataObjects.create(DataObjects.Location, street=street,
                                  city=city, stateCode=stateCode,
                                  countryCode=countryCode,
                                  postalCode=postalCode, latitude=latitude,
                                  longitude=longitude)

    @classmethod
    def currentLocation(cls, label=None):
        '''Create a :class:`.SiriObject` for the current location.

        * label -- The label to display on the map pin
        '''
        return DataObjects.create(DataObjects.CurrentLocation, label=label)

    @classmethod
    def mapItem(cls, locations):
        '''Create a :class:`.SiriObject` map item.

        * locations -- The list of (label, location) tuples to display on
                       the map
        '''
        items = []
        # Create map items for all of the locations in the given list
        for label, location in locations:
            mapItem = DataObjects.create(DataObjects.MapItem, label=label,
                                         location=location)
            items.append(mapItem)
        return Views.create(Views.MapItemSnippet, useCurrentLocation=False,
                            items=items)

    @classmethod
    def directions(cls, directionsType, source, destination, utterance=None):
        '''Create a :class:`.SiriObject` to display directions between two
        locations.

        * directionsType -- The type of directions to provide
        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        # @todo: Allow source and destination to be passed as
        #        Locations, OR MapItems, and convert the Locations
        #        to MapItems accordingly.
        mapPoints = Views.create(Views.MapPoints, source=source,
                                 destination=destination,
                                 directionsType=directionsType)
        commands = [mapPoints]
        resultCallback = Commands.create(Commands.ResultCallback,
                                         commands=commands)
        callbacks = [resultCallback]
        views = []
        # Add the utterance to the views, if an utterance is given
        if utterance is not None:
            views.append(utterance)
        addViews = Views.create(Views.AddViews, callbacks=callbacks,
                                views=views)
        # Note: Adding the ace id makes the map points work properly
        addViews.setAceId()
        commands = [addViews]
        resultCallback = Commands.create(Commands.ResultCallback,
                                         commands=commands)
        callbacks = [resultCallback]
        completed = Requests.create(Requests.RequestCompleted,
                                    callbacks=callbacks)
        return completed

    @classmethod
    def drivingDirections(cls, source, destination, utterance=None):
        '''Create driving directions between the two locations.

        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        # Bugfix: was ``cls.direction`` which does not exist and raised
        # AttributeError -- the factory method is named ``directions``.
        return cls.directions(DirectionTypes.Driving, source, destination,
                              utterance=utterance)

    @classmethod
    def walkingDirections(cls, source, destination, utterance=None):
        '''Create walking directions between the two locations.

        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        # Bugfix: was ``cls.direction`` (see drivingDirections)
        return cls.directions(DirectionTypes.Walking, source, destination,
                              utterance=utterance)

    @classmethod
    def publicTransitDirections(cls, source, destination, utterance=None):
        '''Create public transportation directions between the two locations.

        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        # Bugfix: was ``cls.direction`` (see drivingDirections)
        return cls.directions(DirectionTypes.PublicTransit, source,
                              destination, utterance=utterance)
class ResponseFactory:
    '''The ResponseFactory is responsible for creating specific
    :class:`.SiriObject` responses to be sent from pysiriproxy to the iPhone
    user. These responses include things such as, creating a view composed of
    :class:`.SiriObjects`, sending a request completed object, and others.
    '''

    @classmethod
    def directions(cls, refId, directionsType, source, destination,
                   utterance=None):
        '''Create directions to be sent to the iPhone.

        * refId -- The reference id
        * directionsType -- The type of directions to provide
        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        directions = ObjectFactory.directions(directionsType, source,
                                              destination,
                                              utterance=utterance)
        directions.makeRoot(refId)
        return directions.toDict()

    @classmethod
    def drivingDirections(cls, refId, source, destination, utterance=None):
        '''Create driving directions to be sent to the iPhone.

        * refId -- The reference id
        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        # Bugfix: refId was previously dropped from the call, shifting
        # every argument one position to the left.
        return cls.directions(refId, DirectionTypes.Driving, source,
                              destination, utterance=utterance)

    @classmethod
    def walkingDirections(cls, refId, source, destination, utterance=None):
        '''Create walking directions to be sent to the iPhone.

        * refId -- The reference id
        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        # Bugfix: refId was previously dropped (see drivingDirections)
        return cls.directions(refId, DirectionTypes.Walking, source,
                              destination, utterance=utterance)

    @classmethod
    def publicTransitDirections(cls, refId, source, destination,
                                utterance=None):
        '''Create public transportation directions to be sent to the iPhone.

        * refId -- The reference id
        * source -- The source location
        * destination -- The destination location
        * utterance -- The utterance to speak
        '''
        # Bugfix: refId was previously dropped (see drivingDirections)
        return cls.directions(refId, DirectionTypes.PublicTransit, source,
                              destination, utterance=utterance)

    @classmethod
    def view(cls, refId, subObjects, dialogPhase="Completion"):
        '''Create an utterance view composed of several sub objects.

        * refId -- The reference id
        * subObjects -- The list of SiriObjects the view will be composed of
                        or a list of tuple arguments to create SiriObjects
        * dialogPhase -- The dialogPhase
        '''
        addViews = Views.create(Views.AddViews, dialogPhase=dialogPhase,
                                views=subObjects)
        addViews.makeRoot(refId)
        return addViews.toDict()

    @classmethod
    def utterance(cls, refId, displayText, spokenText=None,
                  listenAfterSpeaking=False, identifier="Misc#ident"):
        '''Create an utterance with the given display text, and spoken text.

        * refId -- The reference id
        * displayText -- The text to be displayed
        * spokenText -- The text to be spoken by Siri
        * listenAfterSpeaking -- True for Siri to listen for a response
                                 after speaking, False otherwise
        * identifier -- The identifier for the utterance
        '''
        utterance = ObjectFactory.utterance(displayText, spokenText,
                                            listenAfterSpeaking, identifier)
        return ResponseFactory.view(refId, [utterance])

    @classmethod
    def requestCompleted(cls, refId, callbacks=None):
        '''Create a request completed object.

        * refId -- The reference id
        * callbacks -- The list of callbacks
        '''
        completed = Requests.create(Requests.RequestCompleted,
                                    callbacks=callbacks)
        completed.makeRoot(refId)
        return completed.toDict()
zopyx.sharepoint
================
``zopyx.sharepoint`` allows to interact Python-based application with
Sharepoint ``lists`` through the Sharepoint SOAP API (tested against
Microsoft Sharepoint Services 3.0).
Features
--------
* retrieve Sharepoint list definitions
* retrieve all list items
* add list items
* delete list items
* update list items
* generic queries
* authentication over NTLM
Usage
-----
In order to connect to Sharepoint you need the following parameters
- the Lists WSDL URL
- the ID/Name of the related Sharepoint list you want to interact with
- a valid Sharepoint username and password (having the related rights)
API usage
---------
Connecting to sharepoint
++++++++++++++++++++++++
In order to connect to Sharepoint you need to import the ``Connector``
method which is a factory returning a ``ListEndpoint`` instance::
> from zopyx.sharepoint import Connector
> url = 'http://sharepoint/bereiche/onlineschulungen/'
> username = 'YourDomain\\account'
> password = 'secret'
> list_id = '60e3f442-6faa-4b49-814d-2ce2ec88b8d5'
> service = Connector(url, username, password, list_id)
Sharepoint list model introspection
+++++++++++++++++++++++++++++++++++
The internals of the list schema is available through the ``model`` property
of the ``ListEndPoint`` instance::
> fields = service.model
The primary key of the list is exposed through the ``primary_key`` property::
> primary_key = service.primary_key
The lists of all required field names and all fields is available through::
> all_fields = service.all_fields
> required_fields = service.required_fields
List item deletion
++++++++++++++++++
In order to delete list items by their primary key values, you can use
the ``deleteItems()`` method::
> result = service.deleteItems('54', '55')
> print result
> print result.result
> print result.ok
The ``result`` object is an instance of ``ParsedSoapResult`` providing a
flag ``ok`` (True|False) indicating the overall success or overall failure
of the operation. The individual error codes are available by iterating over the
``result`` property of the ``ParsedSoapResult`` instance.
Updating list items
+++++++++++++++++++
You can update existing list items by passing one or multiple dictionaries
to ``updateItems()``. Each dict must contain the value of the related primary key
(in this case the ``ID`` field)::
> data = dict(ID='77', Title=u'Ruebennase', FirstName=u'Heinz')
> result = service.updateItems(data)
> print result
> print result.result
> print result.ok
``updateItems()`` will not raise any exception. Instead you need to
check the ``ok`` property of the result object and if needed the individual
items of the ``result`` property::
# update an item (non-existing ID)
> data = dict(ID='77000', Title=u'Becker')
> result = service.updateItems(data)
> print result
> print result.result
> print result.ok
Adding items to a list
++++++++++++++++++++++
The ``addItems()`` method works like the ``updateItems()`` method
except that you do not have to pass in a primary key (since it is not known
on the client side). The assigned primary key value after adding
the item to the list should be available from the ``result`` object::
> data = dict(Title=u'Ruebennase', FirstName=u'Heinz')
> result = service.addItems(data)
> print result
> print result.result
> print result.ok
> print 'assigned ID:', result.result[0]['row']._ows_ID
Retrieving a single list item
+++++++++++++++++++++++++++++
``getItem()`` will return a single item by its primary key value::
> data = service.getItem('77')
Retrieving all list items
+++++++++++++++++++++++++
``getItems()`` will return all list items (use with care!)::
> items = service.getItems()
Generic query API
+++++++++++++++++
``query(**kw)`` can be used to query the list with arbitrary query parameters
where each subquery must perform an exact match. All subqueries are combined
using a logical AND::
> items = service.query(FirstName='Heinz', Title='Becker')
The result is returned as a Python list of dictified list items.
All query parameters must represent a valid field name of the list (ValueError
exception raised otherwise).
In order to perform a substring search across _all_ query parameter you may
pass the ``mode='contains'`` parameter. To specify a prefix search across
all query parameters, use ``mode='beginswith'``.
View support
++++++++++++
``zopyx.sharepoint`` supports list views of Sharepoint. You can either
set a default view used for querying Sharepoint like::
> service.setDefaultView('{D9DF14B-21F2-4D75-B796-EA74647C30C6}')
or you select the view on a per-query basis by passing the view name
as ``viewName`` method parameter (applies to ``getItem()``,
``getItems()`` and ``query()``)::
> items = service.getItems(viewName='{D9DF14B-21F2-4D75-B796-EA74647C30C6}')
Command line usage
------------------
``zopyx.sharepoint`` comes with a small ``sharepoint-inspector`` commandline utility::
sharepoint-inspector --url <URL> --list <LIST-ID-OR-NAME> --username <USERNAME> --password <PASSWORD> --cmd <CMD>
where <CMD> is either ``fields`` or ``items``
Requirements
------------
* Python 2.4 or higher (no support for Python 3.X)
Tested
------
* tested with Python 2.4-2.6
* suds 0.4.1 beta or checkout of the suds trunk (https://fedorahosted.org/suds/). suds 0.4.0 is _not_ sufficient!
* python-ntlm 1.0
* Microsoft Sharepoint Services 3.0 API
Author
------
Written for Haufe-Lexware GmbH, Freiburg, Germany.
| ZOPYX Limited
| Andreas Jung
| Charlottenstr. 37/1
| D-72070 Tuebingen
| www.zopyx.com
| [email protected]
| zopyx.sharepoint | /zopyx.sharepoint-0.2.0.zip/zopyx.sharepoint-0.2.0/README.txt | README.txt |
################################################################
# zopyx.sharepoint
################################################################
import re
import pprint
import logging
from ntlm import HTTPNtlmAuthHandler
from suds.client import Client
from suds.sax.element import Element
from suds.sax.attribute import Attribute
from suds.transport.https import WindowsHttpAuthenticated
from logger import logger as LOG
import patches
# Sharepoint field descriptors start with *one* underscore (hopefully)
field_regex = re.compile(r'^_[a-zA-Z0-9]')
_marker = object
class OperationalError(Exception):
    """ Generic error """
class NotFound(Exception):
    """ List item not found """
class DictProxy(dict):
    """ Dict-Proxy for mapped objects providing attribute-style access.
    """

    def __getattribute__(self, name):
        # A stored key shadows any real attribute with the same name.
        if name in dict.keys(self):
            return self.get(name)
        return super(dict, self).__getattribute__(name)

    def __getattr__(self, name, default=None):
        if name in dict.keys(self):
            return self.get(name, default)
        # NOTE(review): object defines no __getattr__, so this super() call
        # raises AttributeError itself for unknown names - the net effect is
        # still an AttributeError, but with a confusing message; confirm
        # before relying on this fallback.
        return super(dict, self).__getattr__(name, default)
def Connector(url, username, password, list_id, timeout=65):
""" Sharepoint SOAP connector factory """
LOG.info('Connecting to Sharepoint (%s, %s, %s)' % (url, username, list_id))
transport = WindowsHttpAuthenticated(username=username,
password=password,
timeout=timeout)
if not 'Lists.asxml' in url:
url = url + '/_vti_bin/Lists.asmx?WSDL'
try:
client = Client(url, transport=transport)
client.set_options(service='Lists', port='ListsSoap12')
return ListEndpoint(client, list_id)
except Exception, e:
# *try* to capture authentication related error.
# Suds fails dealing with a 403 response from Sharepoint in addition
# it is unable to deal with the error text returned from Sharepoint as
# *HTML*.
if '<unknown>' in str(e):
raise OperationalError('Unknown bug encountered - *possibly* an authentication problem (%s)' % e)
raise
class ParsedSoapResult(object):
    """ Pythonic wrapper around the raw Sharepoint SOAP result.

    Exposes two attributes:

    ``ok``     - True if all operations completed successfully,
                 False otherwise
    ``result`` - a list of dicts containing the original SOAP
                 response ``code`` and ``text``
    """

    def __init__(self, results):
        self.raw_result = results
        self.ok = True
        self.result = list()
        # Sharepoint returns a single 'Result' instance when exactly one
        # list item was touched and a sequence otherwise - normalize both
        # shapes to a plain list first.
        raw = results.Results.Result
        if isinstance(raw, (list, tuple)):
            normalized = list(raw)
        else:
            normalized = [raw]
        for item_result in normalized:
            entry = dict(code=item_result.ErrorCode,
                         success=item_result.ErrorCode == '0x00000000')
            for key in ('ErrorText', ):
                value = getattr(item_result, key, _marker)
                if value is not _marker:
                    entry[key.lower()] = value
            row = getattr(item_result, 'row', _marker)
            if row is not _marker:
                # should be serialized
                entry['row'] = item_result.row
            self.result.append(entry)
            # A single failing item marks the whole batch as failed
            if item_result.ErrorCode != '0x00000000':
                self.ok = False
class ListEndpoint(object):
    '''High-level wrapper for one Sharepoint list accessed through the
    SOAP "Lists" service (suds client).
    '''

    def __init__(self, client, list_id):
        # client  -- configured suds Client instance
        # list_id -- ID or name of the Sharepoint list
        self.client = client
        self.service = client.service
        self.list_id = list_id
        self.viewName = None
        # perform some introspection on the list
        self.model = self._getFields()
        self.required_fields = self._required_fields()
        self.all_fields = self.model.keys()
        self.primary_key = self._find_primary_key()

    def _getFields(self):
        """ extract field list """
        list_ = self.service.GetList(self.list_id)
        fields = dict()
        for row in list_.List.Fields.Field:
            # skip Sharepoint-internal fields (leading underscore in name)
            if row._Name.startswith('_'):
                continue
            # dictify field description (chop of leading underscore)
            d = dict()
            for k, v in row.__dict__.items():
                if field_regex.match(k):
                    # chop of leading underscore
                    d[unicode(k[1:])] = v
            fields[row._Name] = d
        return fields

    def _find_primary_key(self):
        """ Return the name of the primary key field of the list """
        for k, field_d in self.model.items():
            if field_d.get('PrimaryKey') == u'TRUE':
                return k
        raise OperationalError('No primary key found in sharepoint list description')

    def _required_fields(self):
        """ Return the list of required field names in Sharepoint """
        return [d['Name']
                for d in self.model.values()
                if d.get('Required') == 'TRUE']

    def _serializeListItem(self, item):
        """ Serialize a list item as dict.

        Missing fields are mapped to None; values are exposed with
        attribute-style access via DictProxy.
        """
        d = DictProxy()
        for fieldname in self.model:
            # suds exposes row values as attributes prefixed with '_ows_'
            v = getattr(item, '_ows_' + fieldname, _marker)
            if v is _marker:
                v = None
            d[fieldname] = v
        return d

    def _preflight(self, data, primary_key_check=True):
        """ Perform some sanity checks on data.

        * data -- dict of fieldname -> value to be written
        * primary_key_check -- require the primary key to be present
          (disabled for additions where the key is assigned server-side)

        Raises ValueError on missing primary key or unknown field names.
        """
        # data must include the value of the primary key field
        value_primary_key = data.get(self.primary_key)
        if primary_key_check and value_primary_key is None:
            raise ValueError('No value for primary key "%s" found in update dict (%s)' % (self.primary_key, data))
        data_keys = set(data.keys())
        all_fields = set(self.all_fields)
        if not data_keys.issubset(all_fields):
            disallowed = ', '.join(list(data_keys - all_fields))
            raise ValueError('Data dictionary contains fieldnames unknown to the Sharepoint list model (Disallowed field names: %s)' % disallowed)

    def setDefaultView(self, viewName):
        """ set the default viewName parameter """
        self.viewName = viewName

    def getItems(self, rowLimit=999999999, viewName=None):
        """ Return all list items without further filtering """
        items = self.service.GetListItems(self.list_id, viewName=viewName or self.viewName, rowLimit=rowLimit)
        if int(items.listitems.data._ItemCount) > 0:
            return [self._serializeListItem(item) for item in items.listitems.data.row]
        return []

    def getItem(self, item_id, viewName=None):
        """ Return a single list item matching the given primary key value
        (empty list when nothing matches).
        """
        # Build the CAML query: <Query><Where><Eq>primary_key == item_id
        query0= Element('ns1:query')
        query = Element('Query')
        query0.append(query)
        where = Element('Where')
        query.append(where)
        eq = Element('Eq')
        where.append(eq)
        fieldref = Element('FieldRef').append(Attribute('Name', self.primary_key))
        value = Element('Value').append(Attribute('Type', 'Number')).setText(item_id)
        eq.append(fieldref)
        eq.append(value)
        viewfields = Element('ViewFields')
        viewfields.append(Element('FieldRef').append(Attribute('Name', self.primary_key)))
        queryOptions = Element('queryOptions')
        result = self.service.GetListItems(self.list_id,
                                           viewName=viewName or self.viewName,
                                           query=query0,
                                           viewFields=viewfields,
                                           queryOptions=queryOptions,
                                           rowLimit=1)
        if int(result.listitems.data._ItemCount) > 0:
            return self._serializeListItem(result.listitems.data.row)
        return []

    def query(self, mode='exact', viewName=None, **kw):
        """ A generic query API. All list field names can be passed to query()
        together with the query values. All subqueries are combined using AND.
        All search criteria must perform an exact match. A better
        implementation of query() may support the 'Contains' or 'BeginsWith'
        query options (as given through CAML). The mode=exact ensures an exact
        match of all query parameter. mode=contains performs a substring search
        across *all* query parameters. mode=beginswith performs a prefix search
        across *all* query parameters.
        """
        if not mode in ('exact', 'contains', 'beginswith'):
            raise ValueError('"mode" must be either "exact", "beginswith" or "contains"')
        # map mode parameters to CAML query options
        query_modes = {'exact' : 'Eq', 'beginswith' : 'BeginsWith', 'contains' : 'Contains'}
        query0= Element('ns1:query')
        query = Element('Query')
        query0.append(query)
        where = Element('Where')
        query.append(where)
        if len (kw) > 1: # more than one query parameter requires <And>
            and_= Element('And')
            where.append(and_)
            where = and_
        # build query
        for k, v in kw.items():
            if not k in self.all_fields:
                raise ValueError('List definition does not contain a field "%s"' % k)
            query_mode = Element(query_modes[mode])
            where.append(query_mode)
            fieldref = Element('FieldRef').append(Attribute('Name', k))
            value = Element('Value').append(Attribute('Type', self.model[k]['Type'])).setText(v)
            query_mode.append(fieldref)
            query_mode.append(value)
        viewfields = Element('ViewFields')
        viewfields.append(Element('FieldRef').append(Attribute('Name', self.primary_key)))
        queryOptions = Element('queryOptions')
        result = self.service.GetListItems(self.list_id,
                                           viewName=viewName or self.viewName,
                                           query=query0,
                                           viewFields=viewfields,
                                           queryOptions=queryOptions,
                                           )
        # Normalize the three possible result shapes (none / one / many)
        row_count = int(result.listitems.data._ItemCount)
        if row_count == 1:
            return [self._serializeListItem(result.listitems.data.row)]
        elif row_count > 1:
            return [self._serializeListItem(item) for item in result.listitems.data.row]
        else:
            return []

    def deleteItems(self, *item_ids):
        """ Remove list items given by value of their primary key """
        # Build a <Batch> of Cmd="Delete" methods, one per item id
        batch = Element('Batch')
        batch.append(Attribute('OnError','Return')).append(Attribute('ListVersion','1'))
        for i, item_id in enumerate(item_ids):
            method = Element('Method')
            method.append(Attribute('ID', str(i+1))).append(Attribute('Cmd', 'Delete'))
            method.append(Element('Field').append(Attribute('Name', self.primary_key)).setText(item_id))
            batch.append(method)
        updates = Element('ns0:updates')
        updates.append(batch)
        result = self.service.UpdateListItems(self.list_id, updates)
        return ParsedSoapResult(result)

    def updateItems(self, *update_items):
        """ Update list items as given through a list of update_item dicts
        holding the data to be updated. The list items are identified
        through the value of the primary key inside the update dict.
        """
        batch = Element('Batch')
        batch.append(Attribute('OnError','Return')).append(Attribute('ListVersion','1'))
        for i, d in enumerate(update_items):
            # validates field names and presence of the primary key
            self._preflight(d)
            method = Element('Method')
            method.append(Attribute('ID', str(i+1))).append(Attribute('Cmd', 'Update'))
            for k,v in d.items():
                method.append(Element('Field').append(Attribute('Name', k)).setText(v))
            batch.append(method)
        updates = Element('ns0:updates')
        updates.append(batch)
        result = self.service.UpdateListItems(self.list_id, updates)
        return ParsedSoapResult(result)

    def addItems(self, *addable_items):
        """ Add a sequence of items to the list. All items must be passed as dict.
        The list of assigned primary key values should from the 'row' values of
        the result object.
        """
        batch = Element('Batch')
        batch.append(Attribute('OnError','Return')).append(Attribute('ListVersion','1'))
        for i, d in enumerate(addable_items):
            # primary key is assigned by the server - skip the key check
            self._preflight(d, primary_key_check=False)
            method = Element('Method')
            method.append(Attribute('ID', str(i+1))).append(Attribute('Cmd', 'New'))
            for k,v in d.items():
                method.append(Element('Field').append(Attribute('Name', k)).setText(v))
            batch.append(method)
        updates = Element('ns0:updates')
        updates.append(batch)
        result = self.service.UpdateListItems(self.list_id, updates)
        return ParsedSoapResult(result)

    def checkout_file(self, pageUrl, checkoutToLocal=False):
        """ Checkout a file """
        return self.service.CheckOutFile(pageUrl, checkoutToLocal)

    def checkin_file(self, pageUrl, comment=''):
        """ Checkin a file with an optional checkin comment """
        return self.service.CheckInFile(pageUrl, comment)
import suds.metrics as metrics
from suds.transport import TransportError, Request
from suds.plugin import PluginContainer
from logger import logger as log
def send(self, soapenv):
    """
    Send soap message.
    @param soapenv: A soap envelope to send.
    @type soapenv: L{Document}
    @return: The reply to the sent message.
    @rtype: I{builtin} or I{subclass of} L{Object}
    """
    result = None
    location = self.location()
    binding = self.method.binding.input
    transport = self.options.transport
    retxml = self.options.retxml
    nosend = self.options.nosend
    prettyxml = self.options.prettyxml
    timer = metrics.Timer()
    log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
    try:
        self.last_sent(soapenv)
        plugins = PluginContainer(self.options.plugins)
        plugins.message.marshalled(envelope=soapenv.root())
        soapenv = soapenv.str()
        soapenv = soapenv.encode('utf-8')
        # Hack ajung to make the SOAP request fit
        # (rewrites the namespace prefixes so Sharepoint accepts the envelope)
        soapenv = soapenv.replace('ns0:Body', 'SOAP-ENV:Body')
        soapenv = soapenv.replace('<ns0:', '<ns1:')
        soapenv = soapenv.replace('</ns0:', '</ns1:')
        ctx = plugins.message.sending(envelope=soapenv)
        soapenv = ctx.envelope
        log.debug(soapenv)
        if nosend:
            # NOTE(review): RequestContext is not imported in this module -
            # the nosend path would raise NameError; confirm before using
            # the nosend option with this patched send().
            return RequestContext(self, binding, soapenv)
        request = Request(location, soapenv)
        request.headers = self.headers()
        timer.start()
        reply = transport.send(request)
        log.debug(reply)
        timer.stop()
        metrics.log.debug('waited %s on server reply', timer)
        ctx = plugins.message.received(reply=reply.message)
        reply.message = ctx.reply
        if retxml:
            result = reply.message
        else:
            result = self.succeeded(binding, reply.message)
    except TransportError, e:
        # 202/204 responses carry no body - treat as an empty success
        if e.httpcode in (202,204):
            result = None
        else:
            log.error(self.last_sent())
            result = self.failed(binding, e)
    return result
# Monkey patch SoapClient.send() with the namespace-rewriting variant above
# so every outgoing suds request gets the SharePoint-compatible envelope.
from suds.client import SoapClient
SoapClient.send = send
log.debug('Patched suds.client.SoapClient.send()')
import os
import sys
import random
import time
from optparse import OptionParser
from zopyx.slimp import slimp
# maximum number of tracks appended to the playlist per run
MAX_LENGTH_PLAYLIST = 40
# cache file holding the paths of all known MP3 files (see rescan_mp3list())
mp3files = '/tmp/mp3files.txt'
# seed per process so concurrent invocations pick different tracks
random.seed(time.time()*os.getpid())
# in-memory copy of the cache file, one path per entry
MP3FILES = []
def myrandom(max):
    """Return a pseudo-random index in the half-open range [0, max).

    BUG FIX: the original returned ``random.randint(0, max)`` whose upper
    bound is *inclusive*; callers use the result as a list index
    (``MP3FILES[myrandom(len(MP3FILES))]``), so ``max`` itself could be
    drawn and raise an IndexError.  ``randrange`` excludes the upper bound.
    The /dev/urandom-based code that followed the original ``return``
    statement was unreachable and has been removed.
    """
    return random.randrange(max)
def rescan_mp3list(options):
    """ create a local cache file with the paths of all MP3 files """
    # Populates the module-level MP3FILES list; the expensive filesystem
    # scan only runs when the cache file is missing or --scan-mp3 was given.
    # NOTE(review): options.rootdir is interpolated into shell commands
    # unquoted -- paths with spaces or shell metacharacters will break or
    # be interpreted by the shell.  Acceptable for a personal script only.
    global MP3FILES
    if not os.path.exists(mp3files) or options.scanmp3:
        # make the whole tree world-readable so the player can access it
        os.system('find %s -type d -exec chmod a+rx {} \;' % options.rootdir)
        os.system('find %s -type f -exec chmod a+r {} \;' % options.rootdir)
        os.system('/usr/bin/find %s -name \*mp3 |grep -vi badesalz|grep -v Suspektes |grep -v Texte >%s' % (options.rootdir, mp3files))
    MP3FILES = open(mp3files).readlines()
def choose_file(single=1):
    """ Return a random selection of MP3 files.

        With single=1 (default) a one-element list holding one random track
        is returned; otherwise the complete album (all *.mp3 files in the
        chosen track's directory, sorted) is returned.
    """
    track = MP3FILES[myrandom(len(MP3FILES))]
    if single:
        return [track]
    album_dir = os.path.dirname(track)
    album_tracks = [os.path.join(album_dir, name)
                    for name in os.listdir(album_dir) if name.endswith('.mp3')]
    album_tracks.sort()
    return album_tracks
def files_for_playlist(slimp, options):
    """ Return a list of track paths to append to the player's playlist.

        Returns an empty list when enough unplayed tracks remain
        (more than options.min_tracks ahead of the current position).
    """
    if not os.path.exists(options.rootdir):
        raise ValueError, 'Directory "%s" does not exist' % options.rootdir
    if options.init: slimp.playlist_clear()
    current_track = int(slimp.playlist_tracknum())
    # abc = total number of tracks currently in the playlist
    abc = int(slimp.playlist_numtracks())
    if abc - current_track > options.min_tracks: return []
    files = list()
    while len(files) < MAX_LENGTH_PLAYLIST:
        rnd = myrandom(100) # 10% for complete albums
        if rnd < 10:
            result = choose_file(single=0)
        else:
            result = choose_file(single=1)
        # de-duplicate while preserving selection order
        for f in result:
            if not f in files:
                files.append(f.strip())
    return files
def main():
    """ Command-line entry point: top up the player's playlist with random
        tracks and (re)start playback when needed.
    """
    parser = OptionParser()
    parser.add_option('-d', '--rootdir', dest='rootdir', default='/raid/media2/media/mp3',
                      help='root directory')
    parser.add_option('-i', '--init-playlist', dest='init', action='store_true',
                      help='init the playlist')
    parser.add_option('-s', '--scan-mp3', dest='scanmp3', action='store_true',
                      help='rescan MP3 directory')
    parser.add_option('-n', '--min-tracks', dest='min_tracks', type='int',
                      help='minimal number of elements in playlist', default=5)
    options, args = parser.parse_args()
    slim = slimp.SLIM()
    rescan_mp3list(options)
    files = files_for_playlist(slim, options)
    slim.playlist_add(files)
    if files:
        # drop the already-played tracks from the head of the playlist
        current_track = int(slim.playlist_tracknum())
        for i in range(0, current_track): slim.playlist_delete(0)
    if options.init or not slim.playing():
        slim.shuffle(0)
        slim.play()
        slim.display('Playlist initialized', 'Have fun', 10)
if __name__ == '__main__':
    sys.exit(main())
##########################################################################
# zopyx.smartprintng.client - client library for the SmartPrintNG server
# (C) 2009, ZOPYX Ltd & Co. KG, Tuebingen, Germany
##########################################################################
import os
import sys
import base64
import time
import xmlrpclib
import zipfile
import tempfile
import zipfile
import warnings
import logging
# root logger used throughout this module
LOG = logging.getLogger()
# when set in the environment, generated ZIP files are kept for debugging
KEEP_ZIP = os.environ.get('KEEP_ZIP')
class Proxy(object):
    """ THIS IMPLEMENTATION IS OBSOLETE, USE 'Proxy2' INSTEAD """

    def __init__(self, host='localhost', port=6543, username='', password=''):
        # host/port address the SmartPrintNG server; username/password are
        # used by _authenticate() for the XML-RPC auth token
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.setOutputDirectory(os.path.join(tempfile.gettempdir(), 'smartprintng_client', str(time.time())))

    def setOutputDirectory(self, output_directory):
        """ Create (if needed) and verify the spool directory that receives
            conversion results.  Raises IOError when not writeable.
        """
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
        self.output_directory = output_directory
        # check if directory is writeable
        try:
            tmpname = os.path.join(self.output_directory, 'test')
            file(tmpname, 'w').write('foo')
            os.unlink(tmpname)
        except IOError:
            raise IOError('Spool directory %s is not writeable' % self.output_directory)
        LOG.info('Using spool directory %s' % self.output_directory)

    def _makeZipFromDirectory(self, directory):
        """ Generate a ZIP file from a directory containing all its
            contents. Returns the filename of the generated ZIP file.
        """
        directory = os.path.abspath(directory)
        # NOTE(review): tempfile.mktemp() is race-prone; kept for behavior
        # compatibility with the rest of this obsolete class.
        zip_filename = tempfile.mktemp()
        ZF = zipfile.ZipFile(zip_filename, 'w')
        for dirname, dirnames, filenames in os.walk(directory):
            for fname in filenames:
                # archive names are relative to 'directory'
                arcname = os.path.join(dirname, fname).replace(directory + os.path.sep, '')
                fullname = os.path.abspath(os.path.join(dirname, fname))
                ZF.write(fullname, arcname)
        ZF.close()
        return zip_filename

    def _authenticate(self):
        """ Obtain an auth token from the server for username/password. """
        server = xmlrpclib.ServerProxy('http://%s:%d/authenticate' % (self.host, self.port))
        return server(self.username, self.password)

    def ping(self):
        """ Connectivity check against the server. """
        server = xmlrpclib.ServerProxy('http://%s:%d/ping' % (self.host, self.port))
        return server()

    def availableConverters(self):
        """ Return the converters known to the server. """
        server = xmlrpclib.ServerProxy('http://%s:%d/availableConverters' % (self.host, self.port))
        return server()

    def convertZIP2(self, dirname, converter_name='pdf-prince', workdir=None):
        """ XMLRPC client to SmartPrintNG server """
        # uploads 'dirname' as a base64-encoded ZIP, receives a ZIP back and
        # unpacks it into workdir (or the spool directory); returns a dict
        # with 'output_filename' and optionally 'conversion_output'
        auth_token = self._authenticate()
        zip_filename = self._makeZipFromDirectory(dirname)
        server = xmlrpclib.ServerProxy('http://%s:%d/convertZIP' % (self.host, self.port))
        zip_data = server(auth_token,
                          base64.encodestring(file(zip_filename, 'rb').read()),
                          converter_name)
        # and receive the conversion result as base64 encoded ZIP archive
        # (it will contain only *one* file)
        zip_temp = tempfile.mktemp()
        file(zip_temp, 'wb').write(base64.decodestring(zip_data))
        result = dict()
        ZF = zipfile.ZipFile(zip_temp, 'r')
        for name in ZF.namelist():
            fullname = os.path.join(workdir or self.output_directory, os.path.basename(name))
            file(fullname, 'wb').write(ZF.read(name))
            if name.startswith('output.'):
                result['output_filename'] = fullname
            elif name.startswith('conversion-output'):
                result['conversion_output'] = fullname
        ZF.close()
        os.unlink(zip_filename)
        os.unlink(zip_temp)
        return result

    def convertZIPandRedirect(self, dirname, converter_name='pdf-prince', prefix=None):
        """ XMLRPC client to SmartPrintNG server """
        # Authenticate first
        auth_token = self._authenticate()
        zip_filename = self._makeZipFromDirectory(dirname)
        server = xmlrpclib.ServerProxy('http://%s:%d/convertZIPandRedirect' % (self.host, self.port))
        # returns the server-side location of the conversion result
        location = server(auth_token,
                          base64.encodestring(file(zip_filename, 'rb').read()),
                          converter_name,
                          prefix)
        os.unlink(zip_filename)
        return location

    def convertZIPEmail(self, dirname, converter_name='pdf-prince',
                        sender=None, recipients=None, subject=None, body=None):
        """ Convert 'dirname' server-side and mail the result. """
        # Authenticate first
        auth_token = self._authenticate()
        zip_filename = self._makeZipFromDirectory(dirname)
        server = xmlrpclib.ServerProxy('http://%s:%d/convertZIPEmail' % (self.host, self.port))
        result = server.convertZIPEmail(auth_token,
                                        base64.encodestring(file(zip_filename, 'rb').read()),
                                        converter_name,
                                        sender,
                                        recipients,
                                        subject,
                                        body)
        return result
class Proxy2(Proxy):
    """ ZIP Client proxy addressing the Produce & Publish server through a
        single URL instead of host/port/credentials.

        CONSISTENCY FIX: setOutputDirectory() and _makeZipFromDirectory()
        were byte-identical copies of the Proxy base class implementations;
        they are now inherited instead of duplicated.
    """

    def __init__(self, url):
        """ Accepts the url of P&P server as
            http://user:password@host:port
        """
        self.url = url
        self.setOutputDirectory(os.path.join(tempfile.gettempdir(), 'smartprintng_client', str(time.time())))

    def ping(self):
        """ Connectivity check against the server. """
        server = xmlrpclib.ServerProxy(self.url + '/ping')
        return server.ping()

    def availableConverters(self):
        """ Return the converters known to the server. """
        server = xmlrpclib.ServerProxy(self.url + '/availableConverters')
        return server.availableConverters()

    def convertZIP2(self, dirname, converter_name='pdf-prince', workdir=None):
        """ XMLRPC client to SmartPrintNG server.

            Uploads 'dirname' as a base64-encoded ZIP, receives a ZIP back
            and unpacks it into 'workdir' (or the spool directory).
            Returns a dict with 'output_filename' and, when present,
            'conversion_output'.
        """
        zip_filename = self._makeZipFromDirectory(dirname)
        server = xmlrpclib.ServerProxy(self.url + '/convertZIP')
        zip_data = server.convertZIP('',
                                     base64.encodestring(file(zip_filename, 'rb').read()),
                                     converter_name)
        # and receive the conversion result as base64 encoded ZIP archive
        # (it will contain only *one* file)
        zip_temp = tempfile.mktemp()
        file(zip_temp, 'wb').write(base64.decodestring(zip_data))
        result = dict()
        ZF = zipfile.ZipFile(zip_temp, 'r')
        for name in ZF.namelist():
            fullname = os.path.join(workdir or self.output_directory, os.path.basename(name))
            file(fullname, 'wb').write(ZF.read(name))
            if name.startswith('output.'):
                result['output_filename'] = fullname
            elif name.startswith('conversion-output'):
                result['conversion_output'] = fullname
        ZF.close()
        # keep the intermediate archives around when KEEP_ZIP is set
        if not KEEP_ZIP:
            os.unlink(zip_filename)
            os.unlink(zip_temp)
        return result

    def convertZIPandRedirect(self, dirname, converter_name='pdf-prince', prefix=None):
        """ XMLRPC client to SmartPrintNG server.
            Returns the server-side location of the conversion result.
        """
        zip_filename = self._makeZipFromDirectory(dirname)
        server = xmlrpclib.ServerProxy(self.url + '/convertZIPandRedirect')
        location = server.convertZIPandRedirect('',
                                                base64.encodestring(file(zip_filename, 'rb').read()),
                                                converter_name,
                                                prefix)
        if not KEEP_ZIP:
            os.unlink(zip_filename)
        return location

    def convertZIPEmail(self, dirname, converter_name='pdf-prince',
                        sender=None, recipients=None, subject=None, body=None):
        """ Convert 'dirname' server-side and mail the result. """
        zip_filename = self._makeZipFromDirectory(dirname)
        server = xmlrpclib.ServerProxy(self.url + '/convertZIPEmail')
        result = server.convertZIPEmail('',
                                        base64.encodestring(file(zip_filename, 'rb').read()),
                                        converter_name,
                                        sender,
                                        recipients,
                                        subject,
                                        body)
        return result
# Smoke test: convert the directory given as argv[1] against a local server.
if __name__ == '__main__':
    proxy = Proxy2('http://localhost:6543')
    print proxy.ping()
    print proxy.availableConverters()
    print proxy.convertZIP2(sys.argv[1])
zopyx.smartprintng.core
=======================
This package contains the refactored code-base of the SmartPrintNG
product for Plone. The former monolithic implementation is broken into
a three-level architecture (top-down):
- ``zopyx.smartprintng.plone`` - the integration layer with Plone
(status: not implemented)
- ``zopyx.smartprintng.core`` - the SmartPrintNG backend
(status: implemented and almost finished)
- ``zopyx.convert2`` - the low-level converter API
(status: implemented and almost finished)
Contact
=======
| ZOPYX Ltd. & Co. KG
| c/o Andreas Jung,
| Charlottenstr. 37/1
| D-72070 Tuebingen, Germany
| E-mail: info at zopyx dot com
| Web: http://www.zopyx.com
| zopyx.smartprintng.core | /zopyx.smartprintng.core-2.0.0b4.tar.gz/zopyx.smartprintng.core-2.0.0b4/README.txt | README.txt |
import re
from BeautifulSoup import BeautifulSoup, NavigableString, Tag
import zope.component
from zope.component.factory import Factory
from zope.component.interfaces import IFactory
from zope.interface import implements
from interfaces import IHTMLTransformation
def registerTransformation(cls):
    """ Register a transformation class with the global site manager:
        once as an IFactory utility and once as an IHTMLTransformation
        utility, both keyed on the class' 'name' attribute.
    """
    site_manager = zope.component.getGlobalSiteManager()
    factory = Factory(cls, cls.name)
    site_manager.registerUtility(factory, IFactory, cls.name)
    site_manager.registerUtility(cls, IHTMLTransformation, cls.name)
class BaseTransform(object):
    """ Base class for HTML transformations working on a BeautifulSoup tree.

        Construct with either raw 'html' markup or an already parsed 'soup';
        subclasses implement transform() and mutate self.soup in place.
    """

    def __init__(self, html=None, soup=None):
        if not html and not soup:
            raise ValueError("'html' and 'soup' can not be both None")
        # BUG FIX: the original used the "(cond) and a or b" idiom, which
        # falls through to BeautifulSoup(html) whenever 'soup' is falsy
        # (e.g. an empty soup) even though it is not None.  A conditional
        # expression keeps the caller-supplied soup in that case.
        self.soup = soup if soup is not None else BeautifulSoup(html)

    def __str__(self):
        # serialized markup of the (possibly transformed) tree
        return self.soup.renderContents()

    def transform(self):
        raise NotImplementedError("Do not instantiate BaseTransform directly")
class LinkRemover(BaseTransform):
    """ Replaces every anchor with a plain <span> holding the anchor text. """

    implements(IHTMLTransformation)

    name = 'zopyx.smartprintng.linkremover'
    description = 'Removes links from HTML'

    def transform(self):
        """ replace all links with a <span> tag and the anchor text """
        soup = self.soup
        for anchor in soup.findAll('a'):
            tag = Tag(soup, 'span')
            tag.insert(0, anchor.renderContents().strip())
            # BUG FIX: the original called soup.a.replaceWith(tag), which
            # always replaces the *first* remaining <a> in the document
            # instead of the anchor currently being processed; that breaks
            # for nested anchors or when document order diverges from the
            # findAll() result.  Replace the current anchor itself.
            anchor.replaceWith(tag)
class ReviewHistoryRemover(BaseTransform):
    """ Strips the Plone review history block from the document. """

    implements(IHTMLTransformation)

    name = 'zopyx.smartprintng.reviewhistoryremover'
    description = 'Removes the review history from HTML'

    def transform(self):
        """ Remove every node carrying the id 'review-history'. """
        for node in self.soup.findAll(id='review-history'):
            node.extract()
class ImageRemover(BaseTransform):
    """ Strips all <img> elements from the document. """

    implements(IHTMLTransformation)

    name = 'zopyx.smartprintng.imageremover'
    description = 'Removes images from HTML'

    def transform(self):
        """ Remove all images """
        # IDIOM: the original used a side-effect-only list comprehension
        # ([img.extract() for img in images]); a plain loop states the
        # intent without building a throwaway list.
        for img in self.soup.findAll('img'):
            img.extract()
class LinkListAdder(BaseTransform):
    """ Numbers every external link in the document and appends an ordered
        list of the link targets ("[n] href") to the end of the document.
    """

    implements(IHTMLTransformation)

    name = 'zopyx.smartprintng.linklistadder'
    description = 'Add a numbered link list to the end of the document'

    def _pcdataFromNode(self, s, lst=None):
        """ recursive pcdata collector """
        # BUG FIX: the original declared 'lst=[]' -- a mutable default
        # argument shared across *all* calls, so text collected for one
        # anchor leaked into the result of every later call.
        if lst is None:
            lst = []
        if s.string is not None:
            lst.append(s.string)
        else:
            for n in s.contents:
                self._pcdataFromNode(n, lst)
        return ' '.join(lst)

    def checkHref(self, href):
        """ Return False for mailto|javascript or internal
            anchors or views.
        """
        if 'mailto:' in href or \
           'javascript:' in href or \
           href.startswith('#') or \
           href.startswith('@@'):
            return False
        return True

    def getLinksInHtml(self):
        """ return all (unique, qualifying) links inside a HTML fragment """
        soup = self.soup
        hrefs = []
        for anchor in soup.findAll('a'):
            # anchors without an href attribute are skipped
            try:
                href = anchor['href']
            except KeyError:
                # was a bare 'except:'; narrowed to KeyError for
                # consistency with enumerateLinks() below
                continue
            if href in hrefs or not self.checkHref(href):
                continue
            hrefs.append(str(href))
        return hrefs

    def enumerateLinks(self):
        """ Rewrite every qualifying anchor's text as 'text [n]' in
            document order; returns the list of hrefs in that order.
        """
        count = 1
        links = []
        soup = self.soup
        for anchor in soup.findAll('a'):
            # anchors without an href attribute are skipped
            try:
                href = anchor['href']
            except KeyError:
                continue
            if not self.checkHref(href):
                continue
            anchor['class'] = 'enumerated-link'
            s = self._pcdataFromNode(anchor) + ' [%d]' % count
            anchor.contents = [NavigableString(s)]
            links.append(anchor['href'])
            count += 1
        return links

    def transform(self):
        links = self.getLinksInHtml()
        self.enumerateLinks()
        soup = self.soup
        if links:
            # (removed an unused 'pat' template variable from the original)
            div = Tag(soup, 'div')
            div['id'] = 'enumerated-links'
            ol = Tag(soup, 'ol')
            div.append(ol)
            soup.append(div)
            for i, link in enumerate(links):
                li = Tag(soup, 'li')
                li.append(NavigableString(u'[%d] %s' % (i+1, link)))
                ol.append(li)
class PageBreaker(BaseTransform):
    """ Wraps each section starting at an H1/H2 heading into a
        <div class="chapter sp-page"> so the PDF converter inserts a page
        break before it; the last section gets only class "chapter"
        (no page break after the final chapter).
    """

    implements(IHTMLTransformation)

    name = 'zopyx.smartprintng.pagebreaker'
    description = 'Adds page breaks at H1/H2 elements'

    def transform(self, separator='(h1|h2)'):
        # operate on the serialized markup; the tree is re-parsed at the end
        html = str(self)
        breaker = re.compile('<%s' % separator, re.I|re.M|re.S)
        div_start = '<div class="chapter sp-page">'
        div_start2 = '<div class="chapter">'
        div_end = '</div>'
        # start offsets of all headings, plus end-of-document as sentinel
        positions = []
        for mo in breaker.finditer(html):
            positions.append(mo.start())
        positions.append(len(html))
        # NOTE(review): any markup *before* the first matching heading is
        # not covered by the slices below and is silently dropped --
        # presumably documents are expected to start with a heading; verify.
        parts = []
        len_positions = len(positions) - 1
        for i in range(len_positions):
            start = positions[i]
            end = positions[i+1]
            if i == len_positions - 1:
                # last chapter: no forced page break
                parts.append(div_start2 + html[start: end].strip() + div_end)
            else:
                parts.append(div_start + html[start: end].strip() + div_end)
        html2 = '\n'.join(parts)
        self.soup = BeautifulSoup(html2)
# register factories for all transformations
# (scans this module's namespace -- at module level locals() is globals() --
# and registers every class implementing IHTMLTransformation)
import inspect
for x in locals().values():
    if inspect.isclass(x) and IHTMLTransformation.implementedBy(x):
        registerTransformation(x)
import os
import random
import tempfile
import urlparse
import urllib2
import PIL.Image
import cStringIO
from BeautifulSoup import BeautifulSoup
import zope.component
from zope.component.interfaces import ComponentLookupError
from zope.interface import implements, implementedBy
from zope.pagetemplate.pagetemplatefile import PageTemplateFile
from resources import Resource
from interfaces import IImageFetcher
from logger import LOG
class Renderer(object):
def __init__(self, context=None):
self.id = str(random.random())
self.context = context
self.tempdir = os.path.join(tempfile.gettempdir(), 'smartprinting', self.id)
LOG.debug('New renderer session: %s' % self.id)
if not os.path.exists(self.tempdir):
os.makedirs(self.tempdir)
def render(self,
html,
template=None,
resource=None,
styles=[],
transformations=[],
transformation_options={},
template_options={},
beautify_html=False,
):
""" 'html' - HTML snippet to be rendered
'template' - either a filename of pagetemplate or a
ViewPageTemplateFile instance
'resource' - an instance of resource.Resource
'styles' - a list of style names to be passed down as full
expanded stylesheet files to the template engine.
The name of the style reference to the 'styles'
value within the given 'resource' parameter. This
option can only be used with a valid 'resource'
parameter.
'transformations' - a list or registered transformation names
'transformation_options' - options passed down to the specific
transformations as specified in 'transformations'.
This parameter is a dict of dicts where the keys
reference a transformation name and the value is
the related parameter dict for each transformation.
'template_options' - an options dictionary passed directly
to the template
Note: using 'template' and 'resource' a mutually-exclusive
"""
if template is None and resource is None:
raise ValueError('"resource" and "template" parameters can not be both None')
if template is not None and resource is not None:
raise ValueError('Using "resource" and "template" is mutually-exclusive')
if styles and not resource:
raise ValueError('Using "styles" without setting "resource" is not possible')
# The rendering template is either passed directly....
if template is not None:
if isinstance(template, str):
template = PageTemplateFile(template)
else:
# or taken from the resource
template = PageTemplateFile(resource.template_filename)
assert isinstance(template, PageTemplateFile), 'not a PageTemplateFile'
if not isinstance(transformations, (list, tuple)):
raise TypeError('"transformations" must be list/tuple of strings')
# proceed with transformations based on BeautifulSoup
soup = BeautifulSoup(html)
for name in transformations:
params = transformation_options.get(name, {})
try:
T = zope.component.createObject(name, soup=soup, **params)
except ComponentLookupError:
raise ValueError('Transformation "%s" is not registred' % name)
T.transform()
soup = T.soup
# Download remote images and make them local
# ATT: handling of local images
# moved code to the end of the pipline because
# the render template might also contain images
# soup = self.makeImagesLocal(soup)
# now pass the modified HTML fragment to the template
# in order to render a proper HTML document
html2 = soup.renderContents()
stylesheets = dict()
for style in styles:
try:
stylesheet_filename = resource.styles[style]
except KeyError:
raise ValueError('No style with name "%s" configured' % style)
stylesheets[style] = '\n%s\n' % file(stylesheet_filename, 'rb').read()
params = {'body' : unicode(html2, 'utf-8'),
'stylesheets' : stylesheets,
}
params.update(template_options)
rendered_html = template(**params)
if beautify_html:
rendered_html = BeautifulSoup(rendered_html).prettify()
# Download remote images and make them local
# ATT: handling of local images
soup2 = BeautifulSoup(rendered_html)
soup2 = self.makeImagesLocal(soup2)
rendered_html = soup2.renderContents()
self.rendered_html = rendered_html
return self.rendered_html
def save(self, output_filename=None):
""" Save rendered html to the filesystem"""
if output_filename is None:
output_filename = os.path.join(self.tempdir, 'index.html')
file(output_filename, 'wb').write(self.rendered_html)
LOG.debug('HTML written to %s' % output_filename)
return output_filename
def makeImagesLocal(self, soup):
""" All referencered images (local/remote) must be downloaded
or obtained and put as local copies into the renderers
working directory.
"""
for img in soup.findAll('img'):
src = img['src'].encode('ascii', 'ignore')
try:
image_path = self._fetchImage(src)
img['src'] = image_path
except Exception, e:
LOG.error('Error handling image (%s)' % e, exc_info=True)
# remove image from soup since we can handle # the error on our
# own
img.extract()
return soup
def _fetchImage(self, src):
# Image fetching is delegated to an adapter
image_fetcher = IImageFetcher(self.context, None)
if not image_fetcher:
raise ValueError('No adapter for IImageFetcher found')
img_data = image_fetcher.fetch(src)
if not img_data:
raise ValueError('No image data found for %s' % img)
# We got hold of the image data. Now convert it to PNG and save it
# within working directory of the renderer. We convert always to PNG
# in order to support *all* external converters because they support
# different image types.
new_img_path = '%s/%s.png' % (self.tempdir, random.random())
pil_img = PIL.Image.open(cStringIO.StringIO(img_data))
pil_img.save(new_img_path, 'PNG')
del pil_img
LOG.debug('Image %s stored as %s' % (src, new_img_path))
return new_img_path | zopyx.smartprintng.core | /zopyx.smartprintng.core-2.0.0b4.tar.gz/zopyx.smartprintng.core-2.0.0b4/zopyx/smartprintng/core/renderer.py | renderer.py |
import os
from zope.interface import Interface, providedBy, implements
from ConfigParser import ConfigParser
from logger import LOG
# mapping interface -> resource name -> Resource instance
# (populated by registerResource(), queried by getResourcesFor())
resources_registry = dict()
class Resource(dict):
    """ A resource is a simple data-structure (mapping) that
        contains a registered set of data (template, styles, etc.)
        for a given interface. Keys used so far:

        name - name of resource as defined in the INI file
        template_filename - path to template file
        description - textual description of the template
        styles - dict mapping style names to their full filenames
        for_converters - list of converter name as defined through zopyx.convert2
        configuration_filename - full path of defining configuration file
    """

    def __getattr__(self, k, default=None):
        """ make dict keys available as attributes """
        # BUG FIX: the original fell back to super(...).__getattr__(k,
        # default), but neither dict nor object define __getattr__, so the
        # fallback itself raised a confusing AttributeError about 'super'.
        # Raise a proper AttributeError for the missing key instead.
        # ('default' is kept in the signature for backward compatibility
        # but, as before, no default is ever returned.)
        try:
            return self[k]
        except KeyError:
            raise AttributeError(k)
class ConfigurationError(Exception):
    """ Raised for missing, malformed or duplicate resource configuration. """
    pass
def registerResource(iface, configuration_filename):
    """ Parses a resources configuration with the following format
        and adds it to the resources_registry.

        [demo]
        description = demo demo demo
        template = demo_template.pt
        styles = demo1.css
                 demo2.css
        # or
        # styles = demo1.css, demo2.css
        for-converter = pdf-fop
                        pdf-prince

        Raises ConfigurationError for missing files/options or duplicate
        section registrations.
    """
    configuration_filename = os.path.abspath(configuration_filename)
    if not os.path.exists(configuration_filename):
        raise ConfigurationError('Configuration file %s does not exist' % configuration_filename)
    if not issubclass(iface, Interface):
        raise ConfigurationError('"iface" parameter must be a subclass of '
                                 'zope.interface.Interface (found %s)' % iface.__class__)
    # style/template paths in the INI file are relative to its directory
    configuration_directory = os.path.dirname(configuration_filename)
    CP = ConfigParser()
    LOG.debug('Parsing %s' % configuration_filename)
    CP.read([configuration_filename])
    for section in CP.sections():
        # all four options are mandatory per section
        for option in ('template', 'for-converter', 'styles', 'description'):
            if not CP.has_option(section, option):
                raise ConfigurationError('Configuration file %s has no option "%s" in section "%s"' %
                                         (configuration_filename, option, section))
        description = CP.get(section, 'description')
        template = CP.get(section, 'template')
        template_filename = os.path.join(configuration_directory, template)
        if not os.path.exists(template_filename):
            raise ConfigurationError('Template %s does not exist' % template_filename)
        # styles may be separated either by commas or by newlines
        items = CP.get(section, 'styles')
        uses_comma = ',' in items
        items = items.split(uses_comma and ',' or '\n')
        styles = [item.strip() for item in items if item]
        styles2 = dict()
        for style in styles:
            style_filename = os.path.join(configuration_directory, style)
            if not os.path.exists(style_filename):
                raise ConfigurationError('Style %s does not exist' % style_filename)
            styles2[style] = style_filename
        items = CP.get(section, 'for-converter')
        uses_comma = ',' in items
        items = items.split(uses_comma and ',' or '\n')
        converters = [item.strip() for item in items if item]
        # (a dead 'for converter in converters: pass' loop was removed here;
        # converter names are stored as-is without further validation)
        if not resources_registry.has_key(iface):
            resources_registry[iface] = dict()
        if resources_registry[iface].has_key(section):
            raise ConfigurationError('Section "%s" of configuration file %s is already registered' %
                                     (section, configuration_filename))
        # creating and registering a new Resource instance for the given interface
        r = Resource(name=section,
                     template_filename=template_filename,
                     description=description,
                     styles=styles2,
                     for_converters=converters,
                     configuration_filename=configuration_filename)
        resources_registry[iface][section] = r
        LOG.debug('Adding configuration: %s' % r)
def getResourcesFor(context, name=None):
    """ Return the resource mappings registered for the interfaces
        provided by 'context'.  When 'name' is given, return the single
        Resource of that name instead; raise ValueError when no resource
        of that name is registered for the context.
    """
    candidates = [resources_registry[iface]
                  for iface in providedBy(context).flattened()
                  if iface in resources_registry]
    if not name:
        return candidates
    for mapping in candidates:
        if name in mapping:
            return mapping[name]
    raise ValueError('No resources with name "%s" registered' % name)
# Smoke test: register the resources.ini given as argv[1] for a throwaway
# interface and dump the registry lookups.
if __name__ == '__main__':
    import sys
    class ISample(Interface):
        pass
    registerResource(ISample, sys.argv[1])
    print resources_registry
    print getResourcesFor(object)
    class Sample(object):
        implements(ISample)
    print getResourcesFor(Sample())
import os
from zope.interface import Interface, implements
# initialize/register all HTML transformations
import zopyx.smartprintng.core.transformation
from zopyx.smartprintng.core.highlevel import convert
from zopyx.smartprintng.core.interfaces import IImageFetcher
from zopyx.smartprintng.core.adapters import ExternalImageFetcher
from zopyx.smartprintng.core.renderer import Renderer
from zopyx.smartprintng.core.interfaces import IImageFetcher
from zopyx.smartprintng.core.adapters import ExternalImageFetcher
# register resources directory for demo purposes
from zopyx.smartprintng.core import resources
def demo_convert(fullname, orientation='horizontal', debug=False):
    """ Render a demo business card PDF for 'fullname'.

        orientation -- 'horizontal' or anything else for vertical layout
        debug -- additionally applies debug.css and suffixes the filename
        Returns the absolute path of the generated PDF.
    """
    from zope.app.testing import ztapi
    class ITestContent(Interface):
        pass
    class TestContent(object):
        implements(ITestContent)
    # register the image fetcher adapter; the bare except presumably
    # tolerates re-registration on repeated calls -- verify
    try:
        ztapi.provideAdapter(ITestContent, IImageFetcher, ExternalImageFetcher)
    except:
        pass
    resources_configuration_file = os.path.join(os.path.dirname(__file__), 'resources', 'resources.ini')
    resources.registerResource(ITestContent, resources_configuration_file)
    # pick stylesheet and filename suffix based on orientation/debug flags
    css = orientation == 'horizontal' and 'business_card.css' or 'business_card2.css'
    orientation_ext = orientation=='horizontal' and '_landscape' or '_upside'
    styles = debug and [css, 'debug.css'] or [css]
    ext = debug and '_debug' or ''
    context = TestContent()
    result = convert(context=context,
                     html='',
                     styles=styles,
                     resource_name='demo',
                     converter='pdf-prince',
                     template_options=dict(fullname=fullname),
                     destination_filename=os.path.join(os.getcwd(),
                                                       '%s%s%s.pdf' % (fullname, orientation_ext, ext)),
                     )
    return os.path.abspath(result)
# Demo driver: render cards for a few names in both orientations.
if __name__ == '__main__':
    for fullname in ('Andreas Jung', 'Heinz Becker', 'Hilde Becker'):
        for orientation in ('horizontal', 'vertical'):
            filename = demo_convert(fullname, orientation)
            print fullname, orientation, filename
Produce & Publish Lite
======================
Produce & Publish Lite is a stripped down version of the Produce & Publish
platform (www.produce-and-publish.com). It implements a PDF generation
functionality for the Plone content-management system (www.plone.org).
Limitations
-----------
* supports Apache FOP 1.0 or PISA as PDF backend
* no support for the Produce & Publish server (conversion will
take place inside Plone - may block worker threads of Zope)
Requirements
------------
* requires Plone 4 or higher (untested with Plone 3.x)
Installation
------------
* add ``zopyx.smartprintng.lite`` to the ``eggs`` option
of your buildout.cfg and re-run buildout (as usual)
* read carefully the ``zopyx.convert2`` documentation (especially the
section related to the configuration of Apache FOP)
Installation of Apache using ``zc.buildout``
--------------------------------------------
You can automatically install and configure Apache FOP through
your buildout configuration::
[buildout]
parts =
...
fop
[instance]
...
environment-vars =
FOP_HOME ${buildout:directory}/parts/fop
[fop]
recipe = hexagonit.recipe.download
strip-top-level-dir = true
url = http://apache.openmirror.de/xmlgraphics/fop/binaries/fop-1.0-bin.tar.gz
Usage
-----
The module supports conversion to PDF either using the FOP converter or PISA
(installed automatically). Add ``@@asPlainPDF`` to the URL of a Plone
document, news item or topic in order to convert the current content to PDF.
This will use the default PDF converter (Apache FOP). In order to convert the
HTML content using PISA you have to use ``@@asPlainPDF?converter=pdf-pisa``.
Supplementary information
-------------------------
PDF support for your own content-types:
http://docs.produce-and-publish.com/connector/content-types.html
Registering your own resource directories:
http://docs.produce-and-publish.com/connector/resource-directories.html#registering-your-own-resource-directory
License
-------
Produce & Publish Lite is published under the GNU Public License V 2 (GPL 2).
Author
------
| ZOPYX Limited
| c/o Andreas Jung
| Charlottenstr. 37/1
| D-72070 Tuebingen, Germany
| www.zopyx.com
| [email protected]
| zopyx.smartprintng.lite | /zopyx.smartprintng.lite-1.0.6.zip/zopyx.smartprintng.lite-1.0.6/README.txt | README.txt |
import os
import re
import copy
import tempfile
import urllib
import urllib2
import logging
import urlparse
from BeautifulSoup import BeautifulSoup, NavigableString, Tag
from Products.CMFCore.utils import getToolByName
from Acquisition import aq_base
from zopyx.smartprintng.lite.logger import LOG
uid_reg = re.compile('([\dA-Fa-f]{32})')  # 32-hex-digit Plone UID (as used in 'resolveuid/<uid>' links)
level_match = re.compile('^l\d$')  # single-digit level class names ('l1'..'l9')
url_match = re.compile(r'^(http|https|ftp)://')  # absolute (external) URLs
TRANSFORMATIONS = dict()  # global registry: transformation name -> callable
LOG = logging.getLogger('zopyx.smartprintng.lite')  # NOTE(review): rebinds (shadows) the LOG imported above
_log = LOG.debug  # shorthand for debug logging
def nodeProcessed(node):
    """Return True when *node* carries a truthy 'processed' marker.

    Falls through (implicitly returning None) otherwise, matching the
    truthy/falsy contract callers rely on.
    """
    marker = node.get('processed')
    if marker:
        return True
def registerTransformation(method, name=''):
    """Register *method* in the global TRANSFORMATIONS registry.

    The registry key defaults to the function's own name unless an
    explicit *name* is supplied.
    """
    key = name if name else method.__name__
    TRANSFORMATIONS[key] = method
class Transformer(object):
    """Apply a sequence of registered HTML transformations to a document.

    The markup is parsed once into a BeautifulSoup tree; each named
    transformation mutates that tree in place via the TRANSFORMATIONS
    registry.
    """

    def __init__(self, html, destdir, context):
        self.destdir = destdir
        self.soup = BeautifulSoup(html)
        self.context = context
        self.images = self._collectImages()

    def __call__(self, transformations):
        """Run each named transformation, in order, against the soup."""
        for name in transformations:
            handler = TRANSFORMATIONS.get(name)
            params = {
                'context': self.context,
                'request': self.context.REQUEST,
                'destdir': self.destdir,
                'images': self.images,
            }
            if handler is None:
                raise ValueError('No transformation "%s" registered' % name)
            handler(self.soup, params)

    def render(self):
        """Return the (possibly transformed) markup of the parsed tree."""
        return self.soup.renderContents()

    def _collectImages(self):
        """Collect catalog paths of all Image objects below the context."""
        query = dict(portal_type='Image',
                     path='/'.join(self.context.getPhysicalPath()))
        return [brain.getPath()
                for brain in self.context.portal_catalog(**query)]
def pathFromParent(soup, node):
    """Walk up from *node* and return the first ancestor's 'path' attribute.

    The search stops (returning None) once the document body is reached
    without having found a node carrying a truthy 'path' attribute.
    """
    current = node
    while True:
        current = current.parent
        candidate = current.get('path')
        if candidate:
            return str(candidate)
        if current == soup.body:
            return None
def _findAll(soup, *args, **kw):
    """Call ``soup(*args, **kw)`` (i.e. ``soup.findAll(...)``) defensively.

    Returns an empty tuple when the lookup raises, so callers can always
    iterate over the result; the failure is logged with a traceback.
    """
    try:
        return soup(*args, **kw)
    except Exception:
        # was a bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt; catch Exception and keep the defensive logging
        LOG.error('soup.findAll(%s, %s) failed' % (args, kw), exc_info=True)
        return ()
################################################################
#
################################################################
def makeImagesLocal(soup, params):
    """Rewrite all <img> references so they point at local files.

    External (http) images are downloaded into params['destdir']; internal
    Plone images are resolved to a catalog object by a cascade of lookups,
    written to destdir, and wrapped into an 'image-container' <div> with
    scale and caption markup.  Images that cannot be resolved are either
    removed from the output (download/data failures) or kept untouched
    (unresolvable relative path).
    """
    for img in soup.findAll('img'):
        # images marked with the 'internal-resource' class must not be
        # fetched/rewritten later on
        if 'internal-resource' in (img.get('class') or ''):
            continue
        src = img['src']
        # strip the virtual-host base URL so the remaining path can be
        # traversed locally ('++resource++' URLs are left untouched)
        if params['request'] and src.startswith(params['request'].BASE0) \
            and '++resource++' not in src:
            src = src.replace(params['request'].BASE0 + '/', '')
        if src.startswith('http'):
            # external image: download it into the working directory
            try:
                img_data = urllib2.urlopen(str(src)).read()
            except urllib2.URLError:
                LOG.warn('No image found: %s - removed from output' % src)
                img.extract()
                continue
            # NOTE(review): tempfile.mktemp() is race-prone and the file
            # handle is never closed explicitly - consider mkstemp() plus
            # a context manager
            tmpname = tempfile.mktemp(dir=params['destdir'])
            file(tmpname, 'wb').write(img_data)
            img['src'] = os.path.basename(tmpname)
        else:
            # image with relative URL
            # first lookup image by direct traversal
            img_path = urllib.unquote(str(src))
            img_obj = params['context'].restrictedTraverse(img_path, None)
            if img_obj is None:
                # second try: traverse relative to the portal root
                img_path2 = getToolByName(params['context'], 'portal_url').getPortalPath() + img_path
                img_obj = params['context'].restrictedTraverse(img_path2, None)
            if img_obj is None and 'resolveuid' in src:
                # third try: resolve a 'resolveuid/<32-hex-uid>' reference
                mo = uid_reg.search(src)
                if mo:
                    uid = mo.group(0)
                    img_obj = params['context'].reference_catalog.lookupObject(uid)
            # For scaled images ('_preview', '_large' etc.) use the original
            # image always (which is stored as acquisition parent)
            if img_obj:
                has_portal_type = hasattr(aq_base(img_obj.aq_inner), 'portal_type')
                if has_portal_type and img_obj.portal_type == img_obj.aq_parent.portal_type:
                    img_obj = img_obj.aq_parent
            if img_obj is None:
                # nothing found, check the next parent node with a 'path' parameter
                # referring to the origin document
                parent_container_path = pathFromParent(soup, img)
                if parent_container_path is not None:
                    img_obj = params['context'].restrictedTraverse('%s/%s' % (parent_container_path, img_path), None)
            # still nothing found
            if img_obj is None:
                # strip a trailing scale component ('image_preview' etc.) and
                # match the remainder against the collected image paths
                img_split = img_path.split('/')
                if img_split[-1].startswith('image_') or img_split[-1].startswith('image-'):
                    img_path = '/'.join(img_split[:-1])
                for image_path in params['images']:
                    if image_path.endswith(img_path):
                        img_obj = params['context'].restrictedTraverse(image_path, None)
                        break
            # get hold of the image in original size
            if img_obj:
                # thumbnails have an Image as aq_parent
                if img_obj.aq_parent.portal_type == 'Image':
                    img_obj = img_obj.aq_parent
            if img_obj:
                # pull the raw image bytes from the first matching attribute
                img_data = None
                for attr in ['data', '_data']:
                    try:
                        img_data = str(getattr(img_obj, attr))
                        # NOTE(review): 'continue' lets a later '_data' hit
                        # override an earlier 'data' hit - confirm that
                        # 'break' (first match wins) was not intended
                        continue
                    except AttributeError:
                        pass
                if img_data == None:
                    LOG.warn('No image found: %s - removed from output' % img_path)
                    img.extract()
                    continue
                # NOTE(review): mktemp()/unclosed file handle - see note above
                tmpname = tempfile.mktemp(dir=params['destdir'])
                file(tmpname, 'wb').write(img_data)
                img['src'] = os.path.basename(tmpname)
                # image scaling
                try:
                    scale = img_obj.getField('pdfScale').get(img_obj)
                except AttributeError:
                    # no 'pdfScale' field on this type -> default to full size
                    scale = 100
                # add content-info debug information
                # don't add scale as style since the outer image-container
                # has the style set
                img['scale'] = str(scale)
                # now move <img> tag into a dedicated <div>
                div = Tag(soup, 'div')
                div['class'] = 'image-container'
                # div['style'] = 'width: %d%%' % scale
                div['scale'] = str(scale)
                div.insert(0, copy.copy(img))
                # image caption
                img_description = img_obj.Description()  # NOTE(review): appears unused
                img_caption = Tag(soup, 'div')
                img_caption['class'] = 'image-caption'
                # exclude from image enumeration
                context = params['context']  # NOTE(review): appears unused
                exclude_field = img_obj.getField('excludeFromImageEnumeration')
                if exclude_field and not exclude_field.get(img_obj):
                    span = Tag(soup, 'span')
                    classes = ['image-caption-text']
                    description = img_obj.Description()
                    if description:
                        classes.append('image-caption-text-with-text')
                    else:
                        classes.append('image-caption-text-without-text')
                    span['class'] = ' ' .join(classes)
                    if description:
                        span.insert(0, NavigableString(description))
                    img_caption.insert(0, span)
                div.append(img_caption)
                img.replaceWith(div)
            else:
                LOG.warn('No image found: %s - not removed, keeping it' % img_path)
registerTransformation(makeImagesLocal)
import os
import glob
import shutil
import tempfile
import urllib2
import BeautifulSoup
from compatible import InitializeClass
from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.app.pagetemplate.viewpagetemplatefile import ViewPageTemplateFile as ViewPageTemplateFile2
from zopyx.convert2 import Converter
from zopyx.smartprintng.lite.logger import LOG
from zopyx.smartprintng.lite.resources import resources_registry
from transformation import Transformer
from util import getLanguageForObject
cwd = os.path.dirname(os.path.abspath(__file__))  # package directory (location of the bundled 'resources')
class PDFView(BrowserView):
    """PDF view: renders the context through a page template and converts
    the resulting HTML to PDF via zopyx.convert2.
    """
    # default main page template; can be replaced per-request through a
    # registered resource directory (see __call2__)
    template = ViewPageTemplateFile('resources/pdf_template.pt')
    resources = 'resources'
    # default transformations used for the default PDF view.
    # 'transformations' can be overridden within a derived PDFView.
    # If you don't need any transformation -> redefine 'transformations'
    # as empty list or tuple
    transformations = (
        'makeImagesLocal',
    )
    def copyResources(self, resources_dir, destdir):
        """Copy all plain files of *resources_dir* (non-recursively) into
        *destdir*.  Silently does nothing when the directory is missing.
        """
        if os.path.exists(resources_dir):
            for name in os.listdir(resources_dir):
                fullname = os.path.join(resources_dir, name)
                if os.path.isfile(fullname):
                    shutil.copy(fullname, destdir)
    def transformHtml(self, html, destdir, transformations=None):
        """Perform post-rendering HTML transformations and return the
        transformed markup.  Defaults to self.transformations; the request
        may override them with a 'transformations' parameter (either a
        comma-separated string or a sequence).
        """
        if transformations is None:
            transformations = self.transformations
        # the request can override transformations as well
        if self.request.has_key('transformations'):
            t_from_request = self.request['transformations']
            if isinstance(t_from_request, basestring):
                # comma-separated names; an empty string means "none"
                transformations = t_from_request and t_from_request.split(',') or []
            else:
                transformations = t_from_request
        T = Transformer(html, destdir, self.context)
        T(transformations)
        return T.render()
    def __call__(self, *args, **kw):
        # thin wrapper: log conversion failures with traceback, then re-raise
        try:
            return self.__call2__(*args, **kw)
        except:
            # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
            # it does re-raise, so failures are not swallowed
            LOG.error('Conversion failed', exc_info=True)
            raise
    def __call2__(self, *args, **kw):
        """Render the context to PDF; returns the generated PDF filename.

        Supported URL/keyword parameters:
          'language'  - e.g. 'de', 'en'; overrides the detected document
                        language (controls hyphenation resources)
          'converter' - one of the converters registered with zopyx.convert2
                        (default here: 'pdf-fop')
          'resource'  - name of a registered resource directory
          'template'  - name of a custom template (.pt) inside that resource
          'custom_stylesheet' - path of an additional CSS object to inject
          'data'      - arbitrary application data passed to the template
        """
        # Output directory
        destdir = tempfile.mkdtemp()
        # debug/logging; request form values override keyword arguments
        params = kw.copy()
        params.update(self.request.form)
        LOG.debug('new job (%s, %s) - outdir: %s' % (args, params, destdir))
        # get hold of the language (hyphenation support)
        language = getLanguageForObject(self.context)
        if params.get('language'):
            language = params.get('language')
        # Check for CSS injection
        custom_css = None
        custom_stylesheet = params.get('custom_stylesheet')
        if custom_stylesheet:
            custom_css = str(self.context.restrictedTraverse(custom_stylesheet, None))
            # NOTE(review): str(...) of a failed traversal yields the string
            # 'None', so this check can never trigger - verify intent
            if custom_css is None:
                raise ValueError('Could not access custom CSS at %s' % custom_stylesheet)
        # check for resource parameter
        resource = params.get('resource')
        if resource:
            resources_directory = resources_registry.get(resource)
            if not resources_directory:
                raise ValueError('No resource "%s" configured' % resource)
            if not os.path.exists(resources_directory):
                raise ValueError('Resource directory for resource "%s" does not exist' % resource)
            self.copyResources(resources_directory, destdir)
            # look up custom template in resources directory
            template_name = params.get('template', 'pdf_template')
            if not template_name.endswith('.pt'):
                template_name += '.pt'
            template_filename = os.path.join(resources_directory, template_name)
            if not os.path.exists(template_filename):
                raise IOError('No template found (%s)' % template_filename)
            template = ViewPageTemplateFile2(template_filename)
        else:
            template = self.template
        # call the dedicated @@asHTML on the top-level node. For a leaf document
        # this will return either a HTML fragment for a single document or @@asHTML
        # might be defined as an aggregator for a bunch of documents (e.g. if the
        # top-level is a folderish object)
        html_fragment = ''
        html_view = self.context.restrictedTraverse('@@asHTML', None)
        if html_view:
            html_fragment = html_view()
        # arbitrary application data
        data = params.get('data', None)
        # Now render the complete HTML document
        html = template(self,
                        language=language,
                        request=self.request,
                        body=html_fragment,
                        custom_css=custom_css,
                        data=data,
                        )
        html = self.transformHtml(html, destdir)
        # hack to replace '&' with '&'
        # NOTE(review): as written this replace is a no-op (both arguments
        # are identical) - the intent was presumably to escape '&'; verify
        html = html.replace('& ', '& ')
        # and store it in a dedicated working directory
        dest_filename = os.path.join(destdir, 'index.html')
        # inject BASE tag (replaces the original '<head>', pos+6 skips it)
        pos = html.lower().find('<head>')
        if pos == -1:
            raise RuntimeError('HTML does not contain a HEAD tag')
        html = html[:pos] + '<head><base href="%s"/>' % dest_filename + html[pos+6:]
        file(dest_filename, 'wb').write(html)
        # copy over global styles etc.
        resources_dir = os.path.join(cwd, 'resources')
        self.copyResources(resources_dir, destdir)
        # copy over language dependent hyphenation data
        if language:
            hyphen_file = os.path.join(resources_dir, 'hyphenation', language + '.hyp')
            if os.path.exists(hyphen_file):
                shutil.copy(hyphen_file, destdir)
            hyphen_css_file = os.path.join(resources_dir, 'languages', language + '.css')
            if os.path.exists(hyphen_css_file):
                shutil.copy(hyphen_css_file, destdir)
        # now copy over resources (of a derived view); '' is a harmless no-op
        self.copyResources(getattr(self, 'local_resources', ''), destdir)
        # run the external converter and return the PDF path
        c = Converter(dest_filename)
        result = c(params.get('converter', 'pdf-fop'))
        if result['status'] != 0:
            raise RuntimeError('Error during PDF conversion (%r)' % result)
        pdf_file = result['output_filename']
        LOG.debug('Output file: %s' % pdf_file)
        return pdf_file
InitializeClass(PDFView)
class PDFDownloadView(PDFView):
    """Deliver the PDF generated by PDFView as an HTTP download."""

    def __call__(self, *args, **kw):
        """Render the PDF and stream it with attachment/anti-cache headers."""
        pdf_file = super(PDFDownloadView, self).__call__(*args, **kw)
        # return PDF over HTTP
        R = self.request.response
        R.setHeader('content-type', 'application/pdf')
        R.setHeader('content-disposition', 'attachment; filename="%s.pdf"' % self.context.getId())
        # os.stat(...)[6] is st_size; the original set this header twice
        R.setHeader('content-length', os.stat(pdf_file)[6])
        # anti-caching headers so browsers always fetch a fresh PDF
        R.setHeader('pragma', 'no-cache')
        R.setHeader('cache-control', 'no-cache')
        R.setHeader('Expires', 'Fri, 30 Oct 1998 14:19:41 GMT')
        # read and close the file explicitly (the original leaked the handle)
        fp = open(pdf_file, 'rb')
        try:
            return fp.read()
        finally:
            fp.close()
InitializeClass(PDFDownloadView)
The Produce & Publish Client Connector for Plone
================================================
Documentation
-------------
See http://packages.python.org/zopyx.smartprintng.plone
Source code
-----------
See https://github.com/zopyx/zopyx.smartprintng.plone
Bugtracker
----------
See https://github.com/zopyx/zopyx.smartprintng.plone/issues
Licence
-------
Published under the GNU Public Licence Version 2 (GPL 2)
Author
------
| ZOPYX Limited
| Charlottenstr. 37/1
| D-72070 Tuebingen, Germany
| [email protected]
| www.zopyx.com
| zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/README.txt | README.txt |
Changelog
=========
2.1.26 (2012-08-24)
-------------------
- some fixes/workaround for exporting (embedded) images from
special types like News Item
2.1.25 (2012-08-14)
-------------------
- added .mode-flat and mode-nested CSS classes depending
on the folder aggregation mode in order to fix a problem
with the makeImagesLocal transformation for nested folders
2.1.23 (2012-05-08)
-------------------
- fix in image resolver
2.1.22 (2012-04-04)
-------------------
- locale aware sorting of index terms in addIndex()
2.1.21 (2012-04-03)
-------------------
- fixed bug in endnote number handling in convertEndNotes()
2.1.20 (2012-02-26)
-------------------
- fixed CSS counter bug (related to PrinceXML 7.1 vs. 8.0)
2.1.19 (2012-02-20)
-------------------
- unicode fix in HTML splitter code
2.1.18 (2012-02-10)
-------------------
- folder aggregator now support (experimental) document filter
based on UIDs (filter_uid parameter)
- makeImageLocal() transformation removed parts of the document
while replacing the old image with an image container
- added experimental addIndexList transformation
2.1.17 (2011-12-28)
-------------------
- checking html input in transformation machinery for empty strings
2.1.16 (2011-12-21)
-------------------
- better footnote handling
2.1.15 (2011-12-20)
-------------------
- convertWordEndnotes() transformation added
2.1.14 (2011-12-19)
-------------------
- fixed image width 100% for images inside an image-container
(PrinceXML 8 compatibility)
2.1.13 (2011-12-18)
-------------------
- some fixes discovered using PyFlakes
2.1.12 (2011-12-12)
-------------------
- added some transformations for better
Word import
2.1.11 (2011-11-23)
-------------------
- update trove classifiers
2.1.10 (2011-11-17)
-------------------
- improved aggregator for nested content folders
- support for content hierarchies up to level 8
- support for new environment variable SMARTPRINTNG_ZIP_OUTPUT
2.1.9 (2011-11-11)
------------------
- fixed bug in makeImagesLocal() transformation
where the document root has not been used properly
for finding images by traversal
2.1.8 (2011-11-11)
------------------
- support for new ``SMARTPRINTNG_LOCAL_CONVERSION`` environment
variable
2.1.7 (2011-11-08)
------------------
- removed some baggage in order to make distro smaller
2.1.6 (2011-11-07)
------------------
- minor fixes in handling of generated files for download
2.1.5 (2011-11-07)
------------------
- first public (open-source) release
2.1.4 (2011-10-25)
------------------
- fixed unicode/utf-8 issue in makeImagesLocal transformation
2.1.3 (2011-10-14)
------------------
- added fixAmpersand transformation
2.1.2 (2011-10-10)
------------------
- transformations for dealing with converted footnotes from Word
2.1.1 (2011-10-08)
------------------
- compatibility with Dexterity
2.1.0 (2011-09-22)
------------------
- final 2.1.0 release
2.0.9 (2011-09-20)
------------------
- fixed bug in xpath_query() (using relative query)
2.0.8 (2011-09-11)
------------------
- more cleanup
2.0.7 (2011-09-10)
------------------
- some ZCML fixes in order to avoid Plone 4.x startup failures under
some conditions
- restored compatibility with Plone 3.3.X
2.0.6 (2011-09-08)
------------------
- image exporter did not deal proper with duplicate image ids
- minor fixes
2.0.5 (2011-09-02)
------------------
- new lxml backed transformation pipeline
- more tests
2.0.4 (2011-08-26)
------------------
- logging resource registration using INFO severity
- new lxml dependency
2.0.3 (2011/08/15)
------------------
- catching HTTPError in image resolver
- fixed another BeautifulSoup misbehaviour in fixHeadingAfterOfficeImport()
2.0.2 (2011-08-02)
------------------
- minor fix
2.0.1 (2011-08-02)
------------------
- integration with new zip client version (Proxy2 implementation)
2.0.0 (2011-07-25)
---------------------
* final release
2.0.0rc2 (2011-07-04)
---------------------
* fix in logger call in folder.py
2.0.0rc1 (2011-07-01)
---------------------
* don't extend images in an authoring project
* remove class attributes from headings after office import
* added ignoreHeadingsForStructure transformation
2.0.0b2 (2011-06-16)
--------------------
* minor fixes related to office data import
2.0.0b1 (2011-05-24)
--------------------
* fixes related to office format input
2.0.0a3 (2011-05-17)
--------------------
* added some workaround for image resolver in order to deal with images
referenced through a fully specified URL with a redirection included
(TQM issue)
2.0.0a2 (2011-05-14)
--------------------
* minor fix in safe_get()
2.0.0a1 (2011-05-10)
--------------------
* simplification and refactoring
0.7.0 (2011-02-11)
-------------------
* updated for use with zopyx.authoring 1.5.X
* added GenericDownloadView aka '@@ppConvert'
* exported images now contain a proper extension (fixes issues
with the XFC converter depending on extension for determining
the image format)
0.6.24 (2010-12-09)
-------------------
* added addDocumentLinks() transformation
* including content ids of aggregated content
0.6.23 (2010-09-10)
-------------------
* addImageCaptionsInHTML(): honour excludeFromImageEnumeration
0.6.22 (2010-09-09)
-------------------
* fixed improper stripping of image names using an image scale
(causing issues in the consolidated HTML view of the authoring
environment)
0.6.21 (2010-08-09)
-------------------
* added support '++resource++' image references (Patrick Gerken)
* added support for FSImage (Patrick Gerken)
0.6.20 (2010-08-05)
-------------------
* added 'removeComments' transformation
* added 'makeImageSrcLocal' transformation
0.6.19 (2010-07-13)
-------------------
* fixed race condition in makeImagesLocal()
0.6.18 (2010-06-14)
-------------------
* images got a new PDF conversion option "Exclude from image enumeration"
0.6.17 (2010-06-12)
-------------------
* inserting H1 title for consolidated HTML
* added extra class to folder title for consolidated HTML
0.6.16 (2010-05-29)
-------------------
* inserting space for found anchors
0.6.15 (2010-04-15)
-------------------
* minor fix in image handling
0.6.14 (2010-04-14)
-------------------
* minor tweaks for image caption markup
0.6.13 (2010-03-26)
-------------------
* support for span.footnoteText
0.6.12 (2010-03-21)
-------------------
* support for image urls 'resolveuid/<uid>'
* minor fixes and tweaking in image handling (caption generation)
0.6.11 (2010-03-10)
-------------------
* added document extender
* document option for suppressing the title in PDF
* image caption support
* changed default transformations (to makeImagesLocal only)
* removed TOC from default PDF template
0.6.10 (2010-03-03)
-------------------
* support for request/transformations parameter
* various fixes
0.6.9 (2010-02-22)
------------------
* added <em>[[text:footnote-text]]</em> support for generating footnotes
* various changes related to zopyx.authoring integration
0.6.8 (2010-02-03)
------------------
* Folder aggregation now works with all folderish objects providing IATFolder
0.6.7 (2009-11-30)
------------------
* makeImagesLocal: better dealing with virtual hosting
0.6.6 (2009-11-15)
------------------
* fixed CSS issue with TOC markup
0.6.5 (2009-11-12)
------------------
* always use images in their original resolution
* optional content information with link to the edit mode
of the aggregated document (you must change the visibility
of the .content-info class through CSS)
* a request parameter 'show-debug-info' will enable the
additional content-info view
* better error handling
* better logging
* tweaked markup of generated TOC
0.6.3 (2009-10-27)
------------------
* refactored language handling
* refactored PDF view in order to provide a low-level view
returning a reference to the generated PDF file instead
providing it for HTTP download
0.6.2 (2009-10-24)
------------------
* setting anti-cache headers
* locale-aware sorting in PloneGlossary code
0.6.1 (2009-10-23)
------------------
* PloneGlossary integration: compare title case-insensitive
(IDG project)
0.6.0 (2009-10-21)
------------------
* refactored and simplified transformation machinery
0.5.0 (2009-10-09)
------------------
* major rewrite
0.3.0 (2009-09-24)
------------------
* refactored views
0.2.0 (2009-09-23)
------------------
* more hyphenation dicts
* restructured resources directory
0.1 (xxxx-xx-xx)
----------------
* Initial release
| zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/docs/source/HISTORY.rst | HISTORY.rst |
Integration with PloneFormGen
=============================
Using Produce & Publish with PloneFormGen - generating PDF
documents from form data.
Installation
~~~~~~~~~~~~
- Install **PloneFormGen**
Converting form data to PDF
~~~~~~~~~~~~~~~~~~~~~~~~~~~
- use a script adapter with the following code
::
view = context.restrictedTraverse('@@asPFGPDF')
pdf = view()
R = context.REQUEST.RESPONSE
R.setHeader('content-type', 'application/pdf')
R.setHeader('content-length', len(pdf))
R.setHeader('content-disposition', 'attachment; filename=%s.pdf' % context.getId())
R.write(pdf)
- You can access PFG form values within your PDF template using
(in this case we have a form parameter ``fullname``)
::
<span tal:replace="options/request/fullname | nothing">Fullname</span>
| zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/docs/source/integration-ploneformgen.rst | integration-ploneformgen.rst |
.. Produce & Publish Plone Client Connector documentation master file, created by
sphinx-quickstart on Sun Nov 13 15:03:42 2011.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Produce & Publish Plone Client Connector
========================================
The Produce & Publish Plone Client connector integrates the Plone
CMS with the Produce & Publishing platform and supports the
generation of PDF, EPUB and other office formats (depending on the installed
externals converters on the Produce & Publish server side).
Documentation
-------------
Primary documentation: http://docs.produce-and-publish.com
Produce & Publish website: http://www.produce-and-publish.com
Source code
-----------
See https://github.com/zopyx/zopyx.smartprintng.plone
Bugtracker
----------
See https://github.com/zopyx/zopyx.smartprintng.plone/issues
Licence
-------
Published under the GNU Public Licence Version 2 (GPL 2)
Author
------
| ZOPYX Limited
| Charlottenstr. 37/1
| D-72070 Tuebingen, Germany
| [email protected]
| www.zopyx.com
Contents:
.. toctree::
:maxdepth: 2
installation.rst
resource-directories.rst
content-types.rst
integration-ploneformgen.rst
HISTORY.rst
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/docs/source/index.rst | index.rst |
Installation
============
This documentation assumes that your installation of Plone/Zope is based on
zc.buildout.
- edit your *buildout.cfg* - add *zopyx.smartprintng.plone* to the
**eggs** options of your buildout.cfg:
::
# For Plone 3.x (Client Connector < 2.0)
find-links = ...
http://username:[email protected]
# For Plone 4.x (Client Connector >2.0)
find-links = ...
http://username:[email protected]
eggs = ...
zopyx.smartprintng.plone
# only needed for Plone 3.x
zcml = ...
zopyx.smartprintng.plone
- in addition (and effective 17.08.2011 or later) you need to pin
the version of the ``BeautifulSoup`` module:
::
[buildout]
versions = versions
...
[versions]
BeautifulSoup = 3.2.0
...
- re-run *bin/buildout*
- restart Zope/Plone
- When running the Produce & Publish server on a different server, you must
adjust the ``SMARTPRINTNG_SERVER`` environment variables within your
*.bashrc* file (or a similar file) or you put those variables into your
buildout configuration using the *<environment>* section. Username and
password are only needed when you run the Produce & Publish server behind a
reverse proxy (requiring authentication).
::
export SMARTPRINTNG_SERVER=http://user:[email protected]:6543/
or
::
<environment>
SMARTPRINTNG_SERVER=http://user:[email protected]:6543/
</environment>
Supported Plone content-types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Document
- Folder (nested structure)
- News item
- PloneGlossary
- Collection
Usage
~~~~~
The Plone connector provides a dedicated @@asPlainPDF view that can
be added to the URL of any of the supported content-types of Plone
(Document, Folder, Newsitem, PloneGlossary). So when your document
is for example associated with the URL
::
http://your.server/plone/my-page
you can generate a PDF by using the URL
::
http://your.server/plone/my-page/@@asPlainPDF
Parameters
~~~~~~~~~~
The @@asPlainPDF view accepts the following parameters controlling
certain aspects of the PDF conversion:
- **language** - can be set to 'de', 'en', 'fr' etc. in order to
control language-specific aspects of the PDF conversion. Most
important: this parameter controls the hyphenation. The Plone
connector comes out-of-the-box with hyphenation tables for several
languages. You can omit this URL parameter if the **Language**
metadata parameter (of the top-level document) to be converted is
set within Plone.
- **converter** - if you are using the Produce & Publish server
with a converter backend other than PrinceXML you can specify a
different name (default is *pdf-prince*). See zopyx.convert2
documentation for details.
- **resource** - can be set in order to specify a registered resource
directory to be used for running the conversion. The ``resource``
parameter must be identical with the ``name`` parameter of
the related ZCML ``<smartprintng:resourceDirectory>`` directive.
- **template** - can be used to specify the name of template to be
used for running the conversion. The ``template`` parameter usually
refers to a .pt filename inside the ``resource`` directory.
Miscellaneous
~~~~~~~~~~~~~
You may set the ``SMARTPRINTNG_LOCAL_CONVERSION`` environment variable
(to some value) in order to run the conversion locally inside the Plone
process without using an external Produce & Publish server.
The environment varialble ``SMARTPRINTNG_ZIP_OUTPUT`` can be set to export
all resources used for the conversion into a ZIP file for debugging purposes.
The path of the generated ZIP file is logged within the standard Zope/Plone
logfile (or the console if Plone is running in foreground).
| zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/docs/source/installation.rst | installation.rst |
Adding custom content-types to the Plone Client Connector
=========================================================
This documentation explains how to extend the Plone Client Connector with your
own or custom Plone content-types.
Custom content-types can be registered with the Produce & Publish server using
the Zope Component Architecture. The one single contact of the P&P server with a
content-type is the existence of a ``@@asHTML`` view for the related content-type.
The ``@@asHTML`` view must return a HTML snippet that will be used by the P&P
within the main body of its own rendering PDF template.
As an example look at the ``@@asHTML`` view for Plone news items.
The ``@@asHTML`` view is configured through ZCML (within your
configure.zcml file):
::
<browser:page
name="asHTML"
for="Products.ATContentTypes.interface.news.IATNewsItem"
permission="zope2.View"
class=".newsitem.HTMLView"
/>
and implemented as browser view (newsitem.py):
::
from Globals import InitializeClass
from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
class HTMLView(BrowserView):
""" This view renders a HMTL fragment for the configured content type """
template = ViewPageTemplateFile('newsitem_raw.pt')
def __call__(self, *args, **kw):
return self.template(self.context)
InitializeClass(HTMLView)
The related templates renders a snippet of code for a news item
object:
::
<div class="type-newsitem document-body">
<h1 class="title bookmark-title" tal:content="context/Title" />
<div class="description" tal:content="context/Description" />
<div>
<div class="image-box" tal:condition="nocall: context/image | nothing">
<img class="teaser-image" src="image" />
<div class="image-caption" tal:content="context/getImageCaption | nothing" />
</div>
<div class="body" tal:content="structure context/getText" />
</div>
</div>
In addition your content-type implementation **must** provide the
``zopyx.smartprintng.plone.interfaces.IPPContent`` interface - either by
specifying this interface as part of the class definition in your code
::
class MyContentType(...):
implements(IPPContent)
or you add the interfaces as a marker interface through ``ZCML``
::
<five:implements
class="my.package.contents.mytype.MyContentType"
interface="zopyx.smartprintng.plone.interfaces.IPPContent"
/>
Only content objects providing the ``IPPContent`` interface are being considered
during the aggregation phase of the Plone Client Connector.
For further example code, please refer to the
*zopyx/smartprintng/plone/browser* directory. The ``folder`` integration
*(folder.py)* shows you a more complex example and involves aggregation of
other content.
| zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/docs/source/content-types.rst | content-types.rst |
Resource directories
====================
The Plone Client connector allows you to define your own resource directories
containing
- the PDF main template
- style sheets
- font files
- hyphenation files
Registering your own resource directory
---------------------------------------
First you need your own policy - e.g. *zopyx.theme*. Inside the configure.zcml
file of your *zopyx.theme* you need to register a sub-directory using the
``smartprintng:resourceDirectory`` directive:
::
<configure
xmlns="http://namespaces.zope.org/zope"
xmlns:zcml="http://namespaces.zope.org/zcml"
xmlns:smartprintng="http://namespaces.zopyx.com/smartprintng"
>
<smartprintng:resourceDirectory
name="zopyx_resource"
directory="resources_pdf"
/>
</configure>
The registered ``resources_pdf`` directory must contain all resource files as
flat structure (no sub-directories). The ``name`` parameter relates to the
optional ``resource`` URL parameter as use for the ``@@asPlainPDF`` browser
view.
Naming conventions
------------------
- PDF template: .pt
- Stylesheets: .css, .styles
- Images: .gif, .jpg, .png
- Hyphenation files: .hyp
- Coverpage templates (only used with Authoring Environment): .cover
- Font files: .otf, .ttf
| zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/docs/source/resource-directories.rst | resource-directories.rst |
import os
import re
import urllib2
import cgi
import tempfile
import inspect
import uuid
import time
import locale
import lxml.html
import PIL
from lxml.cssselect import CSSSelector
from Products.CMFCore.utils import getToolByName
from zopyx.smartprintng.plone.logger import LOG
from zopyx.smartprintng.plone.browser.images import resolveImage
from Products.CMFPlone.utils import safe_hasattr
# sentinel to distinguish "attribute missing" from a falsy attribute value
_marker = object()

# registry of transformation name -> callable, filled by @registerTransformation
TRANSFORMATIONS = dict()

# matches absolute URLs (used to tell external links from internal ones)
url_match = re.compile(r'^(http|https|ftp)://')
# matches leading digits of a line (e.g. the number in "2 Footnote text");
# raw strings avoid the invalid-escape deprecation for '\d'
leading_numbers = re.compile(r'^(\d*)', re.UNICODE|re.MULTILINE)

ALL_HEADINGS = ('h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'h8', 'h9', 'h10')


def nodeProcessed(node):
    """ Return True if the node carries a truthy 'processed' attribute,
        None otherwise (callers only rely on truthiness).
    """
    if node.get('processed'):
        return True


def registerTransformation(method):
    """ Decorator to register a method as a transformation """
    name = method.__name__
    TRANSFORMATIONS[name] = method
    # return the method so the decorated module-level name stays bound to
    # the function (the original returned None, rebinding every decorated
    # name to None even though the registry itself kept working)
    return method


def availableTransformations():
    """ Names of all registered transformations. """
    return TRANSFORMATIONS.keys()


def hasTransformations(transformations):
    """ Check that every requested transformation name is registered. """
    available_transformations = availableTransformations()
    for t in transformations:
        if not t in available_transformations:
            return False
    return True
class Transformer(object):
    """ Apply a sequence of registered HTML transformations to an HTML
        string and return the transformed markup.
    """

    def __init__(self, transformation_names, context=None, destdir=None):
        """ transformation_names -- names registered via @registerTransformation
            context -- Plone context object (its REQUEST is passed to
                       transformations accepting a 'params' argument)
            destdir -- working directory for transformations that export
                       files (e.g. makeImagesLocal)
        """
        self.transformation_names = transformation_names
        self.context = context
        self.destdir = destdir

    def __call__(self, html, input_encoding=None, output_encoding=unicode, return_body=False):
        """ Run all configured transformations over ``html``.

            Raises TypeError for non-unicode input without an
            ``input_encoding`` and ValueError for unknown transformation
            names. With ``return_body`` only the serialized children of
            <body> are returned instead of the full document.
        """
        if not isinstance(html, unicode):
            if not input_encoding:
                raise TypeError('Input data must be unicode')
            html = unicode(html, input_encoding)
        html = html.strip()
        if not html:
            return u''
        root = lxml.html.fromstring(html)
        for name in self.transformation_names:
            method = TRANSFORMATIONS.get(name)
            params = dict(context=self.context,
                          request=getattr(self.context, 'REQUEST', None),
                          destdir=self.destdir,
                          )
            if method is None:
                raise ValueError('No transformation "%s" registered' % name)
            ts = time.time()
            # transformations may accept (root) or (root, params); dispatch
            # based on the introspected argument names
            argspec = inspect.getargspec(method)
            if isinstance(argspec, tuple):
                args = argspec[0] # Python 2.4
            else:
                args = argspec.args
            if 'params' in args:
                method(root, params)
            else:
                method(root)
            LOG.info('Transformation %-30s: %3.6f seconds' % (name, time.time()-ts))
        if return_body:
            # NOTE(review): body.text is None when <body> starts with an
            # element; that would raise TypeError here - confirm inputs
            body = root.xpath('//body')[0]
            html_new = body.text + u''.join([lxml.html.tostring(b, encoding=output_encoding) for b in body])
        else:
            html_new = lxml.html.tostring(root, encoding=output_encoding)
        # strip the wrapper <div> that lxml adds around fragment input
        if html_new.startswith('<div>') and html_new.endswith('</div>'):
            html_new = html_new[5:-6].strip()
        return html_new.strip()
def xpath_query(node_names):
    """ Build a relative XPath expression matching any element whose tag
        name is one of ``node_names`` (must be a list or tuple).
    """
    if not isinstance(node_names, (list, tuple)):
        raise TypeError('"node_names" must be a list or tuple (not %s)' % type(node_names))
    conditions = ' or '.join('name()="%s"' % name for name in node_names)
    return './/*[%s]' % conditions
@registerTransformation
def dummyTransformation(root):
    """ Dummy transformation doing nothing (placeholder, also useful for
        exercising the transformation machinery).
    """
    pass
@registerTransformation
def cleanupEmptyElements(root, tags=('div',)):
    """ Remove tags whose subtree contains no text.

        ``tags`` defaults to an immutable tuple; the original used a
        mutable list default (a Python anti-pattern). xpath_query()
        accepts either, so callers are unaffected.
    """
    for node in root.xpath(xpath_query(tags)):
        if not node.text_content().strip():
            node.getparent().remove(node)
# tags which receive a generated id so they can serve as link targets
UUID4TAGS = ALL_HEADINGS + ('img', 'table', 'li', 'dt', 'ul', 'ol', 'dl')

@registerTransformation
def addUUIDs(root, tags=UUID4TAGS):
    """ Assign a random UUID as 'id' to every matching tag that does not
        already carry one.
    """
    for node in root.xpath(xpath_query(tags)):
        if node.get('id', _marker) is _marker:
            node.attrib['id'] = str(uuid.uuid4())
@registerTransformation
def addUUIDsToAllTags(root, tags=UUID4TAGS):
    """ Assign a random UUID as 'id' to *every* element lacking one.

        NOTE(review): ``tags`` is accepted only for signature parity with
        addUUIDs() and is intentionally not used here.
    """
    for node in root.xpath('//*'):
        if node.get('id', _marker) is _marker:
            node.attrib['id'] = str(uuid.uuid4())
@registerTransformation
def shiftHeadings(root):
    """ Demote every heading by one level (H1 -> H2, H2 -> H3, ...). """
    for heading in root.xpath(xpath_query(ALL_HEADINGS)):
        current_level = int(heading.tag[1:])
        heading.tag = 'h%d' % (current_level + 1)
@registerTransformation
def cleanupTables(root):
    """ Remove crap from table markup """
    for table in root.xpath(xpath_query(('table',))):
        # remove col/colgroup tags
        for node in table.xpath(xpath_query(('col', 'colgroup'))):
            node.getparent().remove(node)
        # remove layout attributes from the table and its subtree only.
        # The original used the absolute XPath '//*', which stripped these
        # attributes from *every* element of the whole document - once per
        # table - instead of the current table's markup.
        for node in table.xpath('descendant-or-self::*'):
            for name in ('border', 'width', 'bordercolor', 'cellspacing', 'cellpadding'):
                if name in node.attrib:
                    del node.attrib[name]
        # adjust table class
        table.attrib['class'] = 'plain grid'
@registerTransformation
def adjustImagesAfterOfficeImport(root):
    """ Called after the OpenOffice conversion: rewrite every image to the
        'medium' scale and drop all attributes except 'src'.
    """
    for img in root.xpath(xpath_query(('img',))):
        img.attrib['src'] = img.attrib['src'] + '/image_medium'
        # snapshot the keys before deleting so the attrib dict is not
        # mutated while being iterated
        for attr_name in list(img.attrib.keys()):
            if attr_name != 'src':
                del img.attrib[attr_name]
@registerTransformation
def fixHeadingsAfterOfficeImport(root):
    """ Remove leading section numbers (e.g. "1.2.3 ") from headings.

        NOTE(review): the character class also matches literal '*' -
        presumably intentional for Office artifacts; kept as-is.
    """
    regex = re.compile(r'^([\d*\.]*)', re.UNICODE)
    for heading in root.xpath(xpath_query(ALL_HEADINGS)):
        text = heading.text_content().strip()
        text = regex.sub('', text)
        # lxml's clear() wipes attributes, children, text *and* tail;
        # preserve the tail so text following the heading is not lost
        tail = heading.tail
        heading.clear()
        heading.text = text.strip()
        heading.tail = tail
@registerTransformation
def ignoreHeadingsForStructure(root):
    """ Inside every div.ignore-headings-for-structure, turn each heading
        into a div.heading-level-X. This excludes generated HTML snippets
        from the overall document structure, where headings drive
        numbering and the table of contents.
    """
    for container in root.xpath('//div'):
        if 'ignore-headings-for-structure' not in container.get('class', ''):
            continue
        for heading in container.xpath(xpath_query(ALL_HEADINGS)):
            heading_level = int(heading.tag[1:])
            heading.tag = 'div'
            heading.attrib['class'] = 'heading-level-%d' % heading_level
@registerTransformation
def adjustAnchorsToLinkables(root):
    """ Links to linkables (tables, headings, images) in the P&P source
        are stored as "resolveuid/<UID>#<id-of-linkable>". Since the PDF
        conversion works on one aggregated HTML file, rewrite such hrefs
        to plain "#<id-of-linkable>" anchors; links whose target id does
        not exist are downgraded to plain <span> text.
        (used for PDF only)
    """
    # collect the ids of all potential link targets up front
    link_ids = set(node.get('id') for node in root.xpath('//*[@id]'))
    for anchor in root.xpath('//a'):
        href = anchor.get('href')
        # only rewrite the typical internal link pattern
        if not (href and href.startswith('resolveuid') and '#' in href):
            continue
        target_id = href.rsplit('#')[-1]
        if target_id in link_ids:
            # target exists -> local anchor
            anchor.attrib['href'] = '#%s' % target_id
        else:
            # dangling target -> demote the link to a span
            text = anchor.text_content()
            anchor.tag = 'span'
            anchor.text = text
            del anchor.attrib['href']
@registerTransformation
def cleanupForEPUB(root):
    """ Run some cleanup transformations in order to make Calibre
        (almost) happy.
    """
    # Calibre's structure detection triggers on class="chapter"; tag all
    # H1/H2 nodes accordingly. (A configurable break level could be read
    # from params['request'].get('epub_pagebreak', 'h1').split('|').)
    for node in root.xpath(xpath_query(('h1', 'h2'))):
        node.attrib['class'] = 'chapter'
    # drop div.contentinfo (contains the EDIT link)
    for div in CSSSelector('div.contentinfo')(root):
        div.getparent().remove(div)
    # drop table list, image list and table of contents
    for div in CSSSelector('div#table-list,div#images-lists,div#table-of-contents')(root):
        div.getparent().remove(div)
    # drop image captions
    for span in CSSSelector('span.image-caption-with-title')(root):
        span.getparent().remove(span)
    # every image 'src' must point to the locally exported 'preview_*'
    # file (written by makeImagesLocal())
    for img in root.xpath('//img'):
        src = img.get('src')
        if src.endswith('image_preview'):
            img.attrib['src'] = 'preview_' + src.split('/')[0]
        else:
            img.attrib['src'] = 'preview_' + src
@registerTransformation
def makeImagesLocal(root, params):
    """ Deal with internal and external image references: download or
        export every referenced image into params['destdir'], rewrite the
        <img> 'src' to the exported filename, record per-image metadata
        in an images.ini file, and wrap each image in a div.image-container
        with optional caption markup.
    """
    ref_catalog = getToolByName(params['context'], 'reference_catalog')
    destdir = params['destdir']
    # metadata sidecar consumed by the later conversion steps
    ini_filename = os.path.join(destdir, 'images.ini')
    fp_ini = file(ini_filename, 'w')
    # maps original src -> exported basename, to export each image once
    images_seen = dict()
    for document_node in CSSSelector('div.mode-flat.level-0,div.mode-nested.level-1')(root):
        document_obj = ref_catalog.lookupObject(document_node.get('uid'))
        for img in document_node.xpath(xpath_query(['img'])):
            # 'internal' images are marked with class="internal resource"
            # in order to prevent image fetching later on
            if 'internal-resource' in (img.get('class') or '') or img.get('processed'):
                continue
            scale = ''
            src = img.get('src')
            LOG.info('Introspecting image: %s' % src)
            img_obj = resolveImage(document_obj, src)
            if img_obj is None:
                # like some external image URL
                LOG.info(' Remote image fetching: %s' % src)
                try:
                    response = urllib2.urlopen(str(src))
                    img_data = response.read()
                    img_basename = src.split('/')[-1]
                except (ValueError, urllib2.URLError), e:
                    LOG.warn('No image found: %s - removed from output (reason: %s)' % (src, e))
                    img.getparent().remove(img)
                    continue
                tmpname = tempfile.mktemp(dir=destdir) + '_' + img_basename
                file(tmpname, 'wb').write(img_data)
                # write supplementary information to an .ini file per image
                img_id = os.path.basename(tmpname)
                print >>fp_ini, '[%s]' % img_id
                print >>fp_ini, 'id = %s' % img_id
                print >>fp_ini, 'filename = %s' % tmpname
                print >>fp_ini, 'url = %s' % str(src)
                print >>fp_ini, 'scale = %s' % ''
                img.attrib['src'] = img_id
                img.attrib['originalscale'] = ''
                images_seen[src] = img_id
                LOG.info(' Assigned new id: %s' % img_id)
                continue
            # resolved did find a local image
            LOG.info(' Local processing: %s' % src)
            img_filename = images_seen.get(src)
            if not img_filename:
                # not exported yet: pull the raw bytes off the image object
                img_data = None
                for attr in ['data', '_data']:
                    try:
                        img_data = str(getattr(img_obj, attr))
                        # NOTE(review): 'continue' means '_data' overrides
                        # 'data' when both exist - 'break' may have been
                        # intended; confirm before changing
                        continue
                    except AttributeError:
                        pass
                if img_data is None:
                    LOG.warn('No image found: %s - removed from output' % src)
                    img.extract()
                    continue
                tmpname = tempfile.mktemp(dir=destdir)
                file(tmpname, 'wb').write(img_data)
                # determine graphic format using PIL
                pil_image = PIL.Image.open(tmpname)
                format = pil_image.format.lower()
                # generate unique and speaking image names
                img_id = img_obj.getId()
                dest_img_name = os.path.join(destdir, img_id)
                if not os.path.exists(dest_img_name):
                    os.rename(tmpname, dest_img_name)
                else:
                    # name clash: probe "<id>-<count>.<format>" until free
                    running = True
                    count = 0
                    while running:
                        img_id = os.path.splitext(img_obj.getId())[0]
                        img_id = '%s-%d.%s' % (img_id, count, format)
                        dest_img_name = os.path.join(params['destdir'], img_id)
                        if not os.path.exists(dest_img_name):
                            os.rename(tmpname, dest_img_name)
                            tmpname = dest_img_name
                            running = False
                            del pil_image
                        else:
                            count += 1
                LOG.info(' Exported to: %s' % dest_img_name)
                # now also export the preview scale as well
                # (needed for EPUB export/conversion)
                preview_filename = os.path.join(os.path.dirname(dest_img_name), 'preview_' + os.path.basename(dest_img_name))
                try:
                    preview_img = img_obj.Schema().getField('image').getScale(img_obj, scale='preview')
                    if preview_img == '': # no scales created?
                        img_obj.Schema().getField('image').createScales(img_obj)
                        preview_img = img_obj.Schema().getField('image').getScale(img_obj, scale='preview')
                except AttributeError: # Schema (for News Item images)
                    preview_img = None
                if preview_img and safe_hasattr(preview_img, 'data'):
                    file(preview_filename, 'wb').write(str(preview_img.data))
                    LOG.info(' Exported preview scale to: %s' % preview_filename)
                # determine image scale from 'src' attribute
                src_parts = src.split('/')
                if '@@images' in src_parts:
                    scale = src_parts[-1]
                elif src_parts[-1].startswith('image_'):
                    scale = src_parts[-1][6:]
                print >>fp_ini, '[%s]' % os.path.basename(dest_img_name)
                print >>fp_ini, 'filename = %s' % dest_img_name
                print >>fp_ini, 'id = %s' % img_id
                try:
                    print >>fp_ini, 'title = %s' % img_obj.Title()
                    print >>fp_ini, 'description = %s' % img_obj.Description()
                except AttributeError:
                    # NOTE(review): literal 's' fallbacks look like mangled
                    # '%s' placeholders - confirm intended fallback values
                    print >>fp_ini, 'title = s'
                    print >>fp_ini, 'description = s'
                print >>fp_ini, 'scale = %s' % scale
                images_seen[src] = os.path.basename(dest_img_name)
                img_filename = dest_img_name
            img.attrib['src'] = os.path.basename(img_filename)
            LOG.info(' Assigned new id: %s' % img.get('src'))
            img.attrib['originalscale'] = scale
            img.attrib['style'] = 'width: 100%' # need for PrinceXML8
            img.attrib['processed'] = '1'
            # image scaling
            # add content-info debug information
            # don't add scale as style since the outer image-container
            # has the style set
            try:
                pdf_scale = img_obj.getField('pdfScale').get(img_obj)
            except AttributeError:
                pdf_scale = 100
            img.attrib['scale'] = str(pdf_scale)
            # now move <img> tag into a dedicated <div>
            div = lxml.html.Element('div')
            div.attrib['class'] = 'image-container'
            div.attrib['style'] = 'width: %d%%' % pdf_scale
            div.attrib['scale'] = str(pdf_scale)
            new_img = lxml.html.Element('img')
            new_img.attrib.update(img.attrib.items())
            div.insert(0, new_img)
            try:
                displayInline_field = img_obj.getField('displayInline')
            except AttributeError:
                displayInline_field = False
            if displayInline_field and not displayInline_field.get(img_obj):
                # image caption
                img_caption_position = img_obj.getField('captionPosition').get(img_obj)
                img_caption = lxml.html.Element('div')
                img_caption.attrib['class'] = 'image-caption'
                # exclude from image enumeration
                exclude_field = img_obj.getField('excludeFromImageEnumeration')
                if exclude_field and not exclude_field.get(img_obj):
                    # add description
                    span = lxml.html.Element('span')
                    description = unicode(img_obj.Description(), 'utf-8')
                    class_ = description and 'image-caption-description image-caption-with-description' or \
                             'image-caption-description image-caption-without-description'
                    if description:
                        span.text = description
                    span.attrib['class'] = class_
                    img_caption.insert(0, span)
                    if not description:
                        warn = lxml.html.Element('span')
                        warn.attrib['class'] = 'warning-no-description'
                        warn.text = u'image has no description'
                        img_caption.append(warn)
                    # add title
                    span = lxml.html.Element('span')
                    title = unicode(img_obj.Title(), 'utf-8')
                    class_ = description and 'image-caption-title image-caption-with-title' or \
                             'image-caption-title image-caption-without-title'
                    if title:
                        span.text = title
                    span.attrib['class'] = class_
                    img_caption.insert(0, span)
                    if not title:
                        warn = lxml.html.Element('span')
                        warn.attrib['class'] = 'warning-no-title'
                        warn.text = u'image has no title'
                        img_caption.append(warn)
                    # add title and description to container
                    if img_caption_position == 'top':
                        div.insert(0, img_caption)
                    else:
                        div.append(img_caption)
            # keep trailing text: the tail belongs to the replaced node
            div.tail = img.tail
            img.getparent().replace(img, div)
    fp_ini.close()
@registerTransformation
def cleanupHtml(root):
    """ Perform some basic HTML cleanup: strip inline styles, drop
        forbidden tags and drop links without visible text.
    """
    forbidden_tags = ['meta']
    for node in root.xpath('//*'):
        # strip inline styles
        if 'style' in node.attrib:
            del node.attrib['style']
        # drop forbidden tags
        if node.tag in forbidden_tags:
            node.getparent().remove(node)
        # drop links that carry an href but no visible text
        elif node.tag == 'a' and node.get('href') and not node.text_content().strip():
            node.getparent().remove(node)
@registerTransformation
def footnotesForHtml(root):
    """ Convert <span class="footnoteText">...</span> elements for the
        consolidated HTML representation: the footnote text becomes the
        span's title attribute and the visible text becomes 'Remark'.
    """
    for span in CSSSelector('span.footnoteText')(root):
        text = span.text_content()
        if text:
            span.attrib['title'] = text
            span.text = u'Remark'
@registerTransformation
def rescaleImagesToOriginalScale(root):
    """ makeImagesLocal() exports all images in their original size and
        records the source scale on each <img> tag's 'originalscale'
        attribute; for HTML output, re-append that scale to the 'src'.
    """
    for img in root.xpath('//img'):
        original_scale = img.get('originalscale')
        if original_scale:
            img.attrib['src'] = '%s/image_%s' % (img.attrib['src'], original_scale)
@registerTransformation
def addAnchorsToHeadings(root):
    """ obsolete """
    # no-op; kept registered only for backward compatibility of
    # transformation name lists referencing it
@registerTransformation
def removeTableOfContents(root):
    """ Remove the generated table of contents (not needed for the
        HTML view).
    """
    for toc_node in CSSSelector('div#table-of-contents')(root):
        toc_node.getparent().remove(toc_node)
@registerTransformation
def addTableOfContents(root):
    """ Add a table of contents to the div#toc node, or prepend it to
        the <body> when no such node exists.
    """
    toc = list()
    # collect all headings and inject a named anchor into each
    for count, heading in enumerate(root.xpath(xpath_query(ALL_HEADINGS))):
        # parse the full numeric suffix; the original int(tag[-1]) broke
        # for 'h10' (last character only). In Plone everything starts
        # with H2, hence the -1 offset.
        level = int(heading.tag[1:]) - 1
        text = heading.text_content()
        id = 'toc-%d' % count
        new_anchor = lxml.html.Element('a')
        new_anchor.attrib['name'] = id
        heading.insert(0, new_anchor)
        toc.append(dict(text=text,
                        level=level,
                        id=id))
    # build the TOC markup: div#toc > ul > li > a > span
    div_toc = lxml.html.Element('div')
    div_toc.attrib['id'] = 'toc'
    div_ul = lxml.html.Element('ul')
    div_toc.append(div_ul)
    for d in toc:
        li = lxml.html.Element('li')
        li.attrib['class'] = 'toc-%s' % d['level']
        a = lxml.html.Element('a')
        a.attrib['href'] = '#' + d['id']
        a.attrib['class'] = 'toc-%s' % d['level']
        span = lxml.html.Element('span')
        span.text = d['text']
        a.insert(0, span)
        li.append(a)
        div_ul.append(li)
    # check for an existing TOC (div#toc)
    nodes = CSSSelector('div#toc')(root)
    if nodes:
        # replace it with the generated TOC
        old_toc = nodes[0]
        old_toc.getparent().replace(old_toc, div_toc)
    else:
        # append generated TOC to body tag
        body = root.xpath('//body')[0]
        body.insert(0, div_toc)
@registerTransformation
def addTableList(root):
    """ Add a table list based on the <caption> tags """
    tables = list()
    # inject a named anchor into every caption and remember it
    for count, caption in enumerate(root.xpath('//caption')):
        text = caption.text_content()
        id = 'table-%d' % count
        new_anchor = lxml.html.Element('a')
        new_anchor.attrib['name'] = id
        caption.insert(0, new_anchor)
        tables.append(dict(text=text,
                           count=count,
                           id=id))
    if tables:
        div_tables = lxml.html.Element('div')
        div_tables.attrib['id'] = 'table-list'
        div_ul = lxml.html.Element('ul')
        div_tables.append(div_ul)
        for d in tables:
            li = lxml.html.Element('li')
            li.attrib['class'] = 'table-list-entry'
            a = lxml.html.Element('a')
            a.attrib['href'] = '#' + d['id']
            a.attrib['class'] = 'table-list-entry'
            span = lxml.html.Element('span')
            span.text = d['text']
            a.insert(0, span)
            li.append(a)
            div_ul.append(li)
        # check for an existing div#table-list
        nodes = CSSSelector('div#table-list')(root)
        if nodes:
            # replace it; lxml's replace() must be called on the *parent*
            # node - the original nodes[0].replace(nodes[0], ...) raises
            # ValueError (an element is not its own child)
            old = nodes[0]
            old.getparent().replace(old, div_tables)
        else:
            body = root.xpath('//body')[0]
            body.append(div_tables)
@registerTransformation
def addImageList(root):
    """ Add an image list based on the image caption <span> tags """
    images = list()
    count = 0
    for caption in root.xpath('//span'):
        # image-caption spans carry several CSS classes, so match by
        # substring rather than an exact class selector
        if not 'image-caption-with-description' in caption.get('class', ''):
            continue
        text = caption.text_content()
        id = 'image-%d' % count
        new_anchor = lxml.html.Element('a')
        new_anchor.attrib['name'] = id
        caption.insert(0, new_anchor)
        images.append(dict(text=text,
                           id=id))
        count += 1
    div_images = lxml.html.Element('div')
    div_images.attrib['id'] = 'images-list'
    div_ul = lxml.html.Element('ul')
    div_images.append(div_ul)
    if images:
        for d in images:
            li = lxml.html.Element('li')
            li.attrib['class'] = 'image-list-entry'
            a = lxml.html.Element('a')
            a.attrib['href'] = '#' + d['id']
            a.attrib['class'] = 'image-list-entry'
            span = lxml.html.Element('span')
            span.text = d['text']
            a.insert(0, span)
            li.append(a)
            div_ul.append(li)
        # look for an existing listing under the id this function creates
        # ('images-list'; the original checked 'image-list', which could
        # never match). NOTE(review): cleanupForEPUB() removes yet another
        # spelling ('images-lists') - confirm the canonical id.
        nodes = CSSSelector('div#images-list')(root)
        if nodes:
            # replace it; replace() must be called on the parent node -
            # the original nodes[0].replace(nodes[0], ...) raises ValueError
            old = nodes[0]
            old.getparent().replace(old, div_images)
        else:
            # add to end of document
            body = root.xpath('//body')[0]
            body.append(div_images)
@registerTransformation
def leaveLinksToPrinceXML(root):
    """ Tag every link with the 'no-decoration' CSS class so that link
        rendering is left to PrinceXML.
    """
    for anchor in root.xpath('//a'):
        if anchor.get('href'):
            anchor.attrib['class'] = anchor.get('class', '') + ' no-decoration'
@registerTransformation
def removeLinks(root):
    """ Replace all links with a <span> tag carrying the anchor text. """
    for link in root.xpath('//a'):
        tag = lxml.html.Element('span')
        tag.text = link.text_content()
        # preserve the tail: lxml attaches trailing text to the replaced
        # node, so without this the text after every link was lost
        # (same precaution as makeImagesLocal() takes)
        tag.tail = link.tail
        link.getparent().replace(link, tag)
@registerTransformation
def convertFootnotes(root):
    """ Mark explicit footnote spans and turn external links into
        generated footnotes (link text plus the URL as footnote body).
    """
    # Special format for footnotes:
    # <span class="footnoteText">some footnote text</span>
    for node in CSSSelector('span.footnoteText')(root):
        footnote_text = node.text_content()
        if footnote_text:
            node.attrib['class'] = 'generated-footnote'
    # generate footnotes from <a href>...</a> fields
    for a in root.xpath('//a'):
        href = a.get('href', '')
        # only external links; skip edit links
        if not href or not url_match.match(href) or 'editlink' in a.get('class', ''):
            continue
        text = a.text_content().strip()
        if text:
            # don't convert URL links with an URL as pcdata into a footnote
            if url_match.match(text):
                continue
            # NOTE(review): cgi.escape is deprecated (html.escape in py3)
            new_a = lxml.html.Element('a')
            new_a.text = cgi.escape(href)
            new_a.attrib['href'] = href
            # wrap as: span.generated-footnote-text > span.generated-footnote > a
            span = lxml.html.Element('span')
            span.attrib['class'] = 'generated-footnote'
            span.append(new_a)
            span2 = lxml.html.Element('span')
            span2.attrib['class'] = 'generated-footnote-text'
            span2.text = text
            span2.append(span)
            a.getparent().replace(a, span2)
@registerTransformation
def removeInternalLinks(root):
    """ Replace internal links (non-absolute hrefs) with plain <span>
        elements carrying the anchor text.
    """
    for a in root.xpath('//a'):
        href = a.get('href')
        if nodeProcessed(a) or not href:
            continue
        # internal links _don't_ start with http:// etc. so we perform a
        # negative check
        if not url_match.match(href):
            span = lxml.html.Element('span')
            span.text = a.text_content()
            # preserve trailing text: the tail belongs to the node being
            # replaced and was silently dropped by the original
            span.tail = a.tail
            a.getparent().replace(a, span)
@registerTransformation
def removeListings(root):
    """ Remove some of the listings not needed for HTML view """
    # ToC first
    for toc in CSSSelector('div#table-of-contents')(root):
        toc.getparent().remove(toc)
    # unwrap image containers: replace each container with the <img>
    # found inside it. The original used the absolute XPath '//img',
    # which always selected the *first* image of the whole document
    # regardless of the container; './/img' searches within it.
    for container in CSSSelector('div.image-container')(root):
        images = container.xpath('.//img')
        if images:
            container.getparent().replace(container, images[0])
        else:
            # defensive: a container without an image is simply dropped
            # (the original raised IndexError here)
            container.getparent().remove(container)
@registerTransformation
def removeProcessedFlags(root):
    """ Strip the processed="1" marker attribute again; called while
        generating PDF from the (splitted) HTML (HTML+PDF view).
    """
    for marked in root.xpath('//*[@processed]'):
        del marked.attrib['processed']
@registerTransformation
def replaceUnresolvedLinks(root):
    """ Replace every a.external-link node with a generated footnote
        (link text followed by the URL).
        Used for PDF generation only (html_mode = 'split').
    """
    for link in CSSSelector('a.external-link')(root):
        href = link.attrib['href']
        outer = lxml.html.Element('span')
        outer.attrib['class'] = 'generated-footnote-text'
        outer.text = link.text_content()
        inner = lxml.html.Element('span')
        inner.attrib['class'] = 'generated-footnote'
        inner.text = href
        outer.insert(1, inner)
        link.getparent().replace(link, outer)
@registerTransformation
def removeCrapFromHeadings(root):
    """ Ensure that HX tags contain only their plain text (no child
        markup); headings without any text are removed entirely.
    """
    for node in root.xpath(xpath_query(ALL_HEADINGS)):
        text = node.text_content()
        if text:
            # lxml's clear() wipes attributes, children, text *and* tail;
            # preserve the tail so text following the heading survives
            # (attributes are still dropped, as before)
            tail = node.tail
            node.clear()
            node.text = text
            node.tail = tail
        else:
            node.getparent().remove(node)
@registerTransformation
def fixHierarchies(root):
    """ Iterate over all boundary documents. For documents with
        level > 0 the contained heading hierarchy is shifted down
        by that level.
    """
    for doc in root.xpath('//div'):
        if not 'document-boundary' in doc.get('class', ''):
            continue
        level = int(doc.get('level', '0'))
        if level > 0:
            for heading in doc.xpath(xpath_query(ALL_HEADINGS)):
                # parse the full numeric suffix; the original
                # int(heading.tag[-1]) misread 'h10' as 0
                heading_level = int(heading.tag[1:])
                heading.tag = 'h%d' % (heading_level + level)
@registerTransformation
def convertWordFootnotes(root):
    """ Convert footnotes from Word conversion to PrinceXML format """
    # iterate over all <a id="ftn..."> footnote bodies, e.g.
    # <p class="P2"><span class="footnodeNumber"><a class=
    # "Footnote_20_Symbol" id="ftn2" href="#body_ftn2" name=
    # "ftn2">2</a></span> Fussnotentext 2</p>
    for anchor in root.xpath('//a'):
        anchor_id = anchor.get('id')
        if not anchor_id or not anchor_id.startswith('ftn'):
            continue
        # get hold of the outer <p> tag
        p_tag = anchor.getparent().getparent()
        assert p_tag.tag.lower() == 'p'
        # 'text' is now "2 Fussnotentext"
        text = p_tag.text_content()
        # get rid of the leading footnote number
        text = leading_numbers.sub(u'', text).strip()
        # now find referencing footnote in the body, e.g.
        # <a href="#ftn2" id="body_ftn2" name="body_ftn2">2</a>
        result = root.xpath("//a[@href='#%s']" % anchor_id)
        if not result:
            continue
        footnote_anchor = result[0]
        span = lxml.html.Element('span')
        span.attrib['class'] = 'footnoteText'
        span.text = text
        # (the original called span.append(span) here - appending an
        # element to itself is invalid in lxml and raised ValueError;
        # convertWordFootnotes2() shows the intended pattern without it)
        footnote_anchor.getparent().replace(footnote_anchor, span)
        p_tag.getparent().remove(p_tag)
@registerTransformation
def fixAmpersand(root):
    """ Convert solitary '&' to '&amp;' """
    for node in root.xpath('//*'):
        if not '&' in (node.text or ''):
            continue
        # the extracted source read text.replace('&', '&') - a no-op
        # caused by HTML-unescaping of the original '&amp;' literal;
        # restore the evidently intended replacement
        node.text = node.text.replace('&', '&amp;')
@registerTransformation
def convertWordFootnotes2(root):
    """ Convert footnotes from Word conversion to PrinceXML format """
    # iterate over all
    # <div id="sdfootnote1">
    # <p class="c2"><a name="_GoBack"></a> <a class="sdfootnotesym" name="sdfootnote1sym" href="#sdfootnote1anc" id="sdfootnote1sym">1</a>
    # Das ist der Fussnotentext</p>
    # <p class="sdfootnote-western"><br></p>
    # </div>
    # elements
    selector = CSSSelector('a.sdfootnotesym')
    for anchor in selector(root):
        anchor_id = anchor.get('id') or anchor.get('name')
        # get hold of the outer tag
        parent= anchor.getparent()
        # 'text' is now "2 Fussnotentext"; strip the leading number
        text = parent.text_content()
        text = leading_numbers.sub(u'', text).strip()
        # now find referencing footnote anchor in the body text, e.g.
        # <p class="western c1">Beispiel Text (dies ist eine Fussnote
        # <a class="sdfootnoteanc" name="sdfootnote1anc" href="#sdfootnote1sym" id="sdfootnote1anc"><sup>1</sup></a> )</p>
        result = root.xpath("//a[@href='#%s']" % anchor_id)
        if not result:
            continue
        # and replace it with a span.footnoteText
        footnote_anchor = result[0]
        span = lxml.html.Element('span')
        span.attrib['class'] = 'footnoteText'
        span.text = text
        footnote_anchor.getparent().replace(footnote_anchor, span)
        # remove footnote (the outer div, see above)
        div_parent = parent.getparent()
        div_parent.getparent().remove(div_parent)
@registerTransformation
def adjustHeadingsFromAggregatedHTML(root):
    """ For an aggregated HTML document built from a nested folder
        structure, the HX headings of the contained AuthoringContentPage
        documents must be adjusted. The 'level' attribute of the related
        document node serves as the offset for renumbering.
    """
    for document in CSSSelector('div.portal-type-authoringcontentpage')(root):
        offset = int(document.get('level'))
        # sorted list of the distinct heading levels used in this document
        used_levels = sorted(set(
            int(heading.tag[1:])
            for heading in document.xpath(xpath_query(ALL_HEADINGS))))
        # new level = offset + rank of the original level among those used
        for heading in document.xpath(xpath_query(ALL_HEADINGS)):
            rank = used_levels.index(int(heading.tag[1:]))
            heading.tag = 'h%d' % (offset + rank)
@registerTransformation
def removeEmptyNodesFromWord(root):
    """ Remove empty paragraphs (and the FOOTER div) from imported
        Word markup.
    """
    for paragraph in root.xpath('//p'):
        # paragraphs containing images are never considered empty
        if paragraph.xpath('.//img'):
            continue
        if not lxml.html.tostring(paragraph, encoding=unicode, method='text').strip():
            paragraph.getparent().remove(paragraph)
    # also remove the FOOTER culprit
    for footer in CSSSelector('div[type="FOOTER"]')(root):
        footer.getparent().remove(footer)
@registerTransformation
def mergeSingleSpanIntoParagraph(root):
    """ Merge a solitary childless <span> element inside a paragraph
        into the paragraph's own text content.
    """
    for node in root.xpath('//p'):
        spans = node.xpath('.//span')
        if len(spans) == 1 and not spans[0].getchildren():
            span = spans[0]
            # keep all surrounding text: the original assigned span.text
            # to node.text, silently discarding any existing paragraph
            # text and the span's tail
            merged = (node.text or u'') + (span.text or u'') + (span.tail or u'')
            span.getparent().remove(span)
            node.text = merged
@registerTransformation
def convertWordEndnotes(root):
    """ Convert Word endnotes into a simple list appended to <body>. """
    endnotes = list()
    # Word endnotes live in <div id="sdendnoteN"> elements
    for node in root.xpath('//div'):
        node_id = node.get('id', '')
        if not node_id.startswith('sdendnote'):
            continue
        p_tag = node.xpath('.//p')[0]
        anchors_in_p_tag = p_tag.xpath('.//a')
        endnote_num = None
        # the first anchor carries the endnote number; strip it from the text
        if anchors_in_p_tag:
            anchor = anchors_in_p_tag[0]
            endnote_num = anchor.text_content()
            anchor.getparent().remove(anchor)
        endnote_txt = p_tag.text_content()
        endnotes.append(dict(text=endnote_txt, number=endnote_num, id=node_id))
        node.getparent().remove(node)
    if endnotes:
        # rebuild the collected endnotes as ul.endnotes > li.endnote
        ul = lxml.html.Element('ul')
        ul.attrib['class'] = 'endnotes'
        for endnote in endnotes:
            li = lxml.html.Element('li')
            li.attrib['class'] = 'endnote'
            span = lxml.html.Element('span')
            span.attrib['class'] = 'endnote-number'
            span.attrib['style'] = 'display: none'
            span.text = endnote['number']
            li.append(span)
            span = lxml.html.Element('span')
            span.attrib['class'] = 'endnote-text'
            span.attrib['id'] = endnote['id'] + 'sym'
            span.text = endnote['text']
            li.append(span)
            ul.append(li)
        root.xpath('//body')[0].append(ul)
    # Rename all 'name' attributes from the anchors to endnotes since TinyMCE
    # considers this as an anchor and not as a link to an anchor and therefore
    # TinyMCE will remove the inner text
    selector = CSSSelector('a.sdendnoteanc')
    for anchor in selector(root):
        try:
            del anchor.attrib['name']
        except KeyError:
            pass
@registerTransformation
def addIndexList(root):
    """ Add an index listing for all terms inside <span class="index-term"> """
    # term text -> list of generated target ids (one per occurrence)
    indexes = dict()
    for num, node in enumerate(CSSSelector('span.index-term')(root)):
        term = node.text_content().strip()
        term_id = 'index-term-%d' % num
        node.attrib['id'] = term_id
        indexes.setdefault(term, list()).append(term_id)
    if not indexes:
        return
    div_indexes = lxml.html.Element('div')
    div_indexes.attrib['id'] = 'indexes-list'
    div_ul = lxml.html.Element('ul')
    div_indexes.append(div_ul)
    # sort terms according to the current locale (Python 2 'cmp' API,
    # consistent with the rest of this module)
    index_terms = sorted(indexes.keys(), cmp=locale.strcoll)
    for index_term in index_terms:
        term_ids = indexes[index_term]
        li = lxml.html.Element('li')
        li.attrib['class'] = 'index-term-entry'
        span = lxml.html.Element('span')
        span.attrib['class'] = 'index-term-entry'
        span.text = index_term
        li.append(span)
        num_term_ids = len(term_ids)
        for i, term_id in enumerate(term_ids):
            a = lxml.html.Element('a')
            a.attrib['href'] = '#' + term_id
            a.attrib['class'] = 'index-term-entry'
            # NOTE(review): the visible anchor text is only a separator
            # (', ' between entries, '' on the last); presumably an
            # occurrence number was intended - confirm before changing
            a.text = (i+1 < num_term_ids) and ', ' or ''
            li.append(a)
        div_ul.append(li)
    # replace an existing div#indexes-list or append to the body.
    # replace() must be called on the parent node; the original called
    # nodes[0].replace(nodes[0], ...) which raises ValueError in lxml.
    nodes = CSSSelector('div#indexes-list')(root)
    if nodes:
        old = nodes[0]
        old.getparent().replace(old, div_indexes)
    else:
        # add to end of document
        body = root.xpath('//body')[0]
        body.append(div_indexes)
from zope.component import adapts
from zope.interface import implements
from archetypes.schemaextender.interfaces import ISchemaExtender
from archetypes.schemaextender.field import ExtensionField
from Products.Archetypes.public import IntegerField, SelectionWidget, DisplayList, BooleanField, BooleanWidget, StringField
from Products.ATContentTypes.interface import IATImage
# selectable scale percentages for the 'pdfScale' field (value, label)
ScaleVocabulary= DisplayList((
    ('100', '100 %'),
    ('90', '90 %'),
    ('80', '80 %'),
    ('70', '70 %'),
    ('60', '60 %'),
    ('50', '50 %'),
    ('40', '40 %'),
    ('30', '30 %'),
    ('20', '20 %'),
    ('10', '10 %'),
    ))

# placement choices for the image caption relative to the image
CaptionPositionVocabulary = DisplayList((
    (u'top', u'Top'),
    (u'bottom', u'Bottom'),
    ))
class MyIntegerField(ExtensionField, IntegerField):
    """ schemaextender-aware integer field """
class MyBooleanField(ExtensionField, BooleanField):
    """ schemaextender-aware boolean field """
class MyStringField(ExtensionField, StringField):
    """ schemaextender-aware string field """
class ImageExtender(object):
    """ add a dedicated (optional) field to ATImage for
        storing an "original" or Hires file e.g. an EPS
        or hi-res TIFF image.

        All extra fields live on a dedicated 'PDF' schemata and control
        how the image is rendered during PDF production.
    """

    adapts(IATImage)
    implements(ISchemaExtender)

    # Class-level schema definition, shared by all adapter instances.
    fields = [MyIntegerField('pdfScale',
                  default=100,
                  vocabulary=ScaleVocabulary,
                  widget=SelectionWidget(
                      label=u"Scale for PDF production",
                      label_msgid='label_scale_for_pdf',
                      i18n_domain='producepublish',
                  ),
                  schemata='PDF',
              ),
              MyBooleanField('excludeFromImageEnumeration',
                  default=False,
                  widget=BooleanWidget(
                      label=u"Exclude from image enumeration",
                      label_msgid='label_exclude_from_image_enumeration',
                      i18n_domain='producepublish',
                  ),
                  schemata='PDF',
              ),
              MyBooleanField('linkToFullScale',
                  default=False,
                  widget=BooleanWidget(
                      label=u'Create image link to full scale in HTML view',
                      label_msgid='label_create_link_to_full_scale',
                      i18n_domain='producepublish',
                  ),
                  schemata='PDF',
              ),
              MyBooleanField('displayInline',
                  default=False,
                  widget=BooleanWidget(
                      label=u'Display image inline ',
                      label_msgid='label_display_image_inline',
                      i18n_domain='producepublish',
                  ),
                  schemata='PDF',
              ),
              MyStringField('captionPosition',
                  default='bottom',
                  vocabulary=CaptionPositionVocabulary,
                  widget=SelectionWidget(
                      label=u'Caption position',
                      label_msgid='label_caption_position',
                      i18n_domain='producepublish',
                      format='select',
                  ),
                  schemata='PDF',
              ),
             ]

    def __init__(self, context):
        # the adapted ATImage instance
        self.context = context

    def getFields(self):
        # extend only inside an authoring project: objects outside such a
        # project lack getAuthoringProject() and raise AttributeError,
        # in which case no extra fields are exposed
        try:
            self.context.getAuthoringProject()
            return self.fields
        except AttributeError:
            return ()
import os
import codecs
import shutil
import tempfile
import zipfile
from compatible import InitializeClass
from Products.Five.browser import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from Products.ATContentTypes.interface.folder import IATFolder
from ZPublisher.Iterators import filestream_iterator
try:
from zope.app.pagetemplate.viewpagetemplatefile import ViewPageTemplateFile as ViewPageTemplateFile2
except ImportError:
from zope.browserpage.viewpagetemplatefile import ViewPageTemplateFile as ViewPageTemplateFile2
from zopyx.smartprintng.plone.logger import LOG
from zopyx.smartprintng.plone.resources import resources_registry
from zopyx.smartprintng.plone import Transformer
import splitter
from util import getLanguageForObject
# directory of this package (used to locate bundled resources)
cwd = os.path.dirname(os.path.abspath(__file__))

# server host/port of the SmartPrintNG server
URL = os.environ.get('SMARTPRINTNG_SERVER', 'http://localhost:6543')
# run the conversion in-process through zopyx.convert2 instead of the server
LOCAL_CONVERSION = 'SMARTPRINTNG_LOCAL_CONVERSION' in os.environ
# additionally archive the working directory as a ZIP file
ZIP_OUTPUT = 'SMARTPRINTNG_ZIP_OUTPUT' in os.environ
class ProducePublishView(BrowserView):
    """ Produce & Publish view (using Produce & Publish server).

        Renders the context (through its @@asHTML view) into a complete
        HTML document inside a fresh working directory, applies the
        configured transformations and finally converts the result to
        PDF - either in-process (zopyx.convert2) or through a remote
        Produce & Publish server.
    """

    template = ViewPageTemplateFile('../resources_default/pdf_template_standalone.pt')

    # default transformations used for the default PDF view.
    # 'transformations' can be overriden within a derived ProducePublishView.
    # If you don't need any transformation -> redefine 'transformations'
    # as empty list or tuple
    transformations = (
        'makeImagesLocal',
        # 'removeEmptyElements',
        # 'removeInternalLinks',
        # 'annotateInternalLinks',
        # 'cleanupTables',
        'convertFootnotes',
        'removeCrapFromHeadings',
        'fixHierarchies',
        'addTableOfContents',
    )

    def copyResources(self, resources_dir, destdir):
        """ Copy all plain files of a global or local resources directory
            into the destination directory (subdirectories are skipped).
        """
        if os.path.exists(resources_dir):
            for name in os.listdir(resources_dir):
                fullname = os.path.join(resources_dir, name)
                if os.path.isfile(fullname):
                    shutil.copy(fullname, destdir)

    def transformHtml(self, html, destdir, transformations=None):
        """ Perform post-rendering HTML transformations.

            'transformations' defaults to self.transformations and can be
            overridden per-request ('transformations' either as a
            comma-separated string or a sequence).
        """
        if transformations is None:
            transformations = self.transformations
        # the request can override transformations as well
        if self.request.has_key('transformations'):
            t_from_request = self.request['transformations']
            if isinstance(t_from_request, basestring):
                transformations = t_from_request and t_from_request.split(',') or []
            else:
                transformations = t_from_request
        T = Transformer(transformations, context=self.context, destdir=destdir)
        return T(html)

    def __call__(self, *args, **kw):
        """ Delegate to __call2__() and make sure conversion failures are
            logged (with traceback) before being re-raised.
        """
        try:
            return self.__call2__(*args, **kw)
        except:
            LOG.error('Conversion failed', exc_info=True)
            raise

    def __call2__(self, *args, **kw):
        """ URL parameters:
            'language' - 'de', 'en'....used to override the language of the
                         document
            'converter' - default to on the converters registered with
                          zopyx.convert2 (default: pdf-prince)
            'resource' - the name of a registered resource (directory)
            'template' - the name of a custom template name within the choosen
                         'resource'
        """
        # Output directory
        tmpdir_prefix = os.path.join(tempfile.gettempdir(), 'produce-and-publish')
        if not os.path.exists(tmpdir_prefix):
            os.makedirs(tmpdir_prefix)
        destdir = tempfile.mkdtemp(dir=tmpdir_prefix, prefix=self.context.getId() + '-')

        # debug/logging
        params = kw.copy()
        params.update(self.request.form)
        LOG.info('new job (%s, %s) - outdir: %s' % (args, params, destdir))

        # get hold of the language (hyphenation support)
        language = getLanguageForObject(self.context)
        if params.get('language'):
            language = params.get('language')

        # Check for CSS injection
        custom_css = None
        custom_stylesheet = params.get('custom_stylesheet')
        if custom_stylesheet:
            # check the traversal result *before* stringifying it:
            # str(None) would be the string 'None', never None
            custom_css = self.context.restrictedTraverse(custom_stylesheet, None)
            if custom_css is None:
                raise ValueError('Could not access custom CSS at %s' % custom_stylesheet)
            custom_css = str(custom_css)

        # check for resource parameter
        resource = params.get('resource')
        if resource:
            resources_directory = resources_registry.get(resource)
            if not resources_directory:
                raise ValueError('No resource "%s" configured' % resource)
            if not os.path.exists(resources_directory):
                raise ValueError('Resource directory for resource "%s" does not exist' % resource)
            self.copyResources(resources_directory, destdir)

            # look up custom template in resources directory
            template_name = params.get('template', 'pdf_template')
            if not template_name.endswith('.pt'):
                template_name += '.pt'
            template_filename = os.path.join(resources_directory, template_name)
            if not os.path.exists(template_filename):
                raise IOError('No template found (%s)' % template_filename)
            template = ViewPageTemplateFile2(template_filename)
        else:
            template = self.template

        # call the dedicated @@asHTML on the top-level node. For a leaf document
        # this will return either a HTML fragment for a single document or @@asHTML
        # might be defined as an aggregator for a bunch of documents (e.g. if the
        # top-level is a folderish object
        html_view = self.context.restrictedTraverse('@@asHTML', None)
        if not html_view:
            raise RuntimeError('Object at does not provide @@asHTML view (%s, %s)' %
                               (self.context.absolute_url(1), self.context.portal_type))
        html_fragment = html_view()

        # arbitrary application data
        data = params.get('data', None)

        # Now render the complete HTML document
        html = template(self,
                        language=language,
                        request=self.request,
                        body=html_fragment,
                        custom_css=custom_css,
                        data=data,
                        )

        # and apply transformations
        html = self.transformHtml(html, destdir)

        # escape bare ampersands so the markup stays well-formed
        # (the original 'replace' here was a no-op)
        html = html.replace('& ', '&amp; ')

        # and store it in a dedicated working directory
        dest_filename = os.path.join(destdir, 'index.html')
        fp = codecs.open(dest_filename, 'wb', encoding='utf-8')
        fp.write(html)
        fp.close()

        # split HTML document into parts and store them on the filesystem
        # (making only sense for folderish content). split_html() derives
        # its output directory from the filename itself; its second
        # positional parameter is 'split_at_level', NOT a directory.
        if IATFolder.providedBy(self.context) and not 'no-split' in params:
            splitter.split_html(dest_filename)

        # copy over global styles etc.
        resources_dir = os.path.join(cwd, 'resources')
        self.copyResources(resources_dir, destdir)

        # copy over language dependent hyphenation data
        if language:
            hyphen_file = os.path.join(resources_dir, 'hyphenation', language + '.hyp')
            if os.path.exists(hyphen_file):
                shutil.copy(hyphen_file, destdir)
            hyphen_css_file = os.path.join(resources_dir, 'languages', language + '.css')
            if os.path.exists(hyphen_css_file):
                shutil.copy(hyphen_css_file, destdir)

        # now copy over resources (of a derived view)
        self.copyResources(getattr(self, 'local_resources', ''), destdir)

        if ZIP_OUTPUT or 'zip_output' in params:
            # NOTE(review): tempfile.mktemp() is race-prone; kept for
            # behavioral compatibility with existing tooling
            archivename = tempfile.mktemp(suffix='.zip')
            fp = zipfile.ZipFile(archivename, "w", zipfile.ZIP_DEFLATED)
            for root, dirs, files in os.walk(destdir):
                # NOTE: ignore empty directories
                for fn in files:
                    absfn = os.path.join(root, fn)
                    zfn = absfn[len(destdir) + len(os.sep):]  # relative path
                    fp.write(absfn, zfn)
            fp.close()
            LOG.info('ZIP file written to %s' % archivename)

        if 'no_conversion' in params:
            return destdir

        if LOCAL_CONVERSION:
            # in-process conversion via zopyx.convert2
            from zopyx.convert2 import Converter
            c = Converter(dest_filename)
            result = c(params.get('converter', 'pdf-pisa'))
            if result['status'] != 0:
                raise RuntimeError('Error during PDF conversion (%r)' % result)
            pdf_file = result['output_filename']
            LOG.info('Output file: %s' % pdf_file)
            return pdf_file
        else:
            # Produce & Publish server integration
            from zopyx.smartprintng.client.zip_client import Proxy2
            proxy = Proxy2(URL)
            result = proxy.convertZIP2(destdir, self.request.get('converter', 'pdf-prince'))
            LOG.info('Output file: %s' % result['output_filename'])
            return result['output_filename']

InitializeClass(ProducePublishView)
class PDFDownloadView(ProducePublishView):
    """ Converts the context with the default 'pp-default' resource and
        streams the resulting PDF to the browser as attachment.
    """

    def __call__(self, *args, **kw):
        # force sensible defaults for a standalone PDF download
        if not 'resource' in kw:
            kw['resource'] = 'pp-default'
        if not 'template' in kw:
            kw['template'] = 'pdf_template_standalone'
        kw['no-split'] = True
        output_file = super(PDFDownloadView, self).__call__(*args, **kw)
        # os.path.splitext() keeps the leading dot; strip it so the headers
        # do not become 'application/.pdf' / 'filename="id..pdf"'
        ext = os.path.splitext(output_file)[1].lstrip('.')
        R = self.request.response
        R.setHeader('content-type', 'application/%s' % ext)
        R.setHeader('content-disposition', 'attachment; filename="%s.%s"' % (self.context.getId(), ext))
        # disable caching of the generated document
        R.setHeader('pragma', 'no-cache')
        R.setHeader('cache-control', 'no-cache')
        R.setHeader('Expires', 'Fri, 30 Oct 1998 14:19:41 GMT')
        R.setHeader('content-length', os.path.getsize(output_file))
        return filestream_iterator(output_file, 'rb').read()

InitializeClass(PDFDownloadView)
class GenericDownloadView(ProducePublishView):
    """ Converts the context and streams the converter output (any
        format) to the browser as attachment.
    """

    def __call__(self, *args, **kw):
        output_file = super(GenericDownloadView, self).__call__(*args, **kw)
        # os.path.splitext() keeps the leading dot; strip it so the headers
        # do not become 'application/.pdf' / 'filename="id..pdf"'
        ext = os.path.splitext(output_file)[1].lstrip('.')
        # return output file over HTTP
        R = self.request.response
        R.setHeader('content-type', 'application/%s' % ext)
        R.setHeader('content-disposition', 'attachment; filename="%s.%s"' % (self.context.getId(), ext))
        R.setHeader('content-length', os.path.getsize(output_file))
        # disable caching of the generated document
        R.setHeader('pragma', 'no-cache')
        R.setHeader('cache-control', 'no-cache')
        R.setHeader('Expires', 'Fri, 30 Oct 1998 14:19:41 GMT')
        return filestream_iterator(output_file, 'rb')

InitializeClass(GenericDownloadView)
import os
import codecs
from cStringIO import StringIO
from BeautifulSoup import BeautifulSoup, Tag
from lxml.cssselect import CSSSelector
import lxml.html
from util import _findTextInNode
def split_html(html_filename, split_at_level=0):
    """ Split aggregated and rendered HTML document at
        some <hX> tag(s). split_at_level=0 -> split at
        H1 tags, split_at_level=1 -> split at H1 and H2
        tags.
        Returns a list of dicts with keys 'html' referring
        to the subdocument and 'level' indicating the split
        point.

        NOTE(review): the output directory is derived from the input
        filename - callers must not pass a directory as second
        positional argument (it would be taken as 'split_at_level').
    """
    destdir = os.path.dirname(html_filename)
    # pretty-print through BeautifulSoup so every <hX> start tag sits on
    # its own line and the line-wise scan below can detect split points
    soup = BeautifulSoup(file(html_filename).read())
    fp = StringIO(soup.__str__(prettyPrint=True))
    docs = list()          # one dict per subdocument
    current_doc = list()   # lines of the chunk currently being collected
    for line in fp:
        line = line.rstrip()
        for level in range(split_at_level + 1):
            if '<h%d' % (level + 1) in line.lower():
                # new split point found: flush the collected lines
                html = '\n'.join(current_doc)
                root = lxml.html.fromstring(unicode(html, 'utf-8'))
                # use the first H1 (if any) as subdocument title
                title = u''
                h1_nodes = root.xpath('//h1')
                if h1_nodes:
                    title = h1_nodes[0].text_content().strip()
                # count tables and images
                number_tables = len(root.xpath('//table'))
                number_images = len(CSSSelector('div.image-caption')(root))
                # find all linkable nodes with an ID attribute
                node_ids = list()
                for node in root.xpath('.//*'):
                    node_id = node.get('id')
                    if node_id:
                        node_ids.append(node_id)
                html = lxml.html.tostring(root, encoding=unicode)
                docs.append(dict(html=html,
                                 level=level,
                                 title=title,
                                 node_ids=node_ids,
                                 number_images=number_images,
                                 number_tables=number_tables))
                current_doc = []
                break

        current_doc.append(line)

    # now deal with the remaining part of the document
    html = '\n'.join(current_doc)
    root = lxml.html.fromstring(unicode(html, 'utf-8'))
    title = u''
    h1_nodes = root.xpath('//h1')
    if h1_nodes:
        title = h1_nodes[0].text_content().strip()
    # count tables and images
    number_tables = len(root.xpath('//table'))
    number_images = len(CSSSelector('div.image-caption')(root))
    # find all linkable nodes with an ID attribute
    node_ids = list()
    for node in root.xpath('.//*'):
        node_id = node.get('id')
        if node_id:
            node_ids.append(node_id)
    html = lxml.html.tostring(root, encoding=unicode)
    docs.append(dict(html=html,
                     level=0,
                     title=title,
                     node_ids=node_ids,
                     number_images=number_images,
                     number_tables=number_tables))

    # now store files on the filesystem; docs[0] holds whatever preceded
    # the first heading and is intentionally skipped
    ini_filename = os.path.join(destdir, 'documents.ini')
    fp_ini = codecs.open(ini_filename, 'w', 'utf-8')
    for count, d in enumerate(docs[1:]):
        filename = os.path.join(destdir, 'split-0/%d-level-%d.html' % (count, d['level']))
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        file(filename, 'w').write(d['html'].encode('utf-8'))
        # write one INI section per subdocument
        print >>fp_ini, '[%d]' % count
        print >>fp_ini, 'filename = %s' % filename
        print >>fp_ini, 'title = %s' % d['title']
        print >>fp_ini, 'number_tables= %d' % d['number_tables']
        print >>fp_ini, 'number_images = %d' % d['number_images']
        print >>fp_ini, 'node_ids = '
        for node_id in d['node_ids']:
            print >>fp_ini, ' ' + node_id
        print >>fp_ini
    fp_ini.close()
    return docs[1:]
from urllib2 import unquote, Request, urlopen, HTTPError
from urlparse import urlparse
from Products.CMFCore.utils import getToolByName
from Products.ATContentTypes.interfaces import IATImage
from Products.Archetypes.Field import Image
from plone.app.imaging.scale import ImageScale
from zopyx.smartprintng.plone.logger import LOG
try:
from zope.app.component.hooks import getSite
except ImportError:
from zope.component.hooks import getSite
def resolveImage(context, src):
    """ Try to resolve an image based on its src which
        can be a relative URL, an absolute URL or an URL
        using UIDs. Image scales can be annotated through
        image_<scale> or using the newer plone.app.imaging
        mechanism. Much fun :-P

        Returns the resolved (full-scale) image object or None.
    """
    if context is None:
        context = getSite()
    ref_catalog = getToolByName(context, 'reference_catalog')
    parse_result = urlparse(unquote(src))
    path = str(parse_result.path)
    img_obj = None
    if path.startswith('resolveuid'):
        # can be resolveuid/<uid>/@@images/image/preview
        path_parts = path.split('/')
        img_obj = ref_catalog.lookupObject(path_parts[1])
    else:
        candidates = [path, path[1:]]  # with and without leading '/'
        # check for a possible URL redirection
        if src.startswith('http'):
            req = Request(src)
            try:
                result = urlopen(req)
            except HTTPError:
                result = None
            if result and result.url != src:
                # a redirection happened - also try the redirected path
                parse_result2 = urlparse(unquote(result.url))
                path2 = str(parse_result2.path)
                candidates.extend([path2, path2[1:]])
        for p in candidates:
            img_obj = context.restrictedTraverse(p, None)
            if img_obj:
                if img_obj.portal_type in ('Image',):
                    # check if current image is a scale (having a parent image)
                    if IATImage.providedBy(img_obj.aq_parent):
                        img_obj = img_obj.aq_parent
                    break
                elif isinstance(img_obj, ImageScale):
                    # plone.app.imaging scale -> use the owning image
                    img_obj = img_obj.aq_parent
                    break
                elif isinstance(img_obj.aq_inner.aq_base, Image):
                    # Archetypes image field value
                    img_obj = img_obj.aq_inner.aq_base
                    break
                elif isinstance(img_obj.aq_parent, Image):
                    break
                else:
                    # traversal hit something that is not image-like
                    img_obj = None
    return img_obj
def existsExternalImageUrl(url):
""" Check if the external URL exists (by issuing a
HTTP request.
"""
class HeadRequest(Request):
def get_method(self):
return "HEAD"
if not url.startswith('http'):
return False
try:
urlopen(HeadRequest(url))
return True
except Exception, e:
LOG.warn('External(?) image reference not found (%s)' % e)
return False | zopyx.smartprintng.plone | /zopyx.smartprintng.plone-2.1.27.zip/zopyx.smartprintng.plone-2.1.27/zopyx/smartprintng/plone/browser/images.py | images.py |
from Products.Five.browser import BrowserView
from Products.ATContentTypes.interface.folder import IATFolder
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from ...import Transformer
try:
from plone.dexterity.interfaces import IDexterityContainer
HAVE_DEXTERITY = True
except ImportError:
HAVE_DEXTERITY = False
from ...logger import LOG
from ...interfaces import IArchiveFolder
def _c(s):
    """Normalize *s* to a UTF-8 encoded byte string.

    Non-unicode input is first decoded as UTF-8 (undecodable bytes are
    dropped), then re-encoded as UTF-8.
    """
    text = s if isinstance(s, unicode) else unicode(s, 'utf-8', 'ignore')
    return text.encode('utf-8')
def collector(folder, level=1, published_only=False, html=None, filter_uids=None):
    """ Recursively walk 'folder' (in folder order) and append the HTML of
        every subobject providing an @@asHTML view to 'html' (mutated in
        place; callers pass in their own list).

        published_only -- include only objects in review state 'published'

        NOTE(review): 'filter_uids' is accepted but currently not applied
        during the nested walk - confirm against FlatHTMLView.collect().
    """
    # avoid the shared-mutable-default pitfall of 'html=[]'
    if html is None:
        html = []
    if filter_uids is None:
        filter_uids = []
    utils = getToolByName(folder, 'plone_utils')
    wf_tool = getToolByName(folder, 'portal_workflow')
    for brain in folder.getFolderContents({'sort_on': 'getObjPositionInParent'}):
        obj = brain.getObject()
        LOG.info('Introspecting %s' % obj.absolute_url(1))
        view = obj.restrictedTraverse('@@asHTML', None)
        if view is not None:
            pt = utils.normalizeString(obj.portal_type)
            review_state = wf_tool.getInfoFor(obj, 'review_state')
            if published_only and review_state not in ['published']:
                continue
            # folderish objects (archive folders excluded) get a heading
            # and are descended into recursively
            is_folderish = False
            if HAVE_DEXTERITY:
                if (IATFolder.providedBy(obj) or IDexterityContainer.providedBy(obj)) and not IArchiveFolder.providedBy(obj):
                    is_folderish = True
            else:
                if IATFolder.providedBy(obj) and not IArchiveFolder.providedBy(obj):
                    is_folderish = True

            if is_folderish:
                html.append('<div class="mode-nested level-%d document-boundary portal-type-folder review-state-%s" path="%s" id="doc-id-%s" document_id="%s" review_state="%s" level="%d" uid="%s">\n' %
                            (level, review_state, obj.absolute_url(1), obj.getId(), obj.getId(), review_state, level, obj.UID()))
                if IATFolder.providedBy(obj):
                    folder_title = obj.Title()
                    folder_descr = obj.Description()
                else:
                    folder_title = obj.title  # Dexterity
                    folder_descr = obj.description
                html.append('<h%d class="title">%s</h%d>' % (level, folder_title, level))
                html.append('<div class="description">%s</div>' % folder_descr)
                collector(obj, level + 1, published_only, html)
                html.append('</div>')
            else:
                html.append('<div class="level-%d document-boundary portal-type-%s review-state-%s" path="%s" id="doc-id-%s" document_id="%s" review_state="%s" level="%d" uid="%s">\n' %
                            (level, pt, review_state, obj.absolute_url(1), obj.getId(), obj.getId(), review_state, level, obj.UID()))
                html.append('<div class="contentinfo">')
                html.append('<div><a class="editlink" href="%s/edit">Edit</a></div>' % obj.absolute_url())
                try:
                    html.append('<div class="review-state">%s</div>' % wf_tool.getInfoFor(obj, 'review_state'))
                except WorkflowException:
                    pass
                html.append('</div>')
                html.append(view())
                html.append('</div>')
        else:
            LOG.warn('No @@asHTML view found for %s' % obj.absolute_url(1))
class NestedHTMLView(BrowserView):
    """ A HTML collector for a Plone folder containing Document instances """

    def __call__(self, published_only=False, filter_uids=None):
        """ Collector for folderish content.

            published_only -- include only 'published' objects
            filter_uids -- optional sequence of UIDs, forwarded to the
                           collector (previously this parameter was
                           silently ignored)
        """
        html = list()
        # forward filter_uids instead of hardcoding an empty list;
        # avoid a mutable default argument on the method itself
        collector(self.context, 1, published_only, html,
                  filter_uids=filter_uids or [])
        html = '\n'.join(html)
        T = Transformer(['adjustHeadingsFromAggregatedHTML'])
        return T(html)
from Products.Five.browser import BrowserView
from Products.ATContentTypes.interface.folder import IATFolder
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
try:
from plone.dexterity.interfaces import IDexterityContainer
HAVE_DEXTERITY = True
except ImportError:
HAVE_DEXTERITY = False
from ...logger import LOG
from ...interfaces import IPPContent, IArchiveFolder
def _c(s):
    """Normalize *s* to a UTF-8 encoded byte string.

    Non-unicode input is first decoded as UTF-8 (undecodable bytes are
    dropped), then re-encoded as UTF-8.
    """
    text = s if isinstance(s, unicode) else unicode(s, 'utf-8', 'ignore')
    return text.encode('utf-8')
class FlatHTMLView(BrowserView):
    """ A HTML collector for a Plone folder containing Document instances """

    def collect(self, published_only=False, filter_uids=None):
        """ A collector taking only flat contents into account for the
            conversion.

            published_only -- include only objects in review state
                              'published'
            filter_uids -- if non-empty, include only objects whose UID
                           is contained in it
        """

        def collect_objects(folder, level=0, items=None):
            """ Depth-first collection of all subobjects into 'items'. """
            # avoid the shared-mutable-default pitfall of 'items=[]'
            if items is None:
                items = []
            for brain in folder.getFolderContents({'sort_on': 'getObjPositionInParent'}):
                obj = brain.getObject()
                if IPPContent.providedBy(obj):
                    items.append(dict(obj=obj, level=level))
                else:
                    LOG.warn('IPPContent not provided by %s' % obj.absolute_url(1))
                # descend into folderish objects (archive folders excluded)
                if HAVE_DEXTERITY:
                    if (IATFolder.providedBy(obj) or IDexterityContainer.providedBy(obj)) and not IArchiveFolder.providedBy(obj):
                        collect_objects(obj, level + 1, items)
                else:
                    if IATFolder.providedBy(obj) and not IArchiveFolder.providedBy(obj):
                        collect_objects(obj, level + 1, items)

        if filter_uids is None:
            filter_uids = []
        utils = getToolByName(self.context, 'plone_utils')
        wf_tool = getToolByName(self.context, 'portal_workflow')
        html = list()
        collected_objs = list()
        collect_objects(self.context, 0, collected_objs)
        for d in collected_objs:
            level = d['level']
            obj = d['obj']
            if filter_uids and not d['obj'].UID() in filter_uids:
                LOG.info('Filtered: %s' % obj.absolute_url(1))
                continue
            LOG.info('Introspecting %s' % obj.absolute_url(1))
            view = obj.restrictedTraverse('@@asHTML', None)
            if view is not None:
                pt = utils.normalizeString(obj.portal_type)
                review_state = wf_tool.getInfoFor(obj, 'review_state')
                if published_only and review_state not in ['published']:
                    continue
                html.append('<div class="mode-flat level-%d document-boundary portal-type-%s review-state-%s" path="%s" id="doc-id-%s" document_id="%s" review_state="%s" level="%d" uid="%s">\n' %
                            (level, pt, review_state, obj.absolute_url(1), obj.getId(), obj.getId(), review_state, level, obj.UID()))
                html.append('<div class="contentinfo">')
                html.append('<div><a class="editlink" href="%s/edit">Edit</a></div>' % obj.absolute_url())
                try:
                    html.append('<div class="review-state">%s</div>' % wf_tool.getInfoFor(obj, 'review_state'))
                except WorkflowException:
                    pass
                html.append('</div>')
                html.append(view())
                html.append('</div>')
            else:
                LOG.warn('No @@asHTML view found for %s' % obj.absolute_url(1))
        return '\n'.join(html)

    def __call__(self, published_only=False, filter_uids=None):
        """ Collector for folderish content """
        return self.collect(published_only=published_only, filter_uids=filter_uids)
zopyx.smartprintng.psd
======================
A simple converter for parsing PSD (Photoshop) files into PrinceXML templates (HTML/CSS)
based on the ``psdparse`` converter.
Requirements
============
* requires ``psdparse``:
- svn checkout http://www.telegraphics.com.au/svn/psdparse
- configure; make; make install
Usage
=====
* bin/psd2prince --help
* bin/psd2prince <PSD-FILENAME>
Licence
=======
``zopyx.smartprintng.psd`` is published under the GNU Public License V2 (GPL 2)
Author
======
| ZOPYX Limited
| Andreas Jung
| Charlottenstr. 37/1
| 72070 Tuebingen
| Germany
| [email protected]
| www.zopyx.com
| zopyx.smartprintng.psd | /zopyx.smartprintng.psd-0.1.1.tar.gz/zopyx.smartprintng.psd-0.1.1/README.txt | README.txt |
import os
import re
import sys
import shutil
import commands
import tempfile
import optparse
from lxml import etree
def _c(value, unit):
if unit == 'px':
return '%spx' % value
elif unit == 'mm':
return '%2.1fmm' % (float(value) / 11.81)
else:
raise ValueError('Unsupported unit: %s' % unit)
def parse_psd(filename, options):
    """Convert a PSD file into an HTML/CSS (PrinceXML) template.

    Runs the external 'psdparse' tool to obtain an XML description plus
    PNG exports of the layers, then writes 'index.html' and 'styles.css'
    (one absolutely positioned <div> per layer) into the output
    directory. Returns the absolute path of the output directory.
    """
    output_directory = options.output_directory
    if not output_directory:
        output_directory = tempfile.mktemp()
    # start from a clean output directory
    if os.path.exists(output_directory):
        shutil.rmtree(output_directory)
    os.makedirs(output_directory)

    # external PSD to XML parser
    cmd = 'psdparse -e -r --xmlout --pngdir "%s" --writepng "%s"' % (output_directory, filename)
    status, xml = commands.getstatusoutput(cmd)

    # cleanup XML (strip character entity references) and store it
    xml = re.sub('(&#.*?;)', '', xml)
    file(os.path.join(output_directory, 'source.xml'), 'w').write(xml)

    tree = etree.fromstring(xml)
    css_out = file(os.path.join(output_directory, 'styles.css'), 'w')
    html_out = file(os.path.join(output_directory, 'index.html'), 'w')
    print >>html_out, '<html>'
    print >>html_out, '<head>'
    print >>html_out, '<link rel="stylesheet" type="text/css" href="styles.css"/>'
    print >>html_out, '</head>'
    print >>html_out, '<body>'

    # determine page size (canvas dimensions in the chosen unit)
    page_width = _c(tree.attrib['COLUMNS'], options.units)
    page_height = _c(tree.attrib['ROWS'], options.units)
    print >>css_out, '@page { size: %s %s}' % (page_width, page_height)
    print >>css_out, '@page { margin: 0 0 0 0}'
    if options.outline:
        # visualize layer boundaries for debugging
        print >>css_out, '.layer {border: 1px dashed grey;}'
    print >>css_out
    print 'Page: %s x %s' % (page_width, page_height)

    for num, layer in enumerate(tree.iterfind('.//LAYER')):
        name = layer.attrib['NAME']
        top = _c(layer.attrib['TOP'], options.units)
        left = _c(layer.attrib['LEFT'], options.units)
        bottom = _c(layer.attrib['BOTTOM'], options.units)
        right = _c(layer.attrib['RIGHT'], options.units)
        width = _c(layer.attrib['WIDTH'], options.units)
        height = _c(layer.attrib['HEIGHT'], options.units)
        print 'Layer (%s): %s/%s -> %s/%s' % (name, top, left, right, bottom)
        # a layer covering the whole page is treated as background
        if width == page_width and height == page_height:
            print '...omitted (background layer)'
            continue

        # check for text node(s)
        text = []
        for node in layer.iterfind('.//Text'):
            node_text = node.text
            node_text = node_text.replace('\n', '<br/>')
            text.append(node_text)

        # check for images (PNG node)
        bg_image = None
        png_node = layer.find('.//PNG')
        if png_node is not None:
            bg_image = png_node.attrib['FILE']

        # Figure out fonts
        fonts = []
        for node in layer.iterfind('.//FontSet//Name'):
            if not node.text in fonts:
                fonts.append(node.text)

        # HTML DIV
        print >>html_out, '<!-- layer: %s -->' % name
        print >>html_out, '<div class="layer" id="content-%d">' % num
        if text:
            print >>html_out, (' '.join(text)).encode('utf-8', 'ignore')
        print >>html_out, '</div>'
        print >>html_out

        # CSS: absolute positioning taken from the layer bounding box
        print >>css_out, '/* layer: %s */' % name
        # print >>css_out, '#content-%d::before { ' % num
        # print >>css_out, ' content: "content-%d";' % num
        # print >>css_out, ' color: blue;'
        # print >>css_out, '}'
        # print
        print >>css_out, '#content-%d {' % num
        print >>css_out, ' position: absolute;'
        print >>css_out, ' left: %s;' % left;
        print >>css_out, ' top: %s;' % top;
        print >>css_out, ' width: %s;' % width;
        print >>css_out, ' height: %s;' % height;
        if bg_image and not text:
            # image-only layers become background images
            print >>css_out, ' background-image: url("%s");' % os.path.basename(bg_image)
        if text:
            print >>css_out, ' font-size: 11pt;'
            if fonts:
                print >>css_out, ' font-family: %s;' % ', '.join(['"%s"' % font for font in fonts])
        print >>css_out, '}'
        print >>css_out

    print >>html_out, '</body>'
    print >>html_out, '</html>'
    html_out.close()
    css_out.close()
    return os.path.abspath(output_directory)
def main():
    """Command-line entry point: parse options and convert the given PSD
    file, returning the output directory path."""
    usage = "usage: %prog [options] psd-file"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-o', '--output-directory', action='store', default='converted',
                      dest='output_directory',
                      help='Output directory')
    # help text fixed: the actual default unit is 'px' and the supported
    # units are 'px' and 'mm' (see _c())
    parser.add_option('-u', '--units', action='store', default='px',
                      dest='units',
                      help='Unit (px (default) or mm)')
    parser.add_option('-l', '--outline', action='store_true', default=False,
                      dest='outline',
                      help='Outline boxes')
    options, args = parser.parse_args()
    if not args:
        raise RuntimeError('No PSD given')
    return parse_psd(args[0], options)

if __name__ == '__main__':
    main()
import os, shutil, sys, tempfile, urllib2
from optparse import OptionParser

# temporary egg directory used to install zc.buildout
tmpeggs = tempfile.mkdtemp()

is_jython = sys.platform.startswith('java')

# parsing arguments
parser = OptionParser()
parser.add_option("-v", "--version", dest="version",
                  help="use a specific zc.buildout version")
parser.add_option("-d", "--distribute",
                  action="store_true", dest="distribute", default=False,
                  # typo fixed in user-facing help text ("Disribute")
                  help="Use Distribute rather than Setuptools.")
parser.add_option("-c", None, action="store", dest="config_file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))

options, args = parser.parse_args()

# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args += ['-c', options.config_file]

if options.version is not None:
    VERSION = '==%s' % options.version
else:
    VERSION = ''

# We decided to always use distribute, make sure this is the default for us
# USE_DISTRIBUTE = options.distribute
USE_DISTRIBUTE = True
args = args + ['bootstrap']
to_reload = False
try:
    # probe for an importable pkg_resources; an installation without the
    # '_distribute' marker is setuptools-based and gets replaced below
    import pkg_resources
    if not hasattr(pkg_resources, '_distribute'):
        to_reload = True
        raise ImportError
except ImportError:
    # download and execute the bootstrap installer for distribute or
    # setuptools, installing it into the temporary egg directory
    ez = {}
    if USE_DISTRIBUTE:
        exec urllib2.urlopen('http://python-distribute.org/distribute_setup.py'
                             ).read() in ez
        ez['use_setuptools'](to_dir=tmpeggs, download_delay=0, no_fake=True)
    else:
        exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
                             ).read() in ez
        ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)

    if to_reload:
        reload(pkg_resources)
    else:
        import pkg_resources

if sys.platform == 'win32':
    def quote(c):
        if ' ' in c:
            return '"%s"' % c  # work around spawn lamosity on windows
        else:
            return c
else:
    def quote(c):
        return c

# install zc.buildout (plus its setuptools/distribute requirement) into
# the temporary egg directory via easy_install
cmd = 'from setuptools.command.easy_install import main; main()'
ws = pkg_resources.working_set

if USE_DISTRIBUTE:
    requirement = 'distribute'
else:
    requirement = 'setuptools'

if is_jython:
    import subprocess

    assert subprocess.Popen([sys.executable] + ['-c', quote(cmd), '-mqNxd',
                            quote(tmpeggs), 'zc.buildout' + VERSION],
                            env=dict(os.environ,
                                     PYTHONPATH=
                                     ws.find(pkg_resources.Requirement.parse(requirement)).location
                                     ),
                            ).wait() == 0
else:
    assert os.spawnle(
        os.P_WAIT, sys.executable, quote(sys.executable),
        '-c', quote(cmd), '-mqNxd', quote(tmpeggs), 'zc.buildout' + VERSION,
        dict(os.environ,
             PYTHONPATH=
             ws.find(pkg_resources.Requirement.parse(requirement)).location
             ),
        ) == 0

ws.add_entry(tmpeggs)
ws.require('zc.buildout' + VERSION)

# hand over to buildout's own main() with the collected arguments, then
# remove the temporary egg directory
import zc.buildout.buildout
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
Changelog
=========
1.1.2 (2012/10/14)
------------------
* documentation updated
1.1.1 (2012/02/03)
------------------
* better self-test page (more text, some images)
* updated dependencies
1.1.0 (2011/08/22)
------------------
* compatibility with Pyramid 1.1
1.0.1 (2011/01/30)
------------------
* compatibility with Pyramid 1.0b3+
1.0.0 (2011/01/16)
------------------
* final release
* fixed test fixtures
0.7.1 (2011/01/10)
------------------
* pinned pyramid_xmlrpc 0.1
0.7.0 (2010/12/11)
------------------
* converted to Pyramid
0.6.7 (2010/07/18)
------------------
* adjusted company name
0.6.6 (2010/05/15)
------------------
* include conversion result into ZIP file
0.6.5 (2010/02/04)
------------------
* fixed racing condition in cleanup code
0.6.4 (2009/12/25)
------------------
* minor fixes
* documentation cleanup
0.6.3 (2009/11/29)
------------------
* compatibility with BFG 1.2
0.6.1 (2009/10/04)
------------------
* fixed bug in code for cleaning up the spool_directory
0.6.0 (2009/09/15)
------------------
* authentication and authorization support
0.5.2 (2009/09/05)
------------------
* adjusted to newest zopyx.convert2 version
0.6.0 (2009/09/15)
------------------
* added authentication and authorization support
0.5.2 (2009/09/05)
------------------
* adjusted to newest zopyx.convert2 version
0.5.1 (2009/08/01)
------------------
* added convertZIPandRedirect() method
* added deliver() method
* moved base.ServerCore to models.py
* delivered files must be younger than 'delivery_max_age' seconds
* cleanup code
* internal refactoring
* more tests
0.5.0 (2009/07/23)
------------------
* now requires Python 2.6
0.4.3 (2009/07/22)
------------------
* removed most of the ZCML configuration
* tests, tests, tests
0.4.2 (2009/07/19)
------------------
* switching back to zope.sendmail
* implemented asynchronous mail delivery on top of zope.sendmail
0.4.1 (2009/07/19)
------------------
* using repoze.sendmail
0.4.0 (2009/07/19)
------------------
* added convertZIPEmail() API
0.3.4 (2009/07/13)
------------------
* updated documentation
0.3.3 (2009/07/12)
------------------
* fix for missing BASE tag within HTML files
0.3.2 (2009/07/12)
------------------
* better logging
0.3.1 (2009/07/08)
------------------
* disabled check for maximum size of the request within
parse_xmlrpc_request() since 8MB is too small for us
0.3.0 (2009/07/06)
------------------
* switched to repoze.bfg
0.2.0 (2009/07/06)
------------------
* improved handling of temporary directories
0.1.2 (2009/07/05)
------------------
* improved handling of temporary directories
0.1.1 (2009/07/05)
------------------
* improved logging and error handling
0.1 (2009/07/05)
----------------
* Initial release
| zopyx.smartprintng.server | /zopyx.smartprintng.server-1.1.2.zip/zopyx.smartprintng.server-1.1.2/docs/source/HISTORY.rst | HISTORY.rst |
zopyx.smartprintng.server
=========================
``zopyx.smartprintng.server`` is a Pyramid based server implementation
and implements the server side functionality of the Produce & Publish platform.
It is known as the ``Produce & Publish Server``.
Requirements
------------
* Python 2.6, 2.7 (no Python 3 support)
Installation
------------
- create a ``virtualenv`` environment (Python 2.6) - either within your
current (empty) directory or by letting virtualenv create one for you.
(``easy_install virtualenv`` if ``virtualenv`` is not available on your system)::
virtualenv --no-site-packages .
or::
virtualenv --no-site-packages smartprintng
- install the SmartPrintNG server::
bin/easy_install zopyx.smartprintng.server
- create a ``server.ini`` configuration file (and change it according to your needs)::
[DEFAULT]
debug = true
[app:main]
use = egg:zopyx.smartprintng.server#app
reload_templates = true
debug_authorization = false
debug_notfound = false
[server:main]
use = egg:Paste#http
host = 127.0.0.1
port = 6543
- start the server (in foreground)::
bin/pserve server.ini
- or start it in background::
bin/pserve server.ini --daemon
Upgrading
---------
For upgrading an existing SmartPrintNG server you should try the following inside
your virtualenv environment::
bin/easy_install -U zopyx.smartprintng.server
bin/easy_install -U zopyx.convert2
XMLRPC API
----------
The SmartPrintNG server exposes several methods through XMLRPC::
def convertZIP(auth_token, zip_archive, converter_name):
""" 'zip_archive' is ZIP archive (encoded as base-64 byte string).
The archive must contain exactly *one* HTML file to be converted
including all related resources like stylesheets and images.
All files must be stored flat within the archive (no subfolders).
All references to externals resources like the 'src' attribute
of the IMG tag or references to the stylesheet(s) must use
relative paths. The method returns the converted output file
also as base64-encoded ZIP archive.
"""
def convertZIPEmail(auth_token, context, zip_archive, converter_name='pdf-prince',
sender=None, recipient=None, subject=None, body=None):
""" Similar to convertZIP() except that this method will send the
converted output document to a recipient by email. 'subject' and
'body' parameters *must* be utf-8 encoded.
"""
def availableConverters():
""" Returns a list of available converter names on the
SmartPrintNG backend.
"""
def authenticate(username, password):
""" Log into the server. Returns an auth_token. authenticate()
must be called before calling any of the methods above.
"""
def ping():
""" says 'pong' - or something similar """
Email configuration
-------------------
For using the email support through the ``convertZIPEmail()`` the email server must be
configured through a dedicated configuration file. An ``email.ini`` may look like this::
[mail]
hostname = smtp.gmail.com
username = some_username
password = some_password
force_tls = False
no_tls = False
You have to pass the name of the email configuration file to ``pserve`` when starting
the server::
bin/pserve server.ini mail_config=/path/to/email.ini
Source code
-----------
https://github.com/zopyx/zopyx.smartprintng.server/
Bug tracker
-----------
https://github.com/zopyx/zopyx.smartprintng.server/issues
Support
-------
Support for Produce & Publish Server is currently only available on a project basis.
Contact
-------
| ZOPYX Limited
| Hundskapfklinge 33
| D-72074 Tuebingen, Germany
| [email protected]
| www.zopyx.com
| zopyx.smartprintng.server | /zopyx.smartprintng.server-1.1.2.zip/zopyx.smartprintng.server-1.1.2/docs/source/README.rst | README.rst |
import os
import tempfile
import email.MIMEText
import email.Header
import email.MIMEBase
import email.MIMEMultipart
from email import Encoders
from ConfigParser import ConfigParser
from zope.sendmail.mailer import SMTPMailer
from zope.sendmail.delivery import QueuedMailDelivery, QueueProcessorThread
import transaction
from logger import LOG
def getMailConfiguration():
    """ Read the email configuration from the INI file referenced by the
        EMAIL_CONFIG environment variable and return it as a dict with
        the keys 'hostname', 'username', 'password', 'maildir',
        'force_tls' and 'no_tls'.

        A Maildir directory structure (cur/tmp/new subdirectories) is
        created on the fly if it does not exist yet.

        Raises RuntimeError if EMAIL_CONFIG is unset or points to a
        non-existing file.
    """

    mail_config = os.environ.get('EMAIL_CONFIG')
    if not mail_config:
        raise RuntimeError('No email configuration found')
    if not os.path.exists(mail_config):
        raise RuntimeError('Configured email configuration file not available (%s)' % mail_config)

    CP = ConfigParser()
    # Bug fix: read the *configured* file -- the code used to read a
    # hard-coded 'email.ini' relative to the current working directory,
    # silently ignoring the file named in EMAIL_CONFIG.
    CP.read(mail_config)

    # defaults used for options missing from the [mail] section
    hostname = 'localhost'
    username = None
    password = None
    no_tls = False
    force_tls = False
    maildir = tempfile.mkdtemp(prefix='zopyx.smartprintng.server')

    if CP.has_option('mail', 'hostname'): hostname = CP.get('mail', 'hostname')
    if CP.has_option('mail', 'username'): username = CP.get('mail', 'username')
    if CP.has_option('mail', 'password'): password = CP.get('mail', 'password')
    if CP.has_option('mail', 'maildir'): maildir = CP.get('mail', 'maildir')
    if CP.has_option('mail', 'no_tls'): no_tls = CP.getboolean('mail', 'no_tls')
    if CP.has_option('mail', 'force_tls'): force_tls = CP.getboolean('mail', 'force_tls')

    # setup maildir structure (cur/tmp/new as expected by zope.sendmail)
    if not os.path.exists(maildir):
        os.makedirs(maildir)
    for subdir in ('cur', 'tmp', 'new'):
        destdir = os.path.join(maildir, subdir)
        if not os.path.exists(destdir):
            os.makedirs(destdir)

    return dict(hostname=hostname,
                username=username,
                password=password,
                maildir=maildir,
                force_tls=force_tls,
                no_tls=no_tls)
def setupMailer():
    """ Start the zope.sendmail queue processor thread and return the
        mail configuration that was used to set it up.
    """
    mail_conf = getMailConfiguration()
    processor = QueueProcessorThread()
    processor.setMailer(makeMailer())
    processor.setQueuePath(mail_conf['maildir'])
    processor.start()
    return mail_conf
def makeMailer():
    """ Build an SMTPMailer from the current mail configuration.

        The 'maildir' entry is removed first since SMTPMailer does not
        accept it as a constructor argument.
    """
    smtp_options = dict(getMailConfiguration())
    smtp_options.pop('maildir')
    return SMTPMailer(**smtp_options)
def send_email(sender, recipient, subject, body, attachments=[]):
    """ Queue a multipart mail for asynchronous delivery through the
        zope.sendmail maildir queue.

        'subject' and 'body' must be unicode/utf-8 encodable; each entry
        of 'attachments' is a filename attached as base64-encoded
        octet-stream.
    """
    msg = email.MIMEMultipart.MIMEMultipart()
    msg["From"] = sender
    msg["To"] = recipient
    msg["Subject"] = email.Header.Header(subject, 'UTF-8')
    msg.attach(email.MIMEText.MIMEText(body.encode('UTF-8'), 'plain', 'UTF-8'))

    # attach every file as a base64-encoded binary part
    for filename in attachments:
        attachment = email.MIMEBase.MIMEBase('application', "octet-stream")
        attachment.set_payload(file(filename, 'rb').read())
        Encoders.encode_base64(attachment)
        attachment.add_header('Content-Disposition',
                              'attachment; filename="%s"' % os.path.basename(filename))
        msg.attach(attachment)

    # hand the message over to the maildir queue; the queue processor
    # thread (see setupMailer) performs the actual SMTP delivery
    queue_conf = getMailConfiguration()
    delivery = QueuedMailDelivery(queue_conf['maildir'])
    delivery.send(sender, [recipient], msg.as_string())
    transaction.commit()
import os
import time
import tempfile
import shutil
import mimetypes
import xmlrpclib
import pkg_resources
from stat import ST_CTIME
from pyramid.renderers import render_to_response
from pyramid.view import static_view
from pyramid.view import view_config
from pyramid_xmlrpc import xmlrpc_view
from webob import Response
from models import Server
from logger import LOG
# Use the real authentication/authorization implementation when the
# optional zopyx.smartprintng.authentication package is installed,
# otherwise fall back to the permissive null implementation.
try:
    from zopyx.smartprintng.authentication import authenticateRequest, authorizeRequest
    have_authentication = True
except ImportError:
    from nullauth import authenticateRequest, authorizeRequest
    have_authentication = False

# view serving the static files below templates/static
static_view = static_view('templates/static', use_subpath=True)
##################
# HTTP views
##################
@view_config(for_=Server, request_method='GET', permission='read')
class index(object):
    """ The default view providing some system information """

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self):
        # data rendered on the status page
        installed_converters = self.context.availableConverters()
        dist_version = pkg_resources.require('zopyx.smartprintng.server')[0].version
        template_params = dict(context=self.context,
                               project='zopyx.smartprintng.server',
                               version=dist_version,
                               converters=installed_converters)
        return render_to_response('templates/index.pt',
                                  template_params,
                                  request=self.request)
@view_config(for_=Server, request_method='GET', permission='read', name='selftest')
class selftest(object):
    """ Server selftest: convert a bundled test document with the
        converter named in the 'converter' request parameter and return
        the result as a file download.
    """

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def __call__(self, converter=None):
        # the keyword argument is ignored; the request parameter wins
        converter = self.request.params['converter']
        test_file = os.path.join(os.path.dirname(__file__), 'test_data', 'test.html')
        result = self.context._convert(test_file, converter)
        if result['status'] != 0:
            raise RuntimeError
        output_filename = result['output_filename']
        ct, dummy = mimetypes.guess_type(output_filename)
        basename, ext = os.path.splitext(output_filename)
        headers = [('content-disposition','attachment; filename=selftest-%s%s' % (converter,ext)),
                   ('content-type', ct)]
        return Response(body=file(output_filename, 'rb').read(),
                        content_type=ct,
                        headerlist=headers
                        )
@view_config(for_=Server, name='deliver')
def deliver(context, request):
    """ Deliver a previously generated output file from the spool
        directory.  Unknown, out-of-tree or expired files yield a 404;
        'prefix' optionally overrides the basename of the download.
    """
    filename = request.params['filename']
    prefix = request.params.get('prefix')
    dest_filename = os.path.abspath(os.path.join(context.spool_directory, filename))

    # various (security) checks: the file must exist, must live inside
    # the spool directory (no path traversal) and must not be expired
    if not os.path.exists(dest_filename):
        return Response(status=404)
    if not dest_filename.startswith(context.spool_directory):
        return Response(status=404)
    if time.time() - os.stat(dest_filename)[ST_CTIME] >= context.delivery_max_age:
        return Response(status=404)

    ct, dummy = mimetypes.guess_type(dest_filename)
    download_name = os.path.basename(filename)
    if prefix:
        download_name = prefix + os.path.splitext(download_name)[1]
    headers = [('content-disposition','attachment; filename=%s' % download_name),
               ('content-type', ct)]
    return Response(body=file(dest_filename, 'rb').read(),
                    content_type=ct,
                    headerlist=headers
                    )
##################
# XMLRPC views
##################
@view_config(name='authenticate', for_=Server)
@xmlrpc_view
def authenticate(context, username, password):
    """ XMLRPC: log into the server and return an auth token.

        Returns True unconditionally when no authentication backend is
        installed.  Failures are returned as xmlrpclib.Fault(123) rather
        than raised, so XMLRPC clients see a proper fault response.
    """
    if not have_authentication:
        return True
    try:
        return authenticateRequest(username, password)
    except Exception, e:
        msg = 'Authentication failed (%s)' % e
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
@view_config(name='convertZIP', for_=Server)
@xmlrpc_view
def convertZIP(context, auth_token, zip_archive, converter_name='pdf-prince'):
    """ XMLRPC: convert the HTML file inside the base64-encoded
        'zip_archive' with the given converter and return the result as
        a base64-encoded ZIP archive (see Server.convertZIP).

        Authorization and conversion failures are returned as
        xmlrpclib.Fault(123) instead of raising.
    """
    if not authorizeRequest(auth_token):
        msg = 'Authorization failed'
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
    try:
        return context.convertZIP(zip_archive, converter_name)
    except Exception, e:
        msg = 'Conversion failed (%s)' % e
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
@view_config(name='convertZIPEmail', for_=Server)
@xmlrpc_view
def convertZIPEmail(context, auth_token, zip_archive, converter_name='pdf-prince', sender=None, recipient=None, subject=None, body=None):
    """ XMLRPC: like convertZIP() but mail the converted document to
        'recipient' instead of returning it (see Server.convertZIPEmail).

        Authorization and conversion failures are returned as
        xmlrpclib.Fault(123) instead of raising.
    """
    if not authorizeRequest(auth_token):
        msg = 'Authorization failed'
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
    try:
        return context.convertZIPEmail(zip_archive, converter_name, sender, recipient, subject, body)
    except Exception, e:
        msg = 'Conversion failed (%s)' % e
        LOG.error(msg, exc_info=True)
        return xmlrpclib.Fault(123, msg)
@view_config(name='convertZIPandRedirect', for_=Server)
@xmlrpc_view
def convertZIPandRedirect(context, auth_token, zip_archive, converter_name='prince-pdf', prefix=None):
""" This view appects a ZIP archive through a POST request containing all
relevant information (similar to the XMLRPC API). However the converted
output file is not returned to the caller but delivered "directly" through
the SmartPrintNG server (through an URL redirection). The 'prefix'
parameter can be used to override the basename of filename used within the
content-disposition header.
(This class is only a base class for the related http_ and xmlrpc_
view (in order to avoid redudant code).)
"""
if not authorizeRequest(auth_token):
msg = 'Authorization failed'
LOG.error(msg, exc_info=True)
return xmlrpclib.Fault(123, msg)
try:
output_archivename, output_filename = context._processZIP(zip_archive, converter_name)
output_ext = os.path.splitext(output_filename)[1]
# take ident from archive name
ident = os.path.splitext(os.path.basename(output_archivename))[0]
# move output file to spool directory
dest_filename = os.path.join(context.spool_directory, '%s%s' % (ident, output_ext))
rel_output_filename = dest_filename.replace(context.spool_directory + os.sep, '')
shutil.move(output_filename, dest_filename)
host = 'localhost'
port = 6543
prefix = prefix or ''
location = 'http://%s:%s/deliver?filename=%s&prefix=%s' % (host, port, rel_output_filename, prefix)
return location
except Exception, e:
msg = 'Conversion failed (%s)' % e
LOG.error(msg, exc_info=True)
return xmlrpclib.Fault(123, msg)
@view_config(name='availableConverters', for_=Server)
@xmlrpc_view
def availableConverters(context):
    """ XMLRPC: return the list of converter names known to the backend. """
    return context.availableConverters()
@view_config(name='ping', for_=Server)
@xmlrpc_view
def ping(context):
    """ XMLRPC liveness check -- returns a constant identifier string. """
    return 'zopyx.smartprintng.server'
import threading
import base64
import glob
import os
import shutil
import tempfile
from stat import ST_CTIME
import shutil
from datetime import datetime
import time
import uuid
import zipfile
from logger import LOG
import mail_util
from zopyx.convert2.convert import Converter
class Server(object):
    """ SmartPrintNG Server Core Implementation

        Maintains a temporary working area (one subdirectory per
        conversion request) plus a spool directory used by the 'deliver'
        view, and implements the conversion entry points called from the
        XMLRPC views.
    """

    def __init__(self):
        # request counter and start time (shown on the status page)
        self.num_requests = 0
        self.start_time = datetime.now()
        self.delivery_max_age = 1800 # deliver files only younger than xx seconds
        # housekeeping: run the cleanup at most every 'cleanup_after'
        # seconds, removing entries older than 'keep_files_for' seconds
        self.cleanup_after = 3600
        self.cleanup_last = time.time()
        self.keep_files_for = 3600 # keep files for no longer than xx seconds
        # guards the request counter and the cleanup run
        self._lock = threading.Lock()
        # per-request working directories live below temp_directory
        self.temp_directory = os.path.join(tempfile.gettempdir(),
                                           'zopyx.smartprintng.server')
        if not os.path.exists(self.temp_directory):
            os.makedirs(self.temp_directory)
        # converted files picked up by the 'deliver' view
        self.spool_directory = os.path.join(tempfile.gettempdir(),
                                            'zopyx.smartprintng.server-spool')
        if not os.path.exists(self.spool_directory):
            os.makedirs(self.spool_directory)

    def countRequest(self):
        """ Thread-safely increment the request counter. """
        self._lock.acquire()
        self.num_requests += 1
        self._lock.release()

    @property
    def start_time_as_str(self):
        # human-readable start time for the status page
        return self.start_time.strftime('%d.%m.%Y %H:%M:%S')

    def _cleanup(self):
        """ Remove old and outdated files from the temporary and
            spool directory.
        """
        # rate-limited: the actual work runs at most every
        # 'cleanup_after' seconds
        if time.time() - self.cleanup_last > self.cleanup_after:
            self._lock.acquire()
            try:
                self.__cleanup()
                self.cleanup_last = time.time()
            except Exception, e:
                LOG.error(e, exc_info=True)
            finally:
                self._lock.release()

    def __cleanup(self):
        """ Unconditionally remove expired entries (caller holds the lock). """
        # per-request working directories
        for dir in os.listdir(self.temp_directory):
            destdir = os.path.join(self.temp_directory, dir)
            age = time.time() - os.stat(destdir)[ST_CTIME]
            if age > self.keep_files_for:
                shutil.rmtree(destdir)
        # spooled output files
        for name in os.listdir(self.spool_directory):
            fullname = os.path.join(self.spool_directory, name)
            age = time.time() - os.stat(fullname)[ST_CTIME]
            if age > self.keep_files_for:
                if os.path.exists(fullname):
                    # NOTE(review): spool entries appear to be regular
                    # files (moved here by convertZIPandRedirect), and
                    # shutil.rmtree() fails on plain files -- this
                    # probably should be os.remove().  TODO confirm
                    shutil.rmtree(fullname)

    def _inject_base_tag(self, html_filename):
        """ All input HTML files contain relative urls (relative
            to the path of the main HTML file (the "working dir").
            So we must inject a BASE tag in order to call the external
            converters properly with the full path of the html input file
            since we do not want to change the process working dir (not
            acceptable in a multi-threaded environment).
            ATT: this should perhaps handled within zopyx.convert2
        """
        html = file(html_filename).read()
        pos = html.lower().find('<head>')
        if pos == -1:
            raise RuntimeError('HTML does not contain a HEAD tag')
        # splice the BASE tag in right after the opening HEAD tag
        html = html[:pos] + '<head><base href="%s"/>' % html_filename + html[pos+6:]
        file(html_filename, 'wb').write(html)

    def _convert(self, html_filename, converter_name='pdf-prince'):
        """ Process a single HTML file """
        self._cleanup()
        return Converter(html_filename)(converter_name)

    def _processZIP(self, zip_archive, converter_name):
        """ Unpack the base64-encoded ZIP archive, convert its single
            HTML file and return (result ZIP filename, output filename).
        """
        LOG.info('Incoming request (%s, %d bytes)' % (converter_name, len(zip_archive)))
        ts = time.time()

        # temp directory handling: one unique directory per request
        now = datetime.now().strftime('%Y%m%d%Z%H%M%S')
        ident = '%s-%s' % (now, uuid.uuid4())
        tempdir = os.path.join(self.temp_directory, ident)
        os.makedirs(tempdir)

        # store zip archive first
        zip_temp = os.path.join(tempdir, 'input.zip')
        file(zip_temp, 'wb').write(base64.decodestring(zip_archive))
        # extract every archive member below tempdir
        ZF = zipfile.ZipFile(zip_temp, 'r')
        for name in ZF.namelist():
            destfile = os.path.join(tempdir, name)
            if not os.path.exists(os.path.dirname(destfile)):
                os.makedirs(os.path.dirname(destfile))
            file(destfile, 'wb').write(ZF.read(name))
        ZF.close()

        # find HTML file (the archive must contain exactly one)
        html_files = glob.glob(os.path.join(tempdir, '*.htm*'))
        if not html_files:
            raise IOError('Archive does not contain any html files')
        if len(html_files) > 1:
            raise RuntimeError('Archive contains more than one html file')
        html_filename = html_files[0]
        # inject BASE tag containing the full local path (required by PrinceXML)
        self._inject_base_tag(html_filename)
        result = self._convert(html_filename,
                               converter_name=converter_name)
        output_filename = result['output_filename']
        basename, ext = os.path.splitext(os.path.basename(output_filename))

        # Generate result ZIP archive with base64-encoded result
        zip_out = os.path.join(tempdir, '%s.zip' % ident)
        ZF = zipfile.ZipFile(zip_out, 'w')
        ZF.writestr('output%s' % ext, file(output_filename, 'rb').read())
        ZF.writestr('conversion-output.txt', result['output'])
        ZF.close()

        LOG.info('Request end (%3.2lf seconds)' % (time.time() - ts))
        return zip_out, output_filename

    def convertZIP(self, zip_archive, converter_name='pdf-prince'):
        """ Process html-file + images within a ZIP archive """
        self.countRequest()
        zip_out, output_filename = self._processZIP(zip_archive, converter_name)
        encoded_result = base64.encodestring(file(zip_out, 'rb').read())
        # the whole per-request temp directory can go now
        shutil.rmtree(os.path.dirname(zip_out))
        return encoded_result

    def convertZIPEmail(self, zip_archive, converter_name='pdf-prince', sender=None, recipient=None, subject=None, body=None):
        """ Process zip archive and send conversion result as mail """
        self.countRequest()
        zip_out, output_filename = self._processZIP(zip_archive, converter_name)
        mail_util.send_email(sender, recipient, subject, body, [output_filename])
        shutil.rmtree(os.path.dirname(zip_out))
        return True

    def availableConverters(self):
        """ Return a list of available converter names """
        from zopyx.convert2.registry import availableConverters
        self.countRequest()
        return availableConverters()
# Module-level singleton: all requests share one Server instance.
root = Server()

def get_root(environ):
    """ Pyramid root factory returning the shared Server instance. """
    return root
if __name__ == '__main__':
    # simple smoke test: list the converters known to this installation
    s = Server()
    print s.availableConverters()
zopyx.textindexng3 - Extension modules for TextIndexNG3
=======================================================
Author
------
zopyx.textindexng3 was written by Andreas Jung for ZOPYX Ltd. & Co. KG, Tuebingen, Germany.
License
-------
- TextIndexNG 3 is published under the Zope Public License V 2.1 (see ZPL.txt)
Other license agreements can be made. Contact us for details ([email protected]).
- TextIndexNG 3 ships with a copy of the Snowball code (snowball.tartarus.org)
for implementing stemming. This code is (C) 2001, Dr. Martin Porter and
published under the BSD license.
- TextIndexNG3 ships with the python-levenshtein extension written by
David Necas und published under the GNU Public License (GPL).
Building
--------
Building the egg on Windows (basically a reminder for myself)::
Basically a reminder for myself:
- Install Cygwin
- Install the Windows Python 2.4
- under Cygwin::
python.exe setup.py build -c mingw32 bdist_egg upload
Contact
-------
| ZOPYX Ltd. & Co. KG
| c/o Andreas Jung,
| Charlottenstr. 37/1
| D-72070 Tuebingen, Germany
| E-mail: info at zopyx dot com
| Web: http://www.zopyx.com
| zopyx.textindexng3 | /zopyx.textindexng3-4.0.1.tar.gz/zopyx.textindexng3-4.0.1/README.txt | README.txt |
from Levenshtein import *
from warnings import warn
class StringMatcher:
    """A SequenceMatcher-like class built on the top of Levenshtein.

    Computed values (ratio, distance, opcodes, editops, matching blocks)
    are cached until one of the sequences changes.
    """

    def _reset_cache(self):
        # None means "not computed yet"; 0/0.0/[] are valid cached results
        self._ratio = self._distance = None
        self._opcodes = self._editops = self._matching_blocks = None

    def __init__(self, isjunk=None, seq1='', seq2=''):
        if isjunk:
            warn("isjunk NOT implemented, it will be ignored")
        self._str1, self._str2 = seq1, seq2
        self._reset_cache()

    def set_seqs(self, seq1, seq2):
        self._str1, self._str2 = seq1, seq2
        self._reset_cache()

    def set_seq1(self, seq1):
        self._str1 = seq1
        self._reset_cache()

    def set_seq2(self, seq2):
        self._str2 = seq2
        self._reset_cache()

    def get_opcodes(self):
        # bug fix: compare caches against None so empty/zero results are
        # not recomputed on every call
        if self._opcodes is None:
            if self._editops is not None:
                # reuse already computed editops
                self._opcodes = opcodes(self._editops, self._str1, self._str2)
            else:
                self._opcodes = opcodes(self._str1, self._str2)
        return self._opcodes

    def get_editops(self):
        if self._editops is None:
            if self._opcodes is not None:
                # reuse already computed opcodes
                self._editops = editops(self._opcodes, self._str1, self._str2)
            else:
                self._editops = editops(self._str1, self._str2)
        return self._editops

    def get_matching_blocks(self):
        if self._matching_blocks is None:
            self._matching_blocks = matching_blocks(self.get_opcodes(),
                                                    self._str1, self._str2)
        return self._matching_blocks

    def ratio(self):
        # bug fix: a cached ratio of 0.0 used to be recomputed every call
        if self._ratio is None:
            self._ratio = ratio(self._str1, self._str2)
        return self._ratio

    def quick_ratio(self):
        # This is usually quick enough :o)
        if self._ratio is None:
            self._ratio = ratio(self._str1, self._str2)
        return self._ratio

    def real_quick_ratio(self):
        len1, len2 = len(self._str1), len(self._str2)
        if not len1 and not len2:
            # two empty sequences are identical (avoids ZeroDivisionError;
            # matches difflib.SequenceMatcher semantics)
            return 1.0
        return 2.0 * min(len1, len2) / (len1 + len2)

    def distance(self):
        # bug fix: a cached distance of 0 used to be recomputed every call
        if self._distance is None:
            self._distance = distance(self._str1, self._str2)
        return self._distance
zopyx.tinymceplugins.imgmap
===========================
This module allows you to edit image maps inside the TinyMCE editor
in Plone 4.0 or higher.
The code is based on the ``imgmap`` plugin for TinyMCE.
See http://code.google.com/p/imgmap for details.
Installation
============
As with every Plone add-on: add ``zopyx.tinymceplugins.imgmap`` to the ``eggs``
option of your buildout configuration and re-run buildout.
Configuration
=============
Right now you have to add ``map`` and ``area`` manually to the list of allowed
HTML tags inside the Plone control panel for HTML filtering.
Usage
=====
Inside TinyMCE you have to select an image inside the editor window and choose
the imagemap icon from the toolbar of TinyMCE.
License
=======
This module is published under the GNU Public License V 2
Author
======
| ZOPYX Limited
| c/o Andreas Jung
| Charlottenstr. 37/1
| D-72070 Tuebingen, Germany
| www.zopyx.com
| [email protected]
Contributors:
* Harald Friessnegger (fRiSi)
* Jean Michel Francois (toutpt)
| zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/README.txt | README.txt |
README for the 'browser/images/' directory
==========================================
This folder is a Zope 3 Resource Directory acting as a repository for images.
Its declaration is located in 'browser/configure.zcml':
<!-- Resource directory for images -->
<browser:resourceDirectory
name="zopyx.imgmap.images"
directory="images"
layer=".interfaces.IThemeSpecific"
/>
An image placed in this directory (e.g. 'logo.png') can be accessed from
this relative URL:
"++resource++zopyx.imgmap.images/logo.png"
Note that it might be better to register each of these resources separately if
you want them to be overridable from zcml directives.
The only way to override a resource in a resource directory is to override the
entire directory (all elements have to be copied over).
A Zope 3 browser resource declared like this in 'browser/configure.zcml':
<browser:resource
name="logo.png"
file="images/logo.png"
layer=".interfaces.IThemeSpecific"
/>
can be accessed from this relative URL:
"++resource++logo.png"
Notes
-----
* Whatever the way they are declared (in bulk inside a resource directory or
as separate resources), images registered as Zope 3 browser resources don't
have all the attributes that Zope 2 image objects have (i.e. the 'title'
property and the 'tag()' and 'get_size()' methods).
This means that if you want the html tag of your image to be auto-generated
(this is the case by default for the portal logo), you should store it in a
directory that is located in the 'skins/' folder of your package, registered
as a File System Directory View in the 'portal_skins' tool, and added to the
layers of your skin.
* Customizing/overriding images that are originally accessed from the
'portal_skins' tool (e.g. Plone default logo and icons) can be done inside
that tool only. There is no known way to do it with Zope 3 browser
resources.
Vice versa, there is no known (easy) way to override a Zope 3 browser
resource from a skin layer in 'portal_skins'.
| zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/browser/images/README.txt | README.txt |
README for the 'browser/stylesheets/' directory
===============================================
This folder is a Zope 3 Resource Directory acting as a repository for
stylesheets.
Its declaration is located in 'browser/configure.zcml':
<!-- Resource directory for stylesheets -->
<browser:resourceDirectory
name="zopyx.imgmap.stylesheets"
directory="stylesheets"
layer=".interfaces.IThemeSpecific"
/>
A stylesheet placed in this directory (e.g. 'main.css') can be accessed from
this relative URL:
"++resource++zopyx.imgmap.stylesheets/main.css"
Note that it might be better to register each of these resources separately if
you want them to be overridable from zcml directives.
The only way to override a resource in a resource directory is to override the
entire directory (all elements have to be copied over).
A Zope 3 browser resource declared like this in 'browser/configure.zcml':
<browser:resource
name="main.css"
file="stylesheets/main.css"
layer=".interfaces.IThemeSpecific"
/>
can be accessed from this relative URL:
"++resource++main.css"
Notes
-----
* Stylesheets registered as Zope 3 resources might be flagged as not found in
the 'portal_css' tool if the layer they are registered for doesn't match the
default skin set in 'portal_skins'.
This can be confusing but it must be considered as a minor bug in the CSS
registry instead of a lack in the way Zope 3 resources are handled in
Zope 2.
* There might be a way to interpret DTML from a Zope 3 resource view.
Although, if you need to use DTML for setting values in a stylesheet (the
same way as in default Plone stylesheets where values are read from
'base_properties'), it is much easier to store it in a directory that is
located in the 'skins/' folder of your package, registered as a File System
Directory View in the 'portal_skins' tool, and added to the layers of your
skin.
* Customizing/overriding stylesheets that are originally accessed from the
'portal_skins' tool (e.g. Plone default stylesheets) can be done inside that
tool only. There is no known way to do it with Zope 3 browser resources.
Vice versa, there is no known way to override a Zope 3 browser resource from
a skin layer in 'portal_skins'.
| zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/browser/stylesheets/README.txt | README.txt |
This directory will be the home for internationalizations for your theme
package. For more information on internationalization please consult the
following sources:
http://plone.org/documentation/kb/product-skin-localization
http://plone.org/documentation/kb/i18n-for-developers
http://www.mattdorn.com/content/plone-i18n-a-brief-tutorial/
http://grok.zope.org/documentation/how-to/how-to-internationalize-your-application
http://maurits.vanrees.org/weblog/archive/2007/09/i18n-locales-and-plone-3.0
http://n2.nabble.com/Recipe-for-overriding-translations-td3045492ef221724.html
http://dev.plone.org/plone/wiki/TranslationGuidelines
| zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/locales/README.txt | README.txt |
/* TinyMCE 3.x plugin registering the imgmap image-map editor. */
(function() {
    // load this plugin's language pack before registration
    tinymce.PluginManager.requireLangPack('imgmap');

    tinymce.create('tinymce.plugins.imgmapPlugin', {
        // Called once per editor instance; 'url' is the absolute URL of
        // this plugin's directory.
        init : function(ed, url) {
            // Register commands
            ed.addCommand('mceimgmapPopup', function() {
                var e = ed.selection.getNode();
                // Internal image object like a flash placeholder
                if (ed.dom.getAttrib(e, 'class').indexOf('mceItem') != -1)
                    return;
                // open the image-map editor in an inline popup
                ed.windowManager.open({
                    file : url + '/popup.html',
                    width : 800,
                    height : 650,
                    inline : 1
                }, {
                    plugin_url : url
                });
            });

            // Register buttons
            //tinyMCE.getButtonHTML(cn, 'lang_imgmap_desc', '{$pluginurl}/images/tinymce_button.gif', 'mceimgmapPopup');
            ed.addButton('imgmap', {
                title : 'imgmap.desc',
                cmd : 'mceimgmapPopup',
                image : url + '/images/tinymce_button.gif'
            });

            // Add a node change handler, selects the button in the UI when a image is selected
            ed.onNodeChange.add(function(ed, cm, node) {
                if (node == null)
                    return;
                //check parents
                //if image parent already has imagemap, toggle selected state, if simple image, use normal state
                do {
                    //console.log(node.nodeName);
                    if (node.nodeName == "IMG" && ed.dom.getAttrib(node, 'class').indexOf('mceItem') == -1) {
                        if (ed.dom.getAttrib(node, 'usemap') != '') {
                            // image already carries a map: button enabled + active
                            cm.setDisabled('imgmap', false);
                            cm.setActive('imgmap', true);
                        }
                        else {
                            // plain image: button enabled but inactive
                            cm.setDisabled('imgmap', false);
                            cm.setActive('imgmap', false);
                        }
                        return true;
                    }
                }
                while ((node = node.parentNode));
                //button disabled by default
                cm.setDisabled('imgmap', true);
                cm.setActive('imgmap', false);
                return true;
            });
        },

        // metadata shown in TinyMCE's plugin info dialog
        getInfo : function() {
            return {
                longname : 'Image Map Editor',
                author : 'Adam Maschek, John Ericksen',
                authorurl : 'http://imgmap.googlecode.com',
                infourl : 'http://imgmap.googlecode.com',
                version : "2.0"
            };
        }
    });

    // Register plugin
    tinymce.PluginManager.add('imgmap', tinymce.plugins.imgmapPlugin);
})();
// Legacy plugin object for the TinyMCE 2.x API. It is not registered
// (the tinyMCE.addPlugin call below is commented out) and is kept for
// reference only; the active plugin is the tinymce.create() one above.
var TinyMCE_imgmapPlugin = {
	// Opens the image-map popup window for the "mceimgmapPopup" command.
	execCommand : function(editor_id, element, command, user_interface, value) {
		switch (command) {
			case "mceimgmapPopup":
				var template = new Array();
				template['file'] = '../../plugins/imgmap/popup.html';
				template['width'] = 700;
				template['height'] = 670;
				var inst = tinyMCE.getInstanceById(editor_id);
				var elm = inst.getFocusElement();
				// Skip internal placeholder elements (class contains mceItem).
				if (elm != null && tinyMCE.getAttrib(elm, 'class').indexOf('mceItem') != -1)
					return true;
				tinyMCE.openWindow(template, {editor_id : editor_id, scrollbars : "yes", resizable: "yes"});
				return true;
		}
		// Command not handled by this plugin.
		return false;
	},
	// Switches the toolbar button class depending on whether the selected
	// image (or an ancestor image) already carries a usemap attribute.
	handleNodeChange : function(editor_id, node, undo_index, undo_levels, visual_aid, any_selection) {
		if (node == null)
			return;
		//check parents
		//if image parent already has imagemap, toggle selected state, if simple image, use normal state
		do {
			//console.log(node.nodeName);
			if (node.nodeName == "IMG" && tinyMCE.getAttrib(node, 'class').indexOf('mceItem') == -1) {
				if (tinyMCE.getAttrib(node, 'usemap') != '') {
					tinyMCE.switchClass(editor_id + '_imgmap', 'mceButtonSelected');
				}
				else {
					tinyMCE.switchClass(editor_id + '_imgmap', 'mceButtonNormal');
				}
				return true;
			}
		}
		while ((node = node.parentNode));
		//button disabled by default
		tinyMCE.switchClass(editor_id + '_imgmap', 'mceButtonDisabled');
		return true;
	}
};
//tinyMCE.addPlugin("imgmap", TinyMCE_imgmapPlugin);
//tinymce.PluginManager.add("imgmap", tinymce.plugins.imgmapPlugin); | zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/skins/zopyx_tinymceplugins_imgmap/imgmap/editor_plugin.js | editor_plugin.js |
(function() {
	tinymce.PluginManager.requireLangPack('imgmap');
	tinymce.create('tinymce.plugins.imgmapPlugin', {
		/**
		 * Plugin entry point: registers the popup command, the toolbar
		 * button and a node-change handler tracking image selection.
		 */
		init : function(ed, url) {
			// Command that opens the image-map editor popup.
			ed.addCommand('mceimgmapPopup', function() {
				var sel = ed.selection.getNode();
				// Skip internal placeholder images (class contains mceItem).
				if (ed.dom.getAttrib(sel, 'class').indexOf('mceItem') != -1) {
					return;
				}
				ed.windowManager.open({
					file : url + '/popup.html',
					width : 700,
					height : 560,
					inline : 1
				}, {
					plugin_url : url
				});
			});
			// Toolbar button bound to the command above.
			ed.addButton('imgmap', {
				title : 'imgmap.desc',
				cmd : 'mceimgmapPopup',
				image : url + '/images/tinymce_button.gif'
			});
			// Keep the button state in sync with the selection: active when
			// a mapped image is selected, enabled for a plain image,
			// disabled otherwise.
			ed.onNodeChange.add(function(ed, cm, node) {
				if (node == null) {
					return;
				}
				// Walk up the ancestor chain looking for a real image.
				for (; node; node = node.parentNode) {
					if (node.nodeName != "IMG" || ed.dom.getAttrib(node, 'class').indexOf('mceItem') != -1) {
						continue;
					}
					cm.setDisabled('imgmap', false);
					cm.setActive('imgmap', ed.dom.getAttrib(node, 'usemap') != '');
					return true;
				}
				// No image found: button disabled by default.
				cm.setDisabled('imgmap', true);
				cm.setActive('imgmap', false);
				return true;
			});
		},
		/** Plugin metadata for TinyMCE's plugin info dialog. */
		getInfo : function() {
			return {
				longname : 'Image Map Editor',
				author : 'Adam Maschek, John Ericksen',
				authorurl : 'http://imgmap.googlecode.com',
				infourl : 'http://imgmap.googlecode.com',
				version : "2.0"
			};
		}
	});
	// Register plugin
	tinymce.PluginManager.add('imgmap', tinymce.plugins.imgmapPlugin);
})();
// Legacy plugin object for the TinyMCE 2.x API. It is not registered
// (the tinyMCE.addPlugin call below is commented out) and is kept for
// reference only; the active plugin is the tinymce.create() one above.
var TinyMCE_imgmapPlugin = {
	// Opens the image-map popup window for the "mceimgmapPopup" command.
	execCommand : function(editor_id, element, command, user_interface, value) {
		switch (command) {
			case "mceimgmapPopup":
				var template = new Array();
				template['file'] = '../../plugins/imgmap/popup.html';
				template['width'] = 700;
				template['height'] = 670;
				var inst = tinyMCE.getInstanceById(editor_id);
				var elm = inst.getFocusElement();
				// Skip internal placeholder elements (class contains mceItem).
				if (elm != null && tinyMCE.getAttrib(elm, 'class').indexOf('mceItem') != -1)
					return true;
				tinyMCE.openWindow(template, {editor_id : editor_id, scrollbars : "yes", resizable: "yes"});
				return true;
		}
		// Command not handled by this plugin.
		return false;
	},
	// Switches the toolbar button class depending on whether the selected
	// image (or an ancestor image) already carries a usemap attribute.
	handleNodeChange : function(editor_id, node, undo_index, undo_levels, visual_aid, any_selection) {
		if (node == null)
			return;
		//check parents
		//if image parent already has imagemap, toggle selected state, if simple image, use normal state
		do {
			//console.log(node.nodeName);
			if (node.nodeName == "IMG" && tinyMCE.getAttrib(node, 'class').indexOf('mceItem') == -1) {
				if (tinyMCE.getAttrib(node, 'usemap') != '') {
					tinyMCE.switchClass(editor_id + '_imgmap', 'mceButtonSelected');
				}
				else {
					tinyMCE.switchClass(editor_id + '_imgmap', 'mceButtonNormal');
				}
				return true;
			}
		}
		while ((node = node.parentNode));
		//button disabled by default
		tinyMCE.switchClass(editor_id + '_imgmap', 'mceButtonDisabled');
		return true;
	}
};
//tinyMCE.addPlugin("imgmap", TinyMCE_imgmapPlugin);
//tinymce.PluginManager.add("imgmap", tinymce.plugins.imgmapPlugin); | zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/skins/zopyx_tinymceplugins_imgmap/imgmap/editor_plugin_src.js | editor_plugin_src.js |
// Active imgmap editor instance (created in init()).
var myimgmap;
// TinyMCE popup's host editor instance.
var editor = null;
// The <img> element being edited.
var img_obj = null;
// The image's associated <map> element, if it already has one.
var map_obj = null;
//array of form elements
var props = [];
/**
 * Popup entry point: creates the imgmap editor instance for the image
 * currently selected in TinyMCE, loads any existing <map> assigned to
 * it, and fills the anchors fieldset from the @@getAnchors view.
 */
function init() {
	tinyMCEPopup.resizeToInnerSize();
	//tinyMCE.setWindowArg('mce_windowresize', true);//i guess we dont need this
	editor = tinyMCEPopup.editor;
	img_obj = editor.selection.getNode();
	//late init
	myimgmap = new imgmap({
		mode : "editor",
		custom_callbacks : {
			'onStatusMessage' : function(str) {gui_statusMessage(str);},//to display status messages on gui
			'onHtmlChanged' : function(str) {gui_htmlChanged(str);},//to display updated html on gui
			//'onModeChanged' : function(mode) {gui_modeChanged(mode);},//to switch normal and preview modes on gui
			'onAddArea' : function(id) {gui_addArea(id);},//to add new form element on gui
			'onRemoveArea' : function(id) {gui_removeArea(id);},//to remove form elements from gui
			'onAreaChanged' : function(obj) {gui_areaChanged(obj);},
			'onSelectArea' : function(obj) {gui_selectArea(obj);}//to select form element when an area is clicked
		},
		pic_container: document.getElementById('pic_container'),
		bounding_box : false
	});
	//we need this to load languages
	myimgmap.onLoad();
	myimgmap.loadImage(img_obj);
	//console.log(myimgmap);
	myimgmap.addEvent(document.getElementById('html_container'), 'blur', gui_htmlBlur);
	myimgmap.addEvent(document.getElementById('html_container'), 'focus', gui_htmlFocus);
	//check if the image has a valid map already assigned
	//(second getAttribute argument is a legacy-IE quirk)
	var mapname = img_obj.getAttribute('usemap', 2) || img_obj.usemap ;
	//console.log(mapname);
	if (mapname != null && mapname != '') {
		//strip the leading '#' to get the bare map name
		mapname = mapname.substr(1);
		var maps = editor.contentWindow.document.getElementsByTagName('MAP');
		//console.log(maps);
		for (var i=0; i < maps.length; i++) {
			// IE doesn't return name?
			if (maps[i].name == mapname || maps[i].id == mapname) {
				map_obj = maps[i];
				myimgmap.setMapHTML(map_obj);
				break;
			}
		}
	}
	//load the anchor list of the edited document into the anchors fieldset
	var _parent = editor.contentWindow.parent.location.href;
	var _anchors = document.getElementById('anchors_container');
	_anchors.innerHTML = gui_loadAnchorsHtml(_parent + '/../@@getAnchors');
	// alert(_parent + '/../@@anchor_view');
}
/**
 * Fetches the anchor list markup from the @@getAnchors view.
 * NOTE(review): uses a synchronous XMLHttpRequest, which blocks the UI
 * and is deprecated in modern browsers — confirm before modernizing.
 * @param url The @@getAnchors view URL to fetch.
 * @return The response body on HTTP 200, otherwise an error string.
 */
function gui_loadAnchorsHtml(url) {
	var xhr = new XMLHttpRequest();
	xhr.open('get', url, false);
	xhr.send(null);
	return (xhr.status == 200) ? xhr.responseText : 'An error occurred';
}
/**
 * Applies the edited map to the document: creates the <map> element if
 * missing, writes the generated <area> markup into it and points the
 * image's usemap attribute at it, all inside one undo level. Closes
 * the popup afterwards.
 */
function updateAction() {
	if (img_obj != null && img_obj.nodeName == "IMG") {
		tinyMCEPopup.execCommand("mceBeginUndoLevel");
		if (typeof map_obj == 'undefined' || map_obj == null) {
			//no map yet: create one next to the image
			map_obj = editor.contentWindow.document.createElement('MAP');
			img_obj.parentNode.appendChild(map_obj);
		}
		//map_obj.innerHTML = myimgmap.getMapInnerHTML();
		editor.dom.setHTML(map_obj, myimgmap.getMapInnerHTML());
		map_obj.name = myimgmap.getMapName();
		map_obj.id = myimgmap.getMapId();
		img_obj.setAttribute('usemap', "#" + myimgmap.getMapName(), 0);
		//img_obj.setAttribute('border', '0');
		tinyMCEPopup.execCommand("mceEndUndoLevel");
	}
	tinyMCEPopup.close();
}
/** Closes the popup without applying any changes. */
function cancelAction() {
	tinyMCEPopup.close();
}
//remove the map object and unset the usemap attribute
/**
 * Removes the image map: clears the image's usemap attribute and
 * deletes the <map> element, inside one undo level, then closes.
 */
function removeAction() {
	tinyMCEPopup.execCommand("mceBeginUndoLevel");
	if (img_obj != null && img_obj.nodeName == "IMG") {
		img_obj.removeAttribute('usemap', 0);
	}
	if (typeof map_obj != 'undefined' && map_obj != null) {
		map_obj.parentNode.removeChild(map_obj);
	}
	tinyMCEPopup.execCommand("mceEndUndoLevel");
	tinyMCEPopup.close();
}
/** FUNCTION SECTION (code taken from default_interface) *****************************/
/**
 * Highlights the canvas area belonging to the props row under the mouse.
 */
function gui_row_mouseover(e) {
	if (myimgmap.is_drawing) {return;}//no highlight while drawing
	if (myimgmap.viewmode === 1) {return;}//no highlight in preview mode
	var row = (myimgmap.isMSIE) ? window.event.srcElement : e.currentTarget;
	//events may fire on a child element; climb to the row carrying aid
	if (typeof row.aid == 'undefined') {row = row.parentNode;}
	myimgmap.highlightArea(row.aid);
}
/**
 * Removes the highlight from the area whose props row the mouse left.
 */
function gui_row_mouseout(e) {
	if (myimgmap.is_drawing) {return;}//exit if in drawing state
	if (myimgmap.viewmode === 1) {return;}//exit if preview mode
	var row = (myimgmap.isMSIE) ? window.event.srcElement : e.currentTarget;
	//events may fire on a child element; climb to the row carrying aid
	if (typeof row.aid == 'undefined') {row = row.parentNode;}
	myimgmap.blurArea(row.aid);
}
/**
 * Selects the clicked props row and remembers it as the current area.
 */
function gui_row_click(e) {
	if (myimgmap.viewmode === 1) {return;}//exit if preview mode
	var row = (myimgmap.isMSIE) ? window.event.srcElement : e.currentTarget;
	//events may fire on a child element; climb to the row carrying aid
	if (typeof row.aid == 'undefined') {row = row.parentNode;}
	gui_row_select(row.aid, false, false);
	myimgmap.currentid = row.aid;
}
/**
 * Marks the row of the given area as selected: checks its radio button,
 * optionally focuses it, and moves the row highlight to it.
 * @param id       Area id whose row to select.
 * @param setfocus When true, focus the row's radio button.
 * @param multiple Unused (selection is single-row only).
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 */
function gui_row_select(id, setfocus, multiple) {
	if (myimgmap.is_drawing || myimgmap.viewmode === 1) {return;}
	var radio = document.getElementById('img_active_' + id);
	if (!radio) {return;}
	gui_cb_unselect_all();
	radio.checked = 1;
	if (setfocus) {
		radio.focus();
	}
	//clear the highlight on every row, then highlight the selected one
	for (var i = 0; i < props.length; i++) {
		if (props[i]) {
			props[i].style.background = '';
		}
	}
	props[id].style.background = '#e7e7e7';
}
/**
 * Deletes the current area when DEL is pressed while the row's leading
 * radio/checkbox has focus.
 */
function gui_cb_keydown(e) {
	if (myimgmap.viewmode === 1) {return;}//exit if preview mode
	var code = (myimgmap.isMSIE) ? event.keyCode : e.keyCode;
	//46 == DEL key
	if (code == 46) {
		myimgmap.removeArea(myimgmap.currentid);
	}
}
/**
 * Clears the checked state of every row's leading radio/checkbox.
 */
function gui_cb_unselect_all() {
	for (var idx = 0; idx < props.length; idx++) {
		if (!props[idx]) {continue;}
		document.getElementById('img_active_' + idx).checked = false;
	}
}
/**
 * Handles arrow keys on img_coords input field.
 * Changes the coordinate values by +/- 1 and updates the corresponding canvas area.
 * The coordinate adjusted is the one the text cursor currently sits in.
 * @author adam
 * @date 25-09-2007 17:12:43
 */
function gui_coords_keydown(e) {
	if (myimgmap.viewmode === 1) {return;}//exit if preview mode
	var key = (myimgmap.isMSIE || myimgmap.isOpera) ? event.keyCode : e.keyCode;
	var obj = (myimgmap.isMSIE || myimgmap.isOpera) ? window.event.srcElement : e.originalTarget;
	//obj is the input field
	//this.log(key);
	//this.log(obj);
	if (key == 40 || key == 38) {
		//down or up pressed
		//get the coords
		var coords = obj.value.split(',');
		var s = getSelectionStart(obj);//helper function
		var j = 0;
		//walk the comma-separated values to find the one under the cursor
		for (var i=0; i<coords.length; i++) {
			j+=coords[i].length;
			if (j > s) {
				//this is the coord we want
				//(-- / ++ on the string relies on implicit numeric coercion)
				if (key == 40 && coords[i] > 0) {coords[i]--;}
				if (key == 38) {coords[i]++;}
				break;
			}
			//jump one more because of comma
			j++;
		}
		obj.value = coords.join(',');
		if (obj.value != myimgmap.areas[obj.parentNode.aid].lastInput) {
			myimgmap._recalculate(obj.parentNode.aid, obj.value);//contains repaint
		}
		//set cursor back to its original position
		setSelectionRange(obj, s);
		return true;
	}
};
/**
 * Gets the position of the cursor in the input box.
 * Uses the legacy IE TextRange API when available, and the W3C
 * selectionStart property otherwise.
 * @author Diego Perlini
 * @url http://javascript.nwbox.com/cursor_position/
 * @param obj The input element to inspect.
 * @return Zero-based caret offset within the input's value.
 */
function getSelectionStart(obj) {
	if (obj.createTextRange) {
		//IE branch: measure the range from the caret to the end of the value
		var r = document.selection.createRange().duplicate();
		r.moveEnd('character', obj.value.length);
		if (r.text === '') {return obj.value.length;}
		return obj.value.lastIndexOf(r.text);
	}
	else {
		return obj.selectionStart;
	}
}
/**
 * Places the caret (or a selection) inside an input box.
 * @link http://www.codingforums.com/archive/index.php/t-90176.html
 * @param obj   The input element.
 * @param start Selection start offset.
 * @param end   Selection end offset; defaults to start (plain caret).
 */
function setSelectionRange(obj, start, end) {
	if (typeof end == "undefined") {end = start;}
	if (obj.setSelectionRange) {
		// W3C branch; focus first to keep behaviour consistent with IE
		obj.focus();
		obj.setSelectionRange(start, end);
	}
	else if (obj.createTextRange) {
		// Legacy IE branch via TextRange
		var rng = obj.createTextRange();
		rng.collapse(true);
		rng.moveEnd('character', end);
		rng.moveStart('character', start);
		rng.select();
	}
}
/**
 * Called when one of the properties change, and the recalculate function
 * must be called.
 * Copies the changed form value into the area object; a shape change
 * additionally converts the coordinates to fit the new shape.
 * @date 2006.10.24. 22:42:02
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 */
function gui_input_change(e) {
	if (myimgmap.viewmode === 1) {return;}//exit if preview mode
	if (myimgmap.is_drawing) {return;}//exit if drawing
	//console.log('blur');
	var obj = (myimgmap.isMSIE) ? window.event.srcElement : e.currentTarget;
	//console.log(obj);
	var id = obj.parentNode.aid;
	//console.log(this.areas[id]);
	if (obj.name == 'img_href') {myimgmap.areas[id].ahref = obj.value;}
	else if (obj.name == 'img_alt') {myimgmap.areas[id].aalt = obj.value;}
	else if (obj.name == 'img_title') {myimgmap.areas[id].atitle = obj.value;}
	else if (obj.name == 'img_target') {myimgmap.areas[id].atarget = obj.value;}
	else if (obj.name == 'img_shape') {
		if (myimgmap.areas[id].shape != obj.value && myimgmap.areas[id].shape != 'undefined') {
			//shape changed, adjust coords intelligently inside _normCoords
			var coords = '';
			if (props[id]) {
				coords = props[id].getElementsByTagName('input')[2].value;
			}
			else {
				coords = myimgmap.areas[id].lastInput || '' ;
			}
			coords = myimgmap._normCoords(coords, obj.value, 'from'+myimgmap.areas[id].shape);
			if (props[id]) {
				props[id].getElementsByTagName('input')[2].value = coords;
			}
			myimgmap.areas[id].shape = obj.value;
			myimgmap._recalculate(id, coords);
			myimgmap.areas[id].lastInput = coords;
		}
		else if (myimgmap.areas[id].shape == 'undefined') {
			//shape chosen before the area is drawn: remember it for later
			myimgmap.nextShape = obj.value;
		}
	}
	gui_recalculateHTML(id);
};
/**
 * Recomputes the given area from its coords input field and pushes the
 * refreshed map HTML to the GUI.
 */
function gui_recalculateHTML(id) {
	var area = myimgmap.areas[id];
	if (area && area.shape != 'undefined') {
		myimgmap._recalculate(id, props[id].getElementsByTagName('input')[2].value);
		myimgmap.fireEvent('onHtmlChanged', myimgmap.getMapHTML());//temp ## shouldnt be here
	}
}
/**
 * Called from imgmap when a new area is added.
 * Builds the area's form row (id, active radio, shape, coords, href,
 * alt and target controls), wires its event handlers and selects it.
 * @param id Id of the newly added area.
 */
function gui_addArea(id) {
	props[id] = document.createElement('DIV');
	document.getElementById('form_container').appendChild(props[id]);
	props[id].id = 'img_area_' + id;
	props[id].aid = id;
	props[id].className = 'img_area';
	//hook ROW event handlers
	myimgmap.addEvent(props[id], 'mouseover', gui_row_mouseover);
	myimgmap.addEvent(props[id], 'mouseout', gui_row_mouseout);
	myimgmap.addEvent(props[id], 'click', gui_row_click);
	var temp = '<input type="text" name="img_id" class="img_id" value="' + id + '" readonly="1"/>';
	//could be checkbox in the future
	temp+= '<input type="radio" name="img_active" class="img_active" id="img_active_'+id+'" value="'+id+'">';
	temp+= '<select name="img_shape" class="img_shape">';
	temp+= '<option value="rect" >rectangle</option>';
	temp+= '<option value="circle" >circle</option>';
	temp+= '<option value="poly" >polygon</option>';
	temp+= '</select>';
	temp+= ' Coords: <input type="text" name="img_coords" class="img_coords" value="">';
	temp+= ' Href: <input type="text" id="img_href_' + id + '" name="img_href" class="img_href" value="" style="width: 150px">';
	temp+= '<img src="images/add.gif" onclick="gui_anchorsShow()" alt="Show anchors" title="Show anchors"/>';
	temp+= ' Alt: <input type="text" name="img_alt" class="img_alt" value="">';
	temp+= ' Target: <select name="img_target" class="img_target">';
	//BUGFIX: '<not set>' was previously emitted unescaped, so the HTML
	//parser treated it as a bogus tag and the option label was lost;
	//escape it so the label renders literally.
	temp+= '<option value="" >&lt;not set&gt;</option>';
	temp+= '<option value="_self" >this window</option>';
	temp+= '<option value="_blank" >new window</option>';
	temp+= '<option value="_top" >top window</option>';
	temp+= '</select>';
	props[id].innerHTML = temp;
	//hook more event handlers to individual inputs
	myimgmap.addEvent(props[id].getElementsByTagName('input')[1], 'keydown', gui_cb_keydown);
	myimgmap.addEvent(props[id].getElementsByTagName('input')[2], 'keydown', gui_coords_keydown);
	myimgmap.addEvent(props[id].getElementsByTagName('input')[2], 'change', gui_input_change);
	myimgmap.addEvent(props[id].getElementsByTagName('input')[3], 'change', gui_input_change);
	myimgmap.addEvent(props[id].getElementsByTagName('input')[4], 'change', gui_input_change);
	myimgmap.addEvent(props[id].getElementsByTagName('select')[0], 'change', gui_input_change);
	myimgmap.addEvent(props[id].getElementsByTagName('select')[1], 'change', gui_input_change);
	if (myimgmap.isSafari) {
		//need these for safari
		myimgmap.addEvent(props[id].getElementsByTagName('select')[0], 'change', gui_row_click);
		myimgmap.addEvent(props[id].getElementsByTagName('select')[1], 'change', gui_row_click);
	}
	//set shape as nextshape if set
	if (myimgmap.nextShape) {props[id].getElementsByTagName('select')[0].value = myimgmap.nextShape;}
	gui_row_select(id, true);
}
/**
 * Called from imgmap when an area was removed.
 * Drops the area's form row and re-selects the last remaining row.
 * @param id Id of the removed area.
 */
function gui_removeArea(id) {
	if (props[id]) {
		//shall we leave the last one?
		var pprops = props[id].parentNode;
		pprops.removeChild(props[id]);
		//the row appended last becomes the new selection
		var lastid = pprops.lastChild.aid;
		props[id] = null;
		try {
			gui_row_select(lastid, true);
			myimgmap.currentid = lastid;
		}
		catch (err) {
			//no rows left to select
			//alert('noparent');
		}
	}
}
/**
 * Switches the form GUI between preview (all controls disabled) and
 * normal edit mode, and swaps the preview toggle icon accordingly.
 * @param mode 1 for preview mode, anything else for edit mode.
 */
function gui_modeChanged(mode) {
	var preview = (mode == 1);
	var html = document.getElementById('html_container');
	if (html) {
		html.disabled = preview;
	}
	//toggle every input and select inside the form container
	var container = document.getElementById('form_container');
	var tags = ["input", "select"];
	for (var t = 0; t < tags.length; t++) {
		var nodes = container.getElementsByTagName(tags[t]);
		for (var i = 0; i < nodes.length; i++) {
			nodes[i].disabled = preview;
		}
	}
	document.getElementById('i_preview').src = preview ?
		'example1_files/edit.gif' : 'example1_files/zoom.gif';
}
/**
 * Called from imgmap with the new map HTML; mirrors it into the
 * html_container textarea when that element exists.
 */
function gui_htmlChanged(str) {
	var box = document.getElementById('html_container');
	if (box) {
		box.value = str;
	}
}
/**
 * Called from imgmap with a new status string; shows it in the status
 * bar element (when present) and in the window status line.
 */
function gui_statusMessage(str) {
	var bar = document.getElementById('status_container');
	if (bar) {
		bar.innerHTML = str;
	}
	window.defaultStatus = str;//for IE
}
/**
 * Mirrors the state of the given area object back into its form row
 * (shape, coords, href, alt and target controls).
 */
function gui_areaChanged(area) {
	var row = props[area.aid];
	if (!row) {return;}
	var inputs = row.getElementsByTagName('input');
	var selects = row.getElementsByTagName('select');
	if (area.shape) {selects[0].value = area.shape;}
	if (area.lastInput) {inputs[2].value = area.lastInput;}
	if (area.ahref) {inputs[3].value = area.ahref;}
	if (area.aalt) {inputs[4].value = area.aalt;}
	if (area.atarget) {selects[1].value = area.atarget;}
}
/**
 * Applies edits made directly in the HTML textarea when it loses focus.
 * Re-parses the map only when the content actually changed since focus
 * (the previous value is stashed in the 'oldvalue' attribute).
 * @date 2006.10.24. 22:51:20
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 */
function gui_htmlBlur() {
	var box = document.getElementById('html_container');
	if (box.getAttribute('oldvalue') != box.value) {
		//content is dirty: reload the map from the edited source
		myimgmap.setMapHTML(box.value);
	}
}
/**
 * Remembers the HTML textarea's value on focus (in its 'oldvalue'
 * attribute) so gui_htmlBlur can detect changes, then selects its
 * content for easy replacement.
 * @date 20-02-2007 17:51:16
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 */
function gui_htmlFocus() {
	var box = document.getElementById('html_container');
	box.setAttribute('oldvalue', box.value);
	box.select();
}
/** Opens the HTML fieldset and focuses the HTML textarea. */
function gui_htmlShow() {
	toggleFieldset(document.getElementById('fieldset_html'), 1);
	document.getElementById('html_container').focus();
}
/** Opens the anchors fieldset and focuses the anchor list container. */
function gui_anchorsShow() {
	toggleFieldset(document.getElementById('fieldset_anchors'), 1);
	document.getElementById('anchors_container').focus();
}
/**
 * Called when an anchor is picked from the anchors fieldset: writes it
 * into the current area's href input, closes the fieldset and updates
 * the area and map HTML.
 * @param str The anchor href to apply.
 */
function gui_anchorSet(str) {
	var id = myimgmap.currentid;
	var img_href_id = document.getElementById('img_href_' + id);
	img_href_id.value = str;
	toggleFieldset(document.getElementById('fieldset_anchors'), 0);
	// img_href_id.onchange();
	// should really be handled by gui_input_change :(, but onchange does not fire here
	myimgmap.areas[img_href_id.parentNode.aid].ahref = img_href_id.value;
	gui_recalculateHTML(id);
}
/**
 * Applies the labeling mode chosen in the given control to the imgmap
 * config, then repaints every area to reflect it.
 */
function changelabeling(obj) {
	myimgmap.config.label = obj.value;
	myimgmap._repaintAll();
}
/**
 * Copies the bounding-box checkbox state into the imgmap config, then
 * relaxes all areas (repaints their borders and opacity).
 */
function toggleBoundingBox(obj) {
	//console.log(obj.checked);
	myimgmap.config.bounding_box = obj.checked;
	myimgmap.relaxAllAreas();
}
/**
 * Toggles a fieldset's visibility by swapping its className.
 * External CSS must define the 'fieldset_off' class.
 * @date 2006.10.24. 22:13:34
 * @author Adam Maschek ([email protected])
 * @param fieldset The fieldset element (ignored when falsy).
 * @param on       When 1, force the fieldset visible.
 */
function toggleFieldset(fieldset, on) {
	if (!fieldset) {return;}
	var show = (fieldset.className == 'fieldset_off' || on == 1);
	fieldset.className = show ? '' : 'fieldset_off';
}
/** Called from imgmap when an area is clicked; selects its form row. */
function gui_selectArea(obj) {
	gui_row_select(obj.aid, true, false);
}
/**
 * Applies the zoom factor chosen in the dd_zoom dropdown: rescales the
 * edited image (remembering its original size on first use) and all
 * drawn areas.
 */
function gui_zoom() {
	var scale = document.getElementById('dd_zoom').value;
	var pic = document.getElementById('pic_container').getElementsByTagName('img')[0];
	if (typeof pic == 'undefined') {return false;}
	//remember the unscaled dimensions the first time we zoom
	if (!pic.oldwidth) {
		pic.oldwidth = pic.width;
	}
	if (!pic.oldheight) {
		pic.oldheight = pic.height;
	}
	pic.width = pic.oldwidth * scale;
	pic.height = pic.oldheight * scale;
	myimgmap.scaleAllAreas(scale);
}
* @fileoverview
* Online Image Map Editor - main script file.
* This is the main script file of the Online Image Map Editor.
*
* TODO:
* -scriptload race condition fix
* -destroy/cleanup function ?
* -testing highlighter
* -cursor area_mousemove in opera not refreshing quite well - bug reported
* -get rid of memo array
* -highlight which control point is edited in html or form mode
* -more comments, especially on config vars
* -make function names more logical
* - dumpconfig
* -prepare for bad input /poly not properly closed?
* -prepare for % values in coords
* -prepare for default shape http://www.w3.org/TR/html4/struct/objects.html#edef-AREA
*
* @date 26-02-2007 2:24:50
* @author Adam Maschek (adam.maschek(at)gmail.com)
* @copyright
* @version 2.2
*
*/
/*jslint browser: true, newcap: false, white: false, onevar: false, plusplus: false, eqeqeq: false, nomen: false */
/*global imgmapStrings:true, window:false, G_vmlCanvasManager:false, air:false, imgmap_spawnObjects:true */
/**
 * Online Image Map Editor main class.
 * Initializes default configuration and state, sniffs the browser, then
 * hands the supplied config to setup().
 * @author Adam Maschek
 * @constructor
 * @param config The config object; its keys override the defaults below.
 */
function imgmap(config) {
	/** Version string of imgmap */
	this.version = "2.2";
	/** Build date of imgmap */
	this.buildDate = "2009/07/26 16:29";
	/** Sequential build number of imgmap */
	this.buildNumber = "108";
	/** Config object of the imgmap instance */
	this.config = {};
	/** Status flag to indicate current drawing mode */
	this.is_drawing = 0;
	/** Array to hold language strings */
	this.strings = [];
	/** Helper array for some drawing operations */
	this.memory = [];
	/** Array to hold reference to all areas (canvases) */
	this.areas = [];
	/** Array to hold last log entries */
	this.logStore = [];
	/** Associative array to hold bound event handlers */
	this.eventHandlers = {};
	/** Id of the currently edited area */
	this.currentid = 0;
	/** Id of the area being dragged, if any */
	this.draggedId = null;
	/** Id of the selected area, if any */
	this.selectedId = null;
	/** Shape to use for the next drawn area */
	this.nextShape = 'rect';
	/** possible values: 0 - edit, 1 - preview */
	this.viewmode = 0;
	/** array of dynamically loaded javascripts */
	this.loadedScripts = [];
	this.isLoaded = false;
	this.cntReloads = 0;
	/** holds the name of the actively edited map, use getMapName to read it */
	this.mapname = '';
	/** holds the id of the actively edited map, use getMapIdto read it */
	this.mapid = '';
	/** watermark to attach to output */
	this.waterMark = '<!-- Created by Online Image Map Editor (http://www.maschek.hu/imagemap/index) -->';
	/** global scale of areas (1-normal, 2-doubled, 0.5-half, etc.) */
	this.globalscale = 1;
	/** is_drawing draw mode constant */
	this.DM_RECTANGLE_DRAW = 1;
	/** is_drawing draw mode constant */
	this.DM_RECTANGLE_MOVE = 11;
	/** is_drawing draw mode constant */
	this.DM_RECTANGLE_RESIZE_TOP = 12;
	/** is_drawing draw mode constant */
	this.DM_RECTANGLE_RESIZE_RIGHT = 13;
	/** is_drawing draw mode constant */
	this.DM_RECTANGLE_RESIZE_BOTTOM = 14;
	/** is_drawing draw mode constant */
	this.DM_RECTANGLE_RESIZE_LEFT = 15;
	/** is_drawing draw mode constant */
	this.DM_SQUARE_DRAW = 2;
	/** is_drawing draw mode constant */
	this.DM_SQUARE_MOVE = 21;
	/** is_drawing draw mode constant */
	this.DM_SQUARE_RESIZE_TOP = 22;
	/** is_drawing draw mode constant */
	this.DM_SQUARE_RESIZE_RIGHT = 23;
	/** is_drawing draw mode constant */
	this.DM_SQUARE_RESIZE_BOTTOM = 24;
	/** is_drawing draw mode constant */
	this.DM_SQUARE_RESIZE_LEFT = 25;
	/** is_drawing draw mode constant */
	this.DM_POLYGON_DRAW = 3;
	/** is_drawing draw mode constant */
	this.DM_POLYGON_LASTDRAW = 30;
	/** is_drawing draw mode constant */
	this.DM_POLYGON_MOVE = 31;
	/** is_drawing draw mode constant */
	this.DM_BEZIER_DRAW = 4;
	/** is_drawing draw mode constant */
	this.DM_BEZIER_LASTDRAW = 40;
	/** is_drawing draw mode constant */
	this.DM_BEZIER_MOVE = 41;
	//set some config defaults below
	/**
	 * Mode of operation
	 * possible values:
	 * editor - classical editor,
	 * editor2 - dreamweaver style editor,
	 * highlighter - map highlighter, will spawn imgmap instances for each map found in the current page
	 * highlighter_spawn - internal mode after spawning imgmap objects
	 */
	this.config.mode = "editor";
	this.config.baseroot = '';
	this.config.lang = '';
	this.config.defaultLang = 'en';
	this.config.loglevel = 0;
	this.config.custom_callbacks = {};//possible values: see below!
	/** Callback events that you can handle in your GUI. */
	this.event_types = [
		'onModeChanged',
		'onHtmlChanged',
		'onAddArea',
		'onRemoveArea',
		'onDrawArea',
		'onResizeArea',
		'onRelaxArea',
		'onFocusArea',
		'onBlurArea',
		'onMoveArea',
		'onSelectRow',
		'onLoadImage',
		'onSetMap',
		'onGetMap',
		'onSelectArea',
		'onDblClickArea',
		'onStatusMessage',
		'onAreaChanged'];
	//default color values
	this.config.CL_DRAW_BOX = '#E32636';
	this.config.CL_DRAW_SHAPE = '#d00';
	this.config.CL_DRAW_BG = '#fff';
	this.config.CL_NORM_BOX = '#E32636';
	this.config.CL_NORM_SHAPE = '#d00';
	this.config.CL_NORM_BG = '#fff';
	this.config.CL_HIGHLIGHT_BOX = '#E32636';
	this.config.CL_HIGHLIGHT_SHAPE = '#d00';
	this.config.CL_HIGHLIGHT_BG = '#fff';
	this.config.CL_KNOB = '#555';
	this.config.bounding_box = true;
	this.config.label = '%n';
	//the format string of the area labels - possible values: %n - number, %c - coords, %h - href, %a - alt, %t - title
	this.config.label_class = 'imgmap_label';
	//the css class to apply on labels
	this.config.label_style = 'font: bold 10px Arial';
	//this.config.label_style = 'font-weight: bold; font-size: 10px; font-family: Arial; color: #964';
	//the css style(s) to apply on labels
	this.config.hint = '#%n %h';
	//the format string of the area mouseover hints - possible values: %n - number, %c - coords, %h - href, %a - alt, %t - title
	this.config.draw_opacity = '35';
	//the opacity value of the area while drawing, moving or resizing - possible values 0 - 100 or range "(x)-y"
	this.config.norm_opacity = '50';
	//the opacity value of the area while relaxed - possible values 0 - 100 or range "(x)-y"
	this.config.highlight_opacity = '70';
	//the opacity value of the area while highlighted - possible values 0 - 100 or range "(x)-y"
	this.config.cursor_default = 'crosshair'; //auto/pointer
	//the css cursor while hovering over the image
	//browser sniff
	var ua = navigator.userAgent;
	this.isMSIE = (navigator.appName == "Microsoft Internet Explorer");
	this.isMSIE5 = this.isMSIE && (ua.indexOf('MSIE 5') != -1);
	this.isMSIE5_0 = this.isMSIE && (ua.indexOf('MSIE 5.0') != -1);
	this.isMSIE7 = this.isMSIE && (ua.indexOf('MSIE 7') != -1);
	this.isGecko = ua.indexOf('Gecko') != -1;
	this.isSafari = ua.indexOf('Safari') != -1;
	this.isOpera = (typeof window.opera != 'undefined');
	//apply the user-supplied config and hook document events
	this.setup(config);
}
/**
 * Resolves an argument that may be either a DOM node or an element id.
 * @date 22-02-2007 0:14:50
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @param objorid A DOM object, or id of a DOM object.
 * @return The identified DOM object or null on error.
 */
imgmap.prototype.assignOID = function(objorid) {
	try {
		switch (typeof objorid) {
		case 'undefined':
			this.log("Undefined object passed to assignOID.");// Called from: " + arguments.callee.caller, 1);
			return null;
		case 'object':
			return objorid;
		case 'string':
			return document.getElementById(objorid);
		}
	}
	catch (err) {
		this.log("Error in assignOID", 1);
	}
	//any other type (or an exception) resolves to null
	return null;
};
/**
 * Main setup function.
 * Merges the given config into this.config, hooks document level event
 * handlers, autodetects the script baseroot path when not configured,
 * lazy-loads excanvas (IE only) and the language file, validates the
 * configured custom callbacks and finally hooks the window onload event.
 * Can be called manually or constructor will call it.
 * @date 22-02-2007 0:15:42
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @param config config object (own properties override this.config defaults)
 * @return True if all went ok.
 */
imgmap.prototype.setup = function(config) {
//this.log('setup');
//copy non-default config parameters to this.config
for (var i in config) {
if (config.hasOwnProperty(i)) {
this.config[i] = config[i];
}
}
//set document event hooks
//(the bound handlers are stored in this.eventHandlers so the very same
//function references can later be passed to removeEvent)
this.addEvent(document, 'keydown', this.eventHandlers.doc_keydown = this.doc_keydown.bind(this));
this.addEvent(document, 'keyup', this.eventHandlers.doc_keyup = this.doc_keyup.bind(this));
this.addEvent(document, 'mousedown', this.eventHandlers.doc_mousedown = this.doc_mousedown.bind(this));
//set pic_container element - supposedly it already exists in the DOM
if (config && config.pic_container) {
this.pic_container = this.assignOID(config.pic_container);
this.disableSelection(this.pic_container);
}
if (!this.config.baseroot) {
//baseroot not configured - autodetect it from the <base> element and
//the src of the imgmap*.js script tag
//search for a base - theoretically there can only be one, but lets search
//for the first non-empty
var bases = document.getElementsByTagName('base');
var base = '';
for (i=0; i<bases.length; i++) {//i declared earlier
if (bases[i].href) {
base = bases[i].href;
//append slash if missing
if (base.charAt(base.length-1) != '/') {
base+= '/';
}
break;
}
}
//search for scripts
var scripts = document.getElementsByTagName('script');
for (i=0; i<scripts.length; i++) {//i declared earlier
if (scripts[i].src && scripts[i].src.match(/imgmap\w*\.js(\?.*?)?$/)) {
var src = scripts[i].src;
//cut filename part, leave last slash
src = src.substring(0, src.lastIndexOf('/') + 1);
//set final baseroot path
//(a relative script src gets resolved against the base href, if any)
if (base && src.indexOf('://') == -1) {
this.config.baseroot = base + src;
}
else {
this.config.baseroot = src;
}
//exit loop
break;
}
}
}
//load excanvas js - as soon as possible
//(only needed on IE without native canvas support and not loaded already)
if (this.isMSIE &&
typeof window.CanvasRenderingContext2D == 'undefined' && typeof G_vmlCanvasManager == 'undefined') {
this.loadScript(this.config.baseroot + 'excanvas.js');
//alert('loadcanvas');
}
//alert(this.config.baseroot);
//load language js - as soon as possible
if (!this.config.lang) {
this.config.lang = this.detectLanguage();
}
if (typeof imgmapStrings == 'undefined') {
//language file might have already been loaded (ex highlighter mode)
this.loadScript(this.config.baseroot + 'lang_' + this.config.lang + '.js');
}
//check event hooks - warn about callback names that are not known events
var found, j, le;
for (i in this.config.custom_callbacks) {
if (this.config.custom_callbacks.hasOwnProperty(i)) {
found = false;
for (j = 0, le = this.event_types.length; j < le; j++) {
if (i == this.event_types[j]) {
found = true;
break;
}
}
if (!found) {
this.log("Unknown custom callback: " + i, 1);
}
}
}
//hook onload event - as late as possible
this.addEvent(window, 'load', this.onLoad.bind(this));
return true;
};
/**
 * Schedule up to 'tries' delayed re-invocations of the given function.
 * The per-function attempt counter is stored on the function object itself
 * (fn.tries), so each distinct function carries its own retry budget.
 * currently unused
 * @param fn The function to re-invoke.
 * @param delay Delay between attempts, in milliseconds.
 * @param tries Maximum number of attempts.
 * @ignore
 */
imgmap.prototype.retryDelayed = function(fn, delay, tries) {
	if (typeof fn.tries == 'undefined') {fn.tries = 0;}
	if (fn.tries++ < tries) {
		//BUGFIX: capture the imgmap instance - inside the setTimeout
		//callback 'this' would refer to the global object, so the original
		//fn.apply(this) invoked fn with the wrong context.
		var _this = this;
		window.setTimeout(function() {
			fn.apply(_this);
		}, delay);
	}
};
/**
 * EVENT HANDLER: Handle event when the page with scripts is loaded.
 * Reschedules itself until the language strings have arrived, falling back
 * first to config.defaultLang and finally to english when the requested
 * language file cannot be loaded. Also verifies excanvas availability on
 * IE and spawns highlighter objects when running in highlighter mode.
 * @date 22-02-2007 0:16:22
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @param e The event object.
 * @return True when initialization finished, false while still waiting/retrying.
 */
imgmap.prototype.onLoad = function(e) {
if (this.isLoaded) {return true;}//guard: onload can fire more than once
var _this = this;
//this.log('readystate: ' + document.readyState);
if (typeof imgmapStrings == 'undefined') {
//language file not parsed yet - retry a few times before falling back
if (this.cntReloads++ < 5) {
//this.retryDelayed(_this.onLoad(), 1000, 3);
window.setTimeout(function () {_this.onLoad(e);} ,1200);
this.log('Delaying onload (language ' + this.config.lang + ' not loaded, try: ' + this.cntReloads + ')');
return false;
}
else if (this.config.lang != this.config.defaultLang && this.config.defaultLang != 'en') {
//first fallback: the configured default language
this.log('Falling back to default language: ' + this.config.defaultLang);
this.cntReloads = 0;
this.config.lang = this.config.defaultLang;
this.loadScript(this.config.baseroot + 'lang_' + this.config.lang + '.js');
window.setTimeout(function () {_this.onLoad(e);} ,1200);
return false;
}
else if (this.config.lang != 'en') {
//last fallback: english
this.log('Falling back to english language');
this.cntReloads = 0;
this.config.lang = 'en';
this.loadScript(this.config.baseroot + 'lang_' + this.config.lang + '.js');
window.setTimeout(function () {_this.onLoad(e);} ,1200);
return false;
}
}
//else
try {
this.loadStrings(imgmapStrings);
}
catch (err) {
this.log("Unable to load language strings", 1);
}
//check if ExplorerCanvas correctly loaded - detect if browser supports canvas
//alert(typeof G_vmlCanvasManager + this.isMSIE + typeof window.CanvasRenderingContext2D);
if (this.isMSIE) {
//alert('cccc');
//alert(typeof G_vmlCanvasManager);
if (typeof window.CanvasRenderingContext2D == 'undefined' && typeof G_vmlCanvasManager == 'undefined') {
//alert('bbb');
/*
if (this.cntReloads++ < 5) {
var _this = this;
//this.retryDelayed(_this.onLoad(), 1000, 3);
window.setTimeout(function () {
_this.onLoad(e);
}
,1000
);
//alert('aaa');
this.log('Delaying onload (excanvas not loaded, try: ' + this.cntReloads + ')');
return false;
}
*/
this.log(this.strings.ERR_EXCANVAS_LOAD, 2);//critical error
}
}
if (this.config.mode == 'highlighter') {
//call global scope function
imgmap_spawnObjects(this.config);
}
this.isLoaded = true;
return true;
};
/**
 * Attach new 'evt' event handler 'callback' to 'obj'
 * Prefers the Microsoft attachEvent model when present, then the W3C
 * addEventListener model, and falls back to DOM0 property assignment.
 * @date 24-02-2007 21:16:20
 * @param obj The object on which the handler is defined.
 * @param evt The name of the event, like mousedown.
 * @param callback The callback function (named if you want it to be removed).
 */
imgmap.prototype.addEvent = function(obj, evt, callback) {
	if (obj.attachEvent) {
		//legacy Microsoft registration model
		return obj.attachEvent("on" + evt, callback);
	}
	if (obj.addEventListener) {
		//W3C standard registration model
		obj.addEventListener(evt, callback, false);
		return true;
	}
	//last resort: plain on<event> property (clobbers any previous handler)
	obj['on' + evt] = callback;
};
/**
 * Detach 'evt' event handled by 'callback' from 'obj' object.
 * Callback must be a non anonymous function, see eventHandlers.
 * @see #eventHandlers
 * Example: myimgmap.removeEvent(myimgmap.pic, 'mousedown', myimgmap.eventHandlers.img_mousedown);
 * @date 24-11-2007 15:22:17
 * @param obj The object on which the handler is defined.
 * @param evt The name of the event, like mousedown.
 * @param callback The named callback function.
 */
imgmap.prototype.removeEvent = function(obj, evt, callback) {
	if (obj.detachEvent) {
		//legacy Microsoft detach model
		return obj.detachEvent("on" + evt, callback);
	}
	if (obj.removeEventListener) {
		//W3C standard model
		obj.removeEventListener(evt, callback, false);
		return true;
	}
	//last resort: clear the DOM0 handler property
	obj['on' + evt] = null;
};
/**
 * Hook a load handler on an object, working around script elements.
 * We need this because load events for scripts function slightly differently:
 * IE script nodes fire 'readystatechange' instead of 'load'.
 * @link http://dean.edwards.name/weblog/2006/06/again/
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 24-03-2007 11:02:21
 * @param obj The object to watch (typically a script node).
 * @param callback The function to invoke when loading finished.
 */
imgmap.prototype.addLoadEvent = function(obj, callback) {
	if (obj.attachEvent) {
		//legacy Microsoft model - scripts report progress via readystatechange
		return obj.attachEvent("onreadystatechange", callback);
	}
	if (obj.addEventListener) {
		//W3C standard model
		obj.addEventListener('load', callback, false);
		return true;
	}
	//fallback: DOM0 onload property
	obj.onload = callback;
};
/**
 * Include another js script into the current document.
 * Creates a new script node in the document head and hooks its load event,
 * so completed loads get registered in this.loadedScripts via script_load.
 * @date 22-02-2007 0:17:04
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @param url The url of the script we want to load.
 * @see #script_load
 * @see #addLoadEvent
 * @return False for an empty url, true otherwise.
 */
imgmap.prototype.loadScript = function(url) {
	if (url === '') {return false;}
	//skip urls whose loading already completed earlier
	if (this.loadedScripts[url] == 1) {return true;}
	this.log('Loading script: ' + url);
	try {
		var node = document.createElement('SCRIPT');
		node.setAttribute('language', 'javascript');
		node.setAttribute('type', 'text/javascript');
		node.setAttribute('src', url);
		document.getElementsByTagName('head')[0].appendChild(node);
		//watch for completion so loadedScripts gets updated
		this.addLoadEvent(node, this.script_load.bind(this));
	}
	catch (err) {
		this.log('Error loading script: ' + url);
	}
	return true;
};
/**
 * EVENT HANDLER: Event handler of external script loaded.
 * Registers the script url in this.loadedScripts once loading completed.
 * @param e The event object (ignored on IE, where window.event is used).
 * @return True when the script was registered as loaded.
 */
imgmap.prototype.script_load = function(e) {
	var obj = (this.isMSIE) ? window.event.srcElement : e.currentTarget;
	var url = obj.src;
	var complete = false;
	//alert(url);
	if (typeof obj.readyState != 'undefined') {
		//IE fires readystatechange several times; BUGFIX: scripts served
		//from the browser cache report 'loaded' rather than 'complete',
		//so accept both terminal states (previously only 'complete' was
		//accepted and cached scripts never got registered).
		if (obj.readyState == 'complete' || obj.readyState == 'loaded') {
			complete = true;
		}
	}
	else {
		//other browsers fire the load event only once, when done
		complete = true;
	}
	if (complete) {
		this.loadedScripts[url] = 1;
		this.log('Loaded script: ' + url);
		return true;
	}
};
/**
 * Load strings from a key:value object to the prototype strings array.
 * Existing entries with the same key get overwritten.
 * @author adam
 * @date 2007
 * @param obj Javascript object that holds key:value pairs.
 */
imgmap.prototype.loadStrings = function(obj) {
	var key;
	for (key in obj) {
		if (!obj.hasOwnProperty(key)) {continue;}//skip inherited members
		this.strings[key] = obj[key];
	}
};
/**
 * This function is to load a given img url to the pic_container.
 *
 * Loading an image will clear all current maps.
 * When called with an image object instead of a url, the src and the
 * displayed dimensions are extracted and the function recurses with the
 * string form.
 * @see #useImage
 * @param img The imageurl or object to load (if object, function will get url, and do a recall)
 * @param imgw The width we want to force on the image (optional)
 * @param imgh The height we want to force on the image (optional)
 * @returns True on success
 */
imgmap.prototype.loadImage = function(img, imgw, imgh) {
//test for container
if (typeof this.pic_container == 'undefined') {
this.log('You must have pic_container defined to use loadImage!', 2);
return false;
}
//wipe all
this.removeAllAreas();
//reset scale
this.globalscale = 1;
this.fireEvent('onHtmlChanged', '');//empty
if (!this._getLastArea()) {
//init with one new area if there was none editable
if (this.config.mode != "editor2") {this.addNewArea();}
}
if (typeof img == 'string') {
//there is an image given with url to load
if (typeof this.pic == 'undefined') {
this.pic = document.createElement('IMG');
this.pic_container.appendChild(this.pic);
//event handler hooking - only at the first load
//(bound references kept in this.eventHandlers for later removeEvent)
this.addEvent(this.pic, 'mousedown', this.eventHandlers.img_mousedown = this.img_mousedown.bind(this));
this.addEvent(this.pic, 'mouseup', this.eventHandlers.img_mouseup = this.img_mouseup.bind(this));
this.addEvent(this.pic, 'mousemove', this.eventHandlers.img_mousemove = this.img_mousemove.bind(this));
this.pic.style.cursor = this.config.cursor_default;
}
//img ='../../'+img;
this.log('Loading image: ' + img, 0);
//calculate timestamp to bypass browser cache mechanism
var q = '?';
if (img.indexOf('?') > -1) {
q = '&';
}
this.pic.src = img + q + (new Date().getTime());
if (imgw && imgw > 0) {this.pic.setAttribute('width', imgw);}
if (imgh && imgh > 0) {this.pic.setAttribute('height', imgh);}
this.fireEvent('onLoadImage', this.pic);
return true;
}
else if (typeof img == 'object') {
//we have to use the src of the image object
var src = img.src; //img.getAttribute('src');
if (src === '' && img.getAttribute('mce_src') !== '') {
//if it is a tinymce object, it has no src but mce_src attribute!
src = img.getAttribute('mce_src');
}
else if (src === '' && img.getAttribute('_fcksavedurl') !== '') {
//if it is an fck object, it might have only _fcksavedurl attribute!
src = img.getAttribute('_fcksavedurl');
}
// Get the displayed dimensions of the image
if (!imgw) {
imgw = img.clientWidth;
}
if (!imgh) {
imgh = img.clientHeight;
}
//recurse, this time with the url string
return this.loadImage(src, imgw, imgh);
}
};
/**
 * We use this when there is an existing image object we want to handle with imgmap.
 * Mainly used in highlighter mode.
 * Rebinds the mouse handlers from any previously used image to the given
 * one and makes sure the image sits inside a 'pic_container' div
 * (creating and inserting one when needed).
 * @author adam
 * @see #loadImage
 * @see #imgmap_spawnObjects
 * @date 2007
 * @param img DOM object or id of image we want to use.
 * @return True on success (undefined when img could not be resolved).
 */
imgmap.prototype.useImage = function(img) {
//wipe all
this.removeAllAreas();
if (!this._getLastArea()) {
//init with one new area if there was none editable
if (this.config.mode != "editor2") {this.addNewArea();}
}
img = this.assignOID(img);
if (typeof img == 'object') {
if (typeof this.pic != 'undefined') {
//remove previous handlers (same references that were bound at hook time)
this.removeEvent(this.pic, 'mousedown', this.eventHandlers.img_mousedown);
this.removeEvent(this.pic, 'mouseup', this.eventHandlers.img_mouseup);
this.removeEvent(this.pic, 'mousemove', this.eventHandlers.img_mousemove);
this.pic.style.cursor = '';
}
this.pic = img;
//hook event handlers
this.addEvent(this.pic, 'mousedown', this.eventHandlers.img_mousedown = this.img_mousedown.bind(this));
this.addEvent(this.pic, 'mouseup', this.eventHandlers.img_mouseup = this.img_mouseup.bind(this));
this.addEvent(this.pic, 'mousemove', this.eventHandlers.img_mousemove = this.img_mousemove.bind(this));
this.pic.style.cursor = this.config.cursor_default;
if (this.pic.parentNode.className == 'pic_container') {
//use existing container
this.pic_container = this.pic.parentNode;
}
else {
//dynamically create container and reparent the image into it
this.pic_container = document.createElement("div");
this.pic_container.className = 'pic_container';
this.pic.parentNode.insertBefore(this.pic_container, this.pic);
//ref: If the node already exists it is removed from current parent node, then added to new parent node.
this.pic_container.appendChild(this.pic);
}
this.fireEvent('onLoadImage', this.pic);
return true;
}
};
/**
 * Fires custom hook onStatusMessage, passing the status string.
 * Use this to update your GUI.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 26-07-2008 13:22:54
 * @param str The status string
 */
imgmap.prototype.statusMessage = function(str) {
	//delegate to the generic custom-callback dispatcher
	this.fireEvent('onStatusMessage', str);
};
/**
 * Adds basic logging functionality using firebug console object if available.
 * Also tries to use AIR introspector or the Opera error console when
 * present; otherwise serious messages (level > 1) alert the accumulated
 * log and minor ones go to window.defaultStatus.
 * @date 20-02-2007 17:55:18
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @param obj The object or string you want to debug/echo.
 * @param level The log level, 0 being the smallest issue.
 */
imgmap.prototype.log = function(obj, level) {
	if (typeof level == 'undefined' || level === '') {level = 0;}
	//archive every message that passes the configured threshold
	if (this.config.loglevel != -1 && level >= this.config.loglevel) {
		this.logStore.push({level: level, obj: obj});
	}
	if (typeof console == 'object') {
		//firebug / devtools console
		console.log(obj);
		return;
	}
	if (this.isOpera) {
		opera.postError(level + ': ' + obj);
		return;
	}
	if (typeof air == 'object') {
		//we are inside AIR
		if (typeof air.Introspector == 'object') {
			air.Introspector.Console.log(obj);
		}
		else {
			air.trace(obj);
		}
		return;
	}
	if (level > 1) {
		//serious problem - dump the whole archived log in a single alert
		var lines = [];
		for (var i = 0, le = this.logStore.length; i < le; i++) {
			lines.push(this.logStore[i].level + ': ' + this.logStore[i].obj);
		}
		alert(lines.length ? lines.join("\n") + "\n" : '');
	}
	else {
		window.defaultStatus = (level + ': ' + obj);
	}
};
/**
 * Produces the image map HTML output with the defined areas.
 * Invokes getMapInnerHTML for the area list and wraps it in the <map>
 * element (plus waterMark).
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 2006-06-06 15:10:27
 * @param flags Currently ony 'noscale' used to prevent scaling of coordinates in preview mode.
 * @return The generated html code.
 */
imgmap.prototype.getMapHTML = function(flags) {
	var inner = this.getMapInnerHTML(flags);
	var html = '<map id="' + this.getMapId() + '" name="' + this.getMapName() + '">' +
		inner + this.waterMark + '</map>';
	//notify listeners about the freshly generated code
	this.fireEvent('onGetMap', html);
	return html;
};
/**
 * Get the map areas part only of the current imagemap.
 * Attribute values (alt, title, href, target) are HTML-escaped so that
 * quotes or angle brackets in user supplied texts cannot break out of the
 * generated double-quoted attributes (setMapHTML parses the markup back
 * through innerHTML, which decodes the entities again).
 * @see #getMapHTML
 * @author adam
 * @param flags Currently ony 'noscale' used to prevent scaling of coordinates in preview mode.
 * @return The generated map code without the map wrapper.
 */
imgmap.prototype.getMapInnerHTML = function(flags) {
	//escape text destined for a double-quoted html attribute value
	function attresc(str) {
		return String(str).replace(/&/g, '&amp;').replace(/"/g, '&quot;').replace(/</g, '&lt;').replace(/>/g, '&gt;');
	}
	var html = '';
	var coords;
	//foreach area properties
	for (var i = 0, le = this.areas.length; i < le; i++) {
		if (this.areas[i]) {
			if (this.areas[i].shape && this.areas[i].shape != 'undefined') {
				coords = this.areas[i].lastInput;
				if (flags && flags.match(/noscale/)) {
					//for preview use real coordinates, not scaled
					var cs = coords.split(',');
					for (var j = 0, le2 = cs.length; j < le2; j++) {
						cs[j] = Math.round(cs[j] * this.globalscale);
					}
					coords = cs.join(',');
				}
				html += '<area shape="' + this.areas[i].shape + '"' +
					' alt="' + attresc(this.areas[i].aalt) + '"' +
					' title="' + attresc(this.areas[i].atitle) + '"' +
					' coords="' + coords + '"' +
					' href="' + attresc(this.areas[i].ahref) + '"' +
					' target="' + attresc(this.areas[i].atarget) + '" />';
			}
		}
	}
	return html;
};
/**
 * Get the map name of the current imagemap.
 * If doesnt exist, nor map id, generate a new name based on timestamp.
 * The most portable solution is to use the same value for id and name.
 * This also conforms the HTML 5 specification, that says:
 * "If the id attribute is also specified, both attributes must have the same value."
 * @link http://www.w3.org/html/wg/html5/#the-map-element
 * @author adam
 * @see #getMapId
 * @return The name of the map.
 */
imgmap.prototype.getMapName = function() {
	if (this.mapname !== '') {return this.mapname;}
	//prefer an already assigned id over generating a new name
	if (this.mapid !== '') {return this.mapid;}
	var now = new Date();
	//timestamp-derived name, e.g. imgmap20081231235959
	this.mapname = ['imgmap', now.getFullYear(), now.getMonth() + 1,
		now.getDate(), now.getHours(), now.getMinutes(),
		now.getSeconds()].join('');
	return this.mapname;
};
/**
 * Get the map id of the current imagemap.
 * If doesnt exist, fall back to (and cache) the map name.
 * @author adam
 * @see #getMapName
 * @return The id of the map.
 */
imgmap.prototype.getMapId = function() {
	if (this.mapid === '') {this.mapid = this.getMapName();}
	return this.mapid;
};
/**
 * Convert wild shape names to normal ones.
 * Matching is done on the first 4 characters of the trimmed, lowercased
 * input; anything unrecognized falls back to 'rect'.
 * @date 25-12-2008 19:27:06
 * @param shape The name of the shape to convert.
 * @return The normalized shape name, rect as default.
 */
imgmap.prototype._normShape = function(shape) {
	if (!shape) {return 'rect';}
	var prefix = this.trim(shape).toLowerCase().substring(0, 4);
	if (prefix == 'circ') {return 'circle';}
	if (prefix == 'poly') {return 'poly';}
	//'rect...' and everything unknown normalizes to rect
	return 'rect';
};
/**
 * Try to normalize coordinates that came from:
 * 1. html textarea
 * 2. user input in the active area's input field
 * 3. from the html source in case of plugins or highlighter
 * Example of inputs that need to be handled:
 * 035,035 075,062
 * 150,217, 190,257, 150,297,110,257
 * First cleans separator/leading-zero junk with regexes, then fixes up
 * the coordinate list according to the target shape, optionally
 * converting from another shape (see flag).
 * @author adam
 * @param coords The coordinates in a string.
 * @param shape The shape of the object (rect, circle, poly, bezier1).
 * @param flag Flags that modify the operation. (fromcircle, frompoly, fromrect, preserve)
 * @returns The normalized coordinates.
 */
imgmap.prototype._normCoords = function(coords, shape, flag) {
//function level var declarations
var i;//generic cycle counter
var sx;//smallest x
var sy;//smallest y
var gx;//greatest x
var gy;//greatest y
var temp, le;
//console.log('normcoords: ' + coords + ' - ' + shape + ' - ' + flag);
coords = this.trim(coords);
if (coords === '') {return '';}
var oldcoords = coords;//kept for the 'preserve' comparison at the bottom
//replace some general junk
coords = coords.replace(/(\d)(\D)+(\d)/g, "$1,$3");
coords = coords.replace(/,\D+(\d)/g, ",$1");//cut leading junk
coords = coords.replace(/,0+(\d)/g, ",$1");//cut leading zeros
coords = coords.replace(/(\d)(\D)+,/g, "$1,");
coords = coords.replace(/^\D+(\d)/g, "$1");//cut leading junk
coords = coords.replace(/^0+(\d)/g, "$1");//cut leading zeros
coords = coords.replace(/(\d)(\D)+$/g, "$1");//cut trailing junk
//console.log('>'+coords + ' - ' + shape + ' - ' + flag);
//now fix other issues
//note: parts holds strings - the arithmetic below relies on js numeric coercion
var parts = coords.split(',');
if (shape == 'rect') {
if (flag == 'fromcircle') {
//circle (cx,cy,r) -> bounding rect (cx-r, cy-r, cx+r, cy+r)
var r = parts[2];
parts[0] = parts[0] - r;
parts[1] = parts[1] - r;
parts[2] = parseInt(parts[0], 10) + 2 * r;
parts[3] = parseInt(parts[1], 10) + 2 * r;
}
else if (flag == 'frompoly') {
//collapse the polygon to its bounding box
sx = parseInt(parts[0], 10); gx = parseInt(parts[0], 10);
sy = parseInt(parts[1], 10); gy = parseInt(parts[1], 10);
for (i=0, le = parts.length; i<le; i++) {
if (i % 2 === 0 && parseInt(parts[i], 10) < sx) {
sx = parseInt(parts[i], 10);}
if (i % 2 === 1 && parseInt(parts[i], 10) < sy) {
sy = parseInt(parts[i], 10);}
if (i % 2 === 0 && parseInt(parts[i], 10) > gx) {
gx = parseInt(parts[i], 10);}
if (i % 2 === 1 && parseInt(parts[i], 10) > gy) {
gy = parseInt(parts[i], 10);}
//console.log(sx+","+sy+","+gx+","+gy);
}
parts[0] = sx; parts[1] = sy;
parts[2] = gx; parts[3] = gy;
}
//fill in missing/invalid members with usable defaults
if (!(parseInt(parts[1], 10) >= 0)) {parts[1] = parts[0];}
if (!(parseInt(parts[2], 10) >= 0)) {parts[2] = parseInt(parts[0], 10) + 10;}
if (!(parseInt(parts[3], 10) >= 0)) {parts[3] = parseInt(parts[1], 10) + 10;}
//swap members so that left<=right and top<=bottom
if (parseInt(parts[0], 10) > parseInt(parts[2], 10)) {
temp = parts[0];
parts[0] = parts[2];
parts[2] = temp;
}
if (parseInt(parts[1], 10) > parseInt(parts[3], 10)) {
temp = parts[1];
parts[1] = parts[3];
parts[3] = temp;
}
coords = parts[0]+","+parts[1]+","+parts[2]+","+parts[3];
//console.log(coords);
}
else if (shape == 'circle') {
if (flag == 'fromrect') {
//inscribe the circle: radius is half of the smaller rect side
sx = parseInt(parts[0], 10); gx = parseInt(parts[2], 10);
sy = parseInt(parts[1], 10); gy = parseInt(parts[3], 10);
//use smaller side
parts[2] = (gx - sx < gy - sy) ? gx - sx : gy - sy;
parts[2] = Math.floor(parts[2] / 2);//radius
parts[0] = sx + parts[2];
parts[1] = sy + parts[2];
}
else if (flag == 'frompoly') {
//bounding box of the polygon, then inscribe the circle in it
sx = parseInt(parts[0], 10); gx = parseInt(parts[0], 10);
sy = parseInt(parts[1], 10); gy = parseInt(parts[1], 10);
for (i=0, le = parts.length; i<le; i++) {
if (i % 2 === 0 && parseInt(parts[i], 10) < sx) {
sx = parseInt(parts[i], 10);}
if (i % 2 === 1 && parseInt(parts[i], 10) < sy) {
sy = parseInt(parts[i], 10);}
if (i % 2 === 0 && parseInt(parts[i], 10) > gx) {
gx = parseInt(parts[i], 10);}
if (i % 2 === 1 && parseInt(parts[i], 10) > gy) {
gy = parseInt(parts[i], 10);}
//console.log(sx+","+sy+","+gx+","+gy);
}
//use smaller side
parts[2] = (gx - sx < gy - sy) ? gx - sx : gy - sy;
parts[2] = Math.floor(parts[2] / 2);//radius
parts[0] = sx + parts[2];
parts[1] = sy + parts[2];
}
//fill in missing/invalid center y and radius
if (!(parseInt(parts[1], 10) > 0)) {parts[1] = parts[0];}
if (!(parseInt(parts[2], 10) > 0)) {parts[2] = 10;}
coords = parts[0]+","+parts[1]+","+parts[2];
}
else if (shape == 'poly') {
if (flag == 'fromrect') {
//expand rect (x1,y1,x2,y2) to its 4 corner points
parts[4] = parts[2];
parts[5] = parts[3];
parts[2] = parts[0];
parts[6] = parts[4];
parts[7] = parts[1];
}
else if (flag == 'fromcircle') {
//approximate the circle with a regular polygon
//@url http://www.pixelwit.com/blog/2007/06/29/basic-circle-drawing-actionscript/
var centerX = parseInt(parts[0], 10);
var centerY = parseInt(parts[1], 10);
var radius = parseInt(parts[2], 10);
var j = 0;
parts[j++] = centerX + radius;
parts[j++] = centerY;
var sides = 60;//constant = sides the fake circle will have
for (i=0; i<=sides; i++) {
var pointRatio = i/sides;
var xSteps = Math.cos(pointRatio*2*Math.PI);
var ySteps = Math.sin(pointRatio*2*Math.PI);
var pointX = centerX + xSteps * radius;
var pointY = centerY + ySteps * radius;
parts[j++] = Math.round(pointX);
parts[j++] = Math.round(pointY);
}
//console.log(parts);
}
coords = parts.join(',');
}
else if (shape == 'bezier1') {
//bezier coordinates are passed through after the junk cleanup only
coords = parts.join(',');
}
if (flag == 'preserve' && oldcoords != coords) {
//return original and throw error
//throw "invalid coords";
return oldcoords;
}
return coords;
};
/**
 * Sets the coordinates according to the given HTML map code or DOM object.
 * Parses the map (given as an html string or a DOM node) and recreates
 * each of its areas as an editable area, normalizing shape names and
 * coordinate lists on the way.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 2006-06-07 11:47:16
 * @param map DOM object or string of a map you want to apply.
 * @return True on success
 */
imgmap.prototype.setMapHTML = function(map) {
if (this.viewmode === 1) {return;}//exit if preview mode
this.fireEvent('onSetMap', map);
//this.log(map);
//remove all areas
this.removeAllAreas();
//console.log(this.areas);
var oMap;
if (typeof map == 'string') {
//parse the html string by rendering it into a temporary container
var oHolder = document.createElement('DIV');
oHolder.innerHTML = map;
oMap = oHolder.firstChild;
}
else if (typeof map == 'object') {
oMap = map;
}
if (!oMap || oMap.nodeName.toLowerCase() !== 'map') {return false;}
this.mapname = oMap.name;
this.mapid = oMap.id;
var newareas = oMap.getElementsByTagName('area');
var shape, coords, href, alt, title, target, id;
for (var i=0, le = newareas.length; i<le; i++) {
shape = coords = href = alt = title = target = '';
id = this.addNewArea();//btw id == this.currentid, just this form is a bit clearer
//NOTE: the second getAttribute argument (2) is presumably the IE
//extension requesting the literal attribute value - other browsers
//ignore extra arguments; TODO confirm against target browsers
shape = this._normShape(newareas[i].getAttribute('shape', 2));
this.initArea(id, shape);
if (newareas[i].getAttribute('coords', 2)) {
//normalize coords
coords = this._normCoords(newareas[i].getAttribute('coords', 2), shape);
this.areas[id].lastInput = coords;
//for area this one will be set in recalculate
}
href = newareas[i].getAttribute('href', 2);
// FCKeditor stored url to prevent mangling from the browser.
var sSavedUrl = newareas[i].getAttribute( '_fcksavedurl' );
if (sSavedUrl) {
href = sSavedUrl;
}
if (href) {
this.areas[id].ahref = href;
}
alt = newareas[i].getAttribute('alt');
if (alt) {
this.areas[id].aalt = alt;
}
title = newareas[i].getAttribute('title');
if (!title) {title = alt;}//fall back to alt when title is missing
if (title) {
this.areas[id].atitle = title;
}
target = newareas[i].getAttribute('target');
if (target) {target = target.toLowerCase();}
// if (target == '') target = '_self';
this.areas[id].atarget = target;
this._recalculate(id, coords);//contains repaint
this.relaxArea(id);
this.fireEvent('onAreaChanged', this.areas[id]);
}//end for areas
this.fireEvent('onHtmlChanged', this.getMapHTML());
return true;
};
/**
 * Preview image with imagemap applied.
 * Toggles between design mode (canvases visible, usemap detached) and
 * preview mode (canvases hidden, the generated map activated on the
 * image via the usemap attribute).
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 2006-06-06 14:51:01
 * @url http://www.quirksmode.org/bugreports/archives/2005/03/Usemap_attribute_wrongly_case_sensitive.html
 * @return False on error, 0 when switched to edit mode, 1 when switched to preview mode
 */
imgmap.prototype.togglePreview = function() {
var i, le;//generic cycle counter
if (!this.pic) {return false;}//exit if pic is undefined
//dynamically create preview container (hidden holder of the generated <map>)
if (!this.preview) {
this.preview = document.createElement('DIV');
this.preview.style.display = 'none';
this.pic_container.appendChild(this.preview);
}
if (this.viewmode === 0) {
//switching into preview mode
//hide canvas elements and labels
for (i = 0, le = this.areas.length; i < le; i++) {
if (this.areas[i]) {
this.areas[i].style.display = 'none';
if (this.areas[i].label) {this.areas[i].label.style.display = 'none';}
}
}
//activate image map ('noscale' emits the real, unscaled coordinates)
this.preview.innerHTML = this.getMapHTML('noscale');
this.pic.setAttribute('border', '0', 0);
this.pic.setAttribute('usemap', '#' + this.mapname, 0);
this.pic.style.cursor = 'auto';
this.viewmode = 1;
this.statusMessage(this.strings.PREVIEW_MODE);
}
else {
//switching back to design mode
//show canvas elements
for (i = 0, le = this.areas.length; i < le; i++) {
if (this.areas[i]) {
this.areas[i].style.display = '';
if (this.areas[i].label && this.config.label) {this.areas[i].label.style.display = '';}
}
}
//clear image map
this.preview.innerHTML = '';
this.pic.style.cursor = this.config.cursor_default;
this.pic.removeAttribute('usemap', 0);
this.viewmode = 0;
this.statusMessage(this.strings.DESIGN_MODE);
this.is_drawing = 0;
}
this.fireEvent('onModeChanged', this.viewmode);
return this.viewmode;
};
/**
 * Adds a new area. It will later become a canvas.
 * A placeholder DIV with an 'undefined' shape is created; initArea turns
 * it into a real canvas at mousedown time.
 * GUI should use the onAddArea callback to act accordingly.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 2006-06-06 16:49:25
 * @see #initArea
 * @return The id of the freshly added area.
 */
imgmap.prototype.addNewArea = function() {
	if (this.viewmode === 1) {return;}//no editing while in preview mode
	//next id continues after the last existing editable area
	var lastarea = this._getLastArea();
	var id = lastarea ? lastarea.aid + 1 : 0;
	var placeholder = document.createElement('DIV');
	placeholder.id = this.mapname + 'area' + id;
	placeholder.aid = id;
	placeholder.shape = "undefined";
	this.areas[id] = placeholder;
	this.currentid = id;
	this.fireEvent('onAddArea', id);
	return id;
};
/**
 * Initialize a new area.
 * Create the canvas, initialize it.
 * Reset area parameters.
 * The placeholder div created by addNewArea (or a previously placed
 * canvas) is replaced by a CANVAS element (VML-backed via excanvas on old
 * IE), mouse handlers are hooked, the per-area drag/draw state
 * (this.memory) is reset and a label div is created.
 * @param id The id of the area (already existing with undefined shape)
 * @param shape The shape the area will have (rect, circle, poly, bezier1)
 * @return False when the area does not exist (anymore).
 */
imgmap.prototype.initArea = function(id, shape) {
if (!this.areas[id]) {return false;}//if all was erased, return
//remove preinited dummy div or already placed canvas
if (this.areas[id].parentNode) {this.areas[id].parentNode.removeChild(this.areas[id]);}
if (this.areas[id].label) {this.areas[id].label.parentNode.removeChild(this.areas[id].label);}
this.areas[id] = null;
//create CANVAS node
this.areas[id] = document.createElement('CANVAS');
this.pic_container.appendChild(this.areas[id]);
this.pic_container.style.position = 'relative';
//alert('init' + typeof G_vmlCanvasManager);
if (typeof G_vmlCanvasManager != "undefined") {
//override CANVAS with VML object
//(element is appended first - excanvas presumably needs it in the DOM;
//TODO confirm)
this.areas[id] = G_vmlCanvasManager.initElement(this.areas[id]);
//this.areas[id] = this.pic.parentNode.lastChild;
}
this.areas[id].id = this.mapname + 'area' + id;
this.areas[id].aid = id;
this.areas[id].shape = shape;
this.areas[id].ahref = '';
this.areas[id].atitle = '';
this.areas[id].aalt = '';
this.areas[id].atarget = ''; // '_self';
//position the canvas right over the image
this.areas[id].style.position = 'absolute';
this.areas[id].style.top = this.pic.offsetTop + 'px';
this.areas[id].style.left = this.pic.offsetLeft + 'px';
this._setopacity(this.areas[id], this.config.CL_DRAW_BG, this.config.draw_opacity);
//hook event handlers
this.areas[id].ondblclick = this.area_dblclick.bind(this);
this.areas[id].onmousedown = this.area_mousedown.bind(this);
this.areas[id].onmouseup = this.area_mouseup.bind(this);
this.areas[id].onmousemove = this.area_mousemove.bind(this);
this.areas[id].onmouseover = this.area_mouseover.bind(this);
this.areas[id].onmouseout = this.area_mouseout.bind(this);
//initialize memory object (per-area drag/draw state)
this.memory[id] = {};
this.memory[id].downx = 0;
this.memory[id].downy = 0;
this.memory[id].left = 0;
this.memory[id].top = 0;
this.memory[id].width = 0;
this.memory[id].height = 0;
this.memory[id].xpoints = [];
this.memory[id].ypoints = [];
//create label node
this.areas[id].label = document.createElement('DIV');
this.pic_container.appendChild(this.areas[id].label);
this.areas[id].label.className = this.config.label_class;
this.assignCSS(this.areas[id].label, this.config.label_style);
this.areas[id].label.style.position = 'absolute';
};
/**
 * Resets area border and opacity to a normal state after drawing.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 15-02-2007 22:07:28
 * @param id The id of the area.
 * @see #relaxAllAreas
 */
imgmap.prototype.relaxArea = function(id) {
	var area = this.areas[id];
	if (!area) {return;}
	this.fireEvent('onRelaxArea', id);
	//back to the normal border and background/opacity
	this._setBorder(id, 'NORM');
	this._setopacity(area, this.config.CL_NORM_BG, this.config.norm_opacity);
};
/**
 * Resets area border and opacity of all areas.
 * Calls relaxArea on each of them.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 23-04-2007 23:31:09
 * @see #relaxArea
 */
imgmap.prototype.relaxAllAreas = function() {
	var i, le;
	for (i = 0, le = this.areas.length; i < le; i++) {
		//skip removed (nulled) slots
		if (this.areas[i]) {this.relaxArea(i);}
	}
};
/**
 * Set border of a given area according to style flag.
 * Possible values of style: NORM, HIGHLIGHT, DRAW.
 * Non-rectangle shapes wont get a border if config.bounding_box is false.
 * @date 26-12-2008 22:34:41
 * @param id The id of the area to set the border on.
 * @param style Coloring style (NORM, HIGHLIGHT, DRAW), see relevant colors in config.
 * @since 2.1
 */
imgmap.prototype._setBorder = function(id, style) {
	var area = this.areas[id];
	var isrect = (area.shape == 'rect');
	if (!isrect && !this.config.bounding_box) {
		//non-rect shape without bounding box config - clear border
		area.style.border = '';
		return;
	}
	area.style.borderWidth = '1px';
	//dotted while drawing, solid otherwise
	area.style.borderStyle = (style == 'DRAW') ? 'dotted' : 'solid';
	//color key e.g. CL_DRAW_SHAPE (rect) or CL_DRAW_BOX (bounding box)
	area.style.borderColor = this.config['CL_' + style + '_' + (isrect ? 'SHAPE' : 'BOX')];
};
/**
 * Set opacity of area to the given percentage, as well as set the background color.
 * If percentage contains a dash(-), the setting of the opacity will be gradual.
 * A pct of the form 'A-B' starts at A and animates towards B in steps of
 * +5 / -3 per 20ms tick, rescheduling itself with the '-B' form until the
 * target is reached.
 * @param area The area object.
 * @param bgcolor New background color
 * @param pct Percentage of the opacity (number, numeric string or 'A-B' range string).
 */
imgmap.prototype._setopacity = function(area, bgcolor, pct) {
if (bgcolor) {area.style.backgroundColor = bgcolor;}
if (pct && typeof pct == 'string' && pct.match(/^\d*\-\d+$/)) {
//gradual fade
var parts = pct.split('-');
if (typeof parts[0] != 'undefined') {
//set initial opacity
//(for the rescheduled '-B' form parts[0] is '', parseInt gives NaN
//and the final isNaN check below skips the immediate set)
parts[0] = parseInt(parts[0], 10);
this._setopacity(area, bgcolor, parts[0]);
}
if (typeof parts[1] != 'undefined') {
parts[1] = parseInt(parts[1], 10);
var curr = this._getopacity(area);
//this.log('curr: '+curr);
var _this = this;
var diff = Math.round(parts[1] - curr);
if (diff > 5) {
//still well below target - step up by 5 and reschedule
window.setTimeout(function () {_this._setopacity(area, null, '-'+parts[1]);}, 20);
pct = 1*curr + 5;//1* coerces curr to a number
}
else if (diff < -3) {
//still well above target - step down by 3 and reschedule
window.setTimeout(function () {_this._setopacity(area, null, '-'+parts[1]);}, 20);
pct = 1*curr - 3;
}
else {
//final set
pct = parts[1];
}
}
}
if (!isNaN(pct)) {
pct = Math.round(parseInt(pct, 10));
//this.log('set ('+area.aid+'): ' + pct, 1);
area.style.opacity = pct / 100;
area.style.filter = 'alpha(opacity='+pct+')';//IE alpha filter
}
};
/**
 * Get the currently set opacity of a given area.
 * Reads the standard CSS opacity first, then the IE alpha filter, and
 * falls back to 100 when neither is set.
 * @author adam
 * @param area The area (canvas) you want to get opacity info from.
 * @return Opacity value in a range of 0-100.
 */
imgmap.prototype._getopacity = function(area) {
	//an unset style.opacity is '' which would coerce to 0 in the <= comparison;
	//exclude it so we fall through to the filter check / the 100 default
	if (area.style.opacity !== '' && area.style.opacity <= 1) {
		return area.style.opacity * 100;
	}
	if (area.style.filter) {
		//IE alpha filter, e.g. 'alpha(opacity=60)' - extract the number
		return parseInt(area.style.filter.replace(/alpha\(opacity\=([^\)]*)\)/ig, "$1"), 10);
	}
	return 100;//default opacity
};
/**
 * Removes the area marked by id.
 * Detaches the area and its label from the DOM and nulls out handler
 * references to break IE circular-reference memory leaks.
 * The mass flag (used by removeAllAreas) suppresses the onHtmlChanged
 * callback so the output HTML is only regenerated once at the end.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 11-02-2007 20:40:58
 * @param id The id of the area to remove.
 * @param mass Flag to indicate skipping the call of onHtmlChanged callback
 * @see #removeAllAreas
 */
imgmap.prototype.removeArea = function(id, mass) {
	if (this.viewmode === 1) {return;}//preview mode is read-only
	if (id === null || typeof id == "undefined") {return;}//nothing to remove
	var area = this.areas[id];
	try {
		//detach label and area, then explicitly null references
		//to avoid the IE circular reference memleak
		area.label.parentNode.removeChild(area.label);
		area.parentNode.removeChild(area);
		area.label.className = null;
		area.label = null;
		area.onmouseover = null;
		area.onmouseout = null;
		area.onmouseup = null;
		area.onmousedown = null;
		area.onmousemove = null;
	}
	catch (err) {
		//node was not attached (or area already gone) - ignore
	}
	this.areas[id] = null;
	this.fireEvent('onRemoveArea', id);
	//update grand html unless part of a mass removal
	if (!mass) {this.fireEvent('onHtmlChanged', this.getMapHTML());}
};
/**
 * Removes all areas.
 * Calls removeArea on every existing area with the mass flag set, then
 * fires onHtmlChanged once instead of once per area.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 2006-06-07 11:55:34
 * @see #removeArea
 */
imgmap.prototype.removeAllAreas = function() {
	var le = this.areas.length;
	for (var i = 0; i < le; i++) {
		if (!this.areas[i]) {continue;}
		this.removeArea(i, true);
	}
	//single html regeneration at the end - mass param above avoided per-area calls
	this.fireEvent('onHtmlChanged', this.getMapHTML());
};
/**
 * Scales all areas.
 * Stores the new scale factor in the globalscale property (needed so new
 * areas can be drawn correctly on an already scaled canvas) and applies the
 * relative change to every existing area.
 * @author adam
 * @date 02-11-2008 14:13:14
 * @param scale Scale factor (1-original, 0.5-half, 2-double, etc.)
 */
imgmap.prototype.scaleAllAreas = function(scale) {
	var rscale = 1;//scale relative to the current globalscale
	try {
		rscale = scale / this.globalscale;
	}
	catch (err) {
		this.log("Invalid (global)scale", 1);
	}
	this.globalscale = scale;
	for (var i = 0, le = this.areas.length; i < le; i++) {
		var area = this.areas[i];
		//skip deleted areas and areas that were never initialized
		if (area && area.shape != 'undefined') {
			this.scaleArea(i, rscale);
		}
	}
};
/**
 * Scales one area by a relative factor: position, dimensions and - for
 * polygons and beziers - every control point, then repaints and updates
 * the textual coordinates.
 * @author adam
 * @date 02-11-2008 14:13:14
 * @param id The id of the area to scale.
 * @param rscale Relative scale factor (1-keep, 0.5-half, 2-double, etc.)
 */
imgmap.prototype.scaleArea = function(id, rscale) {
	var area = this.areas[id];
	//reposition and resize by the relative factor
	area.style.top  = parseInt(area.style.top, 10)  * rscale + 'px';
	area.style.left = parseInt(area.style.left, 10) * rscale + 'px';
	this.setAreaSize(id, area.width * rscale, area.height * rscale);
	//polygon/bezier control points must be scaled as well
	if (area.shape == 'poly' || area.shape == 'bezier1') {
		for (var i = 0, le = area.xpoints.length; i < le; i++) {
			area.xpoints[i] = area.xpoints[i] * rscale;
			area.ypoints[i] = area.ypoints[i] * rscale;
		}
	}
	this._repaint(area, this.config.CL_NORM_SHAPE);
	this._updatecoords(id);
};
/**
 * Put label in the top left corner according to label config.
 * The config.label template supports placeholders: %n (area id),
 * %c (last coords input), %h (href), %a (alt), %t (title).
 * When labels are disabled the label element is emptied and hidden.
 * @param id The id of the area to add label to.
 */
imgmap.prototype._putlabel = function(id) {
	if (this.viewmode === 1) {return;}//no labels in preview mode
	var area = this.areas[id];
	if (!area.label) {return;}//label element not created yet
	try {
		if (this.config.label) {
			area.label.style.display = '';
			//substitute placeholders from the area's current properties
			var text = this.config.label;
			text = text.replace(/%n/g, String(id));
			text = text.replace(/%c/g, String(area.lastInput));
			text = text.replace(/%h/g, String(area.ahref));
			text = text.replace(/%a/g, String(area.aalt));
			text = text.replace(/%t/g, String(area.atitle));
			area.label.innerHTML = text;
		}
		else {
			area.label.innerHTML = '';
			area.label.style.display = 'none';
		}
		//keep the label glued to the top left corner of the area
		area.label.style.top  = area.style.top;
		area.label.style.left = area.style.left;
	}
	catch (err) {
		this.log("Error putting label", 1);
	}
};
/**
 * Set area title and alt (for IE) according to the hint configuration.
 * The config.hint template supports the same placeholders as the label:
 * %n (area id), %c (last coords input), %h (href), %a (alt), %t (title).
 * The hint shows up in the usual yellow box when you hover over the area.
 * @param id The id of the area to set hint at.
 */
imgmap.prototype._puthint = function(id) {
	try {
		var area = this.areas[id];
		if (this.config.hint) {
			//substitute placeholders from the area's current properties
			var text = this.config.hint;
			text = text.replace(/%n/g, String(id));
			text = text.replace(/%c/g, String(area.lastInput));
			text = text.replace(/%h/g, String(area.ahref));
			text = text.replace(/%a/g, String(area.aalt));
			text = text.replace(/%t/g, String(area.atitle));
			area.title = text;
			area.alt = text;//IE uses alt for the tooltip
		}
		else {
			area.title = '';
			area.alt = '';
		}
	}
	catch (err) {
		this.log("Error putting hint", 1);
	}
};
/**
 * Repaint every existing area in the normal shape color.
 * Useful when you change labeling or hint config on the GUI.
 * @see #_repaint
 */
imgmap.prototype._repaintAll = function() {
	var le = this.areas.length;
	for (var i = 0; i < le; i++) {
		if (this.areas[i]) {this._repaint(this.areas[i], this.config.CL_NORM_SHAPE);}
	}
};
/**
 * Repaints the actual canvas content.
 * This is the only canvas drawing magic that is happening.
 * In fact rectangles will not have any canvas content, just a normal css border.
 * After repainting the canvas, it will call putlabel and puthint methods.
 * Note: circle/poly/bezier areas are assumed to be canvas elements
 * (area.getContext is called on them).
 * @param area The area object.
 * @param color Color of the line to draw on the canvas.
 * @param x Only used for polygons/beziers as the newest control point x.
 * @param y Only used for polygons/beziers as the newest control point y.
 */
imgmap.prototype._repaint = function(area, color, x, y) {
	var ctx;//canvas context
	var width, height, left, top;//canvas properties
	var i, le;//loop counter
	if (area.shape == 'circle') {
		width = parseInt(area.style.width, 10);
		//radius is half the canvas width, minus 1px so the stroke fits inside
		var radius = Math.floor(width/2) - 1;
		//get canvas context
		//alert(area.tagName);
		ctx = area.getContext("2d");
		//clear canvas
		ctx.clearRect(0, 0, width, width);
		//draw circle
		ctx.beginPath();
		ctx.strokeStyle = color;
		ctx.arc(radius, radius, radius, 0, Math.PI*2, 0);
		ctx.stroke();
		ctx.closePath();
		//draw center knob (1x1 rect in the knob color)
		ctx.strokeStyle = this.config.CL_KNOB;
		ctx.strokeRect(radius, radius, 1, 1);
		//put label
		this._putlabel(area.aid);
		this._puthint(area.aid);
	}
	else if (area.shape == 'rect') {
		//rectangles have no canvas content - only the css border; just refresh texts
		//put label
		this._putlabel(area.aid);
		this._puthint(area.aid);
	}
	else if (area.shape == 'poly') {
		width = parseInt(area.style.width, 10);
		height = parseInt(area.style.height, 10);
		left = parseInt(area.style.left, 10);
		top = parseInt(area.style.top, 10);
		if (area.xpoints) {
			//get canvas context
			ctx = area.getContext("2d");
			//clear canvas
			ctx.clearRect(0, 0, width, height);
			//draw polygon - points are absolute, so subtract the canvas offset
			ctx.beginPath();
			ctx.strokeStyle = color;
			ctx.moveTo(area.xpoints[0] - left, area.ypoints[0] - top);
			for (i = 1, le = area.xpoints.length; i < le; i++) {
				ctx.lineTo(area.xpoints[i] - left , area.ypoints[i] - top);
			}
			if (this.is_drawing == this.DM_POLYGON_DRAW || this.is_drawing == this.DM_POLYGON_LASTDRAW) {
				//only draw to the current position if not moving
				//(-5 matches the control point offset stored in img_mousedown)
				ctx.lineTo(x - left - 5 , y - top - 5);
			}
			//close the outline back to the first point
			ctx.lineTo(area.xpoints[0] - left , area.ypoints[0] - top);
			ctx.stroke();
			ctx.closePath();
		}
		//put label
		this._putlabel(area.aid);
		this._puthint(area.aid);
	}
	else if (area.shape == 'bezier1') {
		width = parseInt(area.style.width, 10);
		height = parseInt(area.style.height, 10);
		left = parseInt(area.style.left, 10);
		top = parseInt(area.style.top, 10);
		if (area.xpoints) {
			//get canvas context
			ctx = area.getContext("2d");
			//clear canvas
			ctx.clearRect(0, 0, width, height);
			//draw bezier1 (every second point is control point)
			ctx.beginPath();
			ctx.strokeStyle = color;
			//move to the beginning position
			ctx.moveTo(area.xpoints[0] - left, area.ypoints[0] - top);
			//draw previous points - use every second point only
			for (i = 2, le = area.xpoints.length; i < le; i+= 2) {
				ctx.quadraticCurveTo(area.xpoints[i-1] - left, area.ypoints[i-1] - top, area.xpoints[i] - left, area.ypoints[i] - top);
			}
			if (this.is_drawing == this.DM_BEZIER_DRAW || this.is_drawing == this.DM_BEZIER_LASTDRAW) {
				//only draw to the current position if not moving
				if (area.xpoints.length % 2 === 0 && area.xpoints.length > 1) {
					//drawing point - draw a curve to it using the previous control point
					ctx.quadraticCurveTo(area.xpoints[area.xpoints.length - 1] - left - 5 , area.ypoints[area.ypoints.length - 1] - top - 5, x - left - 5 , y - top - 5);
				}
				else {
					//control point - simply draw a line to it
					ctx.lineTo(x - left - 5 , y - top - 5);
				}
			}
			//close area by drawing a line to the first point
			ctx.lineTo(area.xpoints[0] - left , area.ypoints[0] - top);
			ctx.stroke();
			ctx.closePath();
		}
		//put label
		this._putlabel(area.aid);
		this._puthint(area.aid);
	}
};
/**
 * Updates Area coordinates.
 * Called when needed, eg. on mousemove, mousedown.
 * Also updates html container value (thru hook).
 * Calls callback onAreaChanged and onHtmlChanged so that GUI can follow.
 * This is an important hook to your GUI.
 * Uses globalscale to scale real (screen) coordinates back to map coordinates.
 * @date 2006.10.24. 22:39:27
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @param id The id of the area.
 */
imgmap.prototype._updatecoords = function(id) {
	var area = this.areas[id];
	var gs   = this.globalscale;
	var left   = Math.round(parseInt(area.style.left, 10) / gs);
	var top    = Math.round(parseInt(area.style.top, 10) / gs);
	var height = Math.round(parseInt(area.style.height, 10) / gs);
	var width  = Math.round(parseInt(area.style.width, 10) / gs);
	var value = '';
	if (area.shape == 'rect') {
		//left,top,right,bottom
		value = left + ',' + top + ',' + (left + width) + ',' + (top + height);
		area.lastInput = value;
	}
	else if (area.shape == 'circle') {
		//centerx,centery,radius
		var radius = Math.floor(width/2) - 1;
		value = (left + radius) + ',' + (top + radius) + ',' + radius;
		area.lastInput = value;
	}
	else if (area.shape == 'poly' || area.shape == 'bezier1') {
		if (area.xpoints) {
			//x1,y1,x2,y2,... pairs of unscaled control points
			var coords = [];
			for (var i = 0, le = area.xpoints.length; i < le; i++) {
				coords[coords.length] = Math.round(area.xpoints[i] / gs);
				coords[coords.length] = Math.round(area.ypoints[i] / gs);
			}
			value = coords.join(',');
		}
		area.lastInput = value;
	}
	this.fireEvent('onAreaChanged', area);
	this.fireEvent('onHtmlChanged', this.getMapHTML());
};
/**
 * Updates the visual representation of the area with the given id according
 * to the new coordinates that typically come from an input on the GUI.
 * Uses globalscale to scale area (map) coordinates to real screen coordinates.
 * On any parse/validation failure it logs, shows a status message, repaints
 * with the previous state and leaves lastInput untouched.
 * @date 2006.10.24. 22:46:55
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @param id The id of the area.
 * @param coords The new coords, they will be normalized; falls back to the
 *        area's lastInput when omitted.
 */
imgmap.prototype._recalculate = function(id, coords) {
	try {
		if (coords) {
			coords = this._normCoords(coords, this.areas[id].shape, 'preserve');
		}
		else {
			//no explicit coords given - reuse the last accepted input
			coords = this.areas[id].lastInput || '' ;
		}
		var parts = coords.split(',');
		if (this.areas[id].shape == 'rect') {
			//expects left,top,right,bottom with left<=right and top<=bottom
			if (parts.length != 4 ||
				parseInt(parts[0], 10) > parseInt(parts[2], 10) ||
				parseInt(parts[1], 10) > parseInt(parts[3], 10)) {throw "invalid coords";}
			this.areas[id].style.left = this.globalscale * (this.pic.offsetLeft + parseInt(parts[0], 10)) + 'px';
			this.areas[id].style.top = this.globalscale * (this.pic.offsetTop + parseInt(parts[1], 10)) + 'px';
			this.setAreaSize(id, this.globalscale * (parts[2] - parts[0]), this.globalscale * (parts[3] - parts[1]));
			this._repaint(this.areas[id], this.config.CL_NORM_SHAPE);
		}
		else if (this.areas[id].shape == 'circle') {
			//expects centerx,centery,radius with a non-negative radius
			if (parts.length != 3 ||
				parseInt(parts[2], 10) < 0) {throw "invalid coords";}
			var width = 2 * (parts[2]);
			//alert(parts[2]);
			//alert(width);
			this.setAreaSize(id, this.globalscale * width, this.globalscale * width);
			this.areas[id].style.left = this.globalscale * (this.pic.offsetLeft + parseInt(parts[0], 10) - width/2) + 'px';
			this.areas[id].style.top = this.globalscale * (this.pic.offsetTop + parseInt(parts[1], 10) - width/2) + 'px';
			this._repaint(this.areas[id], this.config.CL_NORM_SHAPE);
		}
		else if (this.areas[id].shape == 'poly' || this.areas[id].shape == 'bezier1') {
			//expects x1,y1,x2,y2,... - rebuild the control point arrays from scratch
			if (parts.length < 2) {throw "invalid coords";}
			this.areas[id].xpoints = [];
			this.areas[id].ypoints = [];
			for (var i=0, le = parts.length; i<le; i+=2) {
				this.areas[id].xpoints[this.areas[id].xpoints.length] = this.globalscale * (this.pic.offsetLeft + parseInt(parts[i], 10));
				this.areas[id].ypoints[this.areas[id].ypoints.length] = this.globalscale * (this.pic.offsetTop + parseInt(parts[i+1], 10));
				//expand the bounding canvas to contain each new point
				this._polygongrow(this.areas[id], this.globalscale * parts[i], this.globalscale * parts[i+1]);
			}
			this._polygonshrink(this.areas[id]);//includes repaint
		}
	}
	catch (err) {
		var msg = (err.message) ? err.message : 'error calculating coordinates';
		this.log(msg, 1);
		this.statusMessage(this.strings.ERR_INVALID_COORDS);
		if (this.areas[id].lastInput) {
			//let the GUI restore the previous (valid) input
			this.fireEvent('onAreaChanged', this.areas[id]);
		}
		this._repaint(this.areas[id], this.config.CL_NORM_SHAPE);
		return;
	}
	//on success update lastInput
	this.areas[id].lastInput = coords;
};
/**
 * Grow polygon area to be able to contain the given new coordinates.
 * Expands the bounding box (left/top/width/height) in whichever direction
 * the new point falls outside of it; pad/pad2 are reserved edge paddings
 * (currently 0).
 * @author adam
 * @param area The area to grow.
 * @param newx The new coordinate x.
 * @param newy The new coordinate y.
 * @see #_polygonshrink
 */
imgmap.prototype._polygongrow = function(area, newx, newy) {
	var left = parseInt(area.style.left, 10);
	var top  = parseInt(area.style.top , 10);
	var xdiff = newx - left;
	var ydiff = newy - top;
	var pad  = 0;//padding on the edges
	var pad2 = 0;//twice the padding
	if (newx < left) {
		//point is left of the box - move left edge and widen
		area.style.left = (newx - pad) + 'px';
		this.setAreaSize(area.aid, parseInt(area.style.width, 10) + Math.abs(xdiff) + pad2, null);
	}
	else if (newx > left + parseInt(area.style.width, 10)) {
		//point is right of the box - widen only
		this.setAreaSize(area.aid, newx - left + pad2, null);
	}
	if (newy < top) {
		//point is above the box - move top edge and grow taller
		area.style.top = (newy - pad) + 'px';
		this.setAreaSize(area.aid, null, parseInt(area.style.height, 10) + Math.abs(ydiff) + pad2);
	}
	else if (newy > top + parseInt(area.style.height, 10)) {
		//point is below the box - grow taller only
		this.setAreaSize(area.aid, null, newy - top + pad2);
	}
};
/**
 * Shrink the polygon bounding area to the necessary size, by first reducing
 * it to the minimum (the first control point), and then gradually growing it
 * to fit every point.
 * We need this because while we were drawing the polygon, it might have
 * expanded the canvas more than needed.
 * Will repaint the area.
 * @author adam
 * @param area The area to shrink.
 * @see #_polygongrow
 */
imgmap.prototype._polygonshrink = function(area) {
	//collapse the bounding box onto the first control point...
	area.style.left = (area.xpoints[0]) + 'px';
	area.style.top  = (area.ypoints[0]) + 'px';
	this.setAreaSize(area.aid, 0, 0);
	//...then grow just enough to contain each point in turn
	var le = area.xpoints.length;
	for (var i = 0; i < le; i++) {
		this._polygongrow(area, area.xpoints[i], area.ypoints[i]);
	}
	this._repaint(area, this.config.CL_NORM_SHAPE);
};
/**
 * EVENT HANDLER: Handles mousemove on the image.
 * This is the main drawing routine.
 * Depending on the current drawing state (is_drawing), will draw/move/resize
 * the rect/circle/poly/bezier to the new position, then repaint and update
 * the textual coordinates.
 * @param e The event object.
 */
imgmap.prototype.img_mousemove = function(e) {
	//function level var declarations
	var x;
	var y;
	var xdiff;
	var ydiff;
	var diff;
	if (this.viewmode === 1) {return;}//exit if preview mode
	//event.x is relative to parent element, but page.x is NOT
	//pos coordinates are the same absolute coords, offset coords are relative to parent
	var pos = this._getPos(this.pic);
	x = (this.isMSIE) ? (window.event.x - this.pic.offsetLeft) : (e.pageX - pos.x);
	y = (this.isMSIE) ? (window.event.y - this.pic.offsetTop) : (e.pageY - pos.y);
	//compensate for a scrolled container
	x = x + this.pic_container.scrollLeft;
	y = y + this.pic_container.scrollTop;
	//this.log(x + ' - ' + y + ': ' + this.memory[this.currentid].downx + ' - ' +this.memory[this.currentid].downy);
	//exit if outside image
	if (x<0 || y<0 || x>this.pic.width || y>this.pic.height) {return;}
	//old dimensions that need to be updated in this function
	//(var is function-scoped, so top/left/height/width stay visible in the
	//branches below; they are undefined when there is no memory entry yet)
	if (this.memory[this.currentid]) {
		var top = this.memory[this.currentid].top;
		var left = this.memory[this.currentid].left;
		var height = this.memory[this.currentid].height;
		var width = this.memory[this.currentid].width;
	}
	// Handle shift state for Safari
	// Safari doesn't generate keyboard events for modifiers: http://bugs.webkit.org/show_bug.cgi?id=11696
	if (this.isSafari) {
		if (e.shiftKey) {
			if (this.is_drawing == this.DM_RECTANGLE_DRAW) {
				this.is_drawing = this.DM_SQUARE_DRAW;
				this.statusMessage(this.strings.SQUARE2_DRAW);
			}
		}
		else {
			if (this.is_drawing == this.DM_SQUARE_DRAW && this.areas[this.currentid].shape == 'rect') {
				//not for circle!
				this.is_drawing = this.DM_RECTANGLE_DRAW;
				this.statusMessage(this.strings.RECTANGLE_DRAW);
			}
		}
	}
	if (this.is_drawing == this.DM_RECTANGLE_DRAW) {
		//rectangle mode - size is the distance from the mousedown point
		this.fireEvent('onDrawArea', this.currentid);
		xdiff = x - this.memory[this.currentid].downx;
		ydiff = y - this.memory[this.currentid].downy;
		//alert(xdiff);
		this.setAreaSize(this.currentid, Math.abs(xdiff), Math.abs(ydiff));
		//dragging up/left of the anchor point moves the area's origin
		if (xdiff < 0) {
			this.areas[this.currentid].style.left = (x + 1) + 'px';
		}
		if (ydiff < 0) {
			this.areas[this.currentid].style.top = (y + 1) + 'px';
		}
	}
	else if (this.is_drawing == this.DM_SQUARE_DRAW) {
		//square mode - align to shorter side
		this.fireEvent('onDrawArea', this.currentid);
		xdiff = x - this.memory[this.currentid].downx;
		ydiff = y - this.memory[this.currentid].downy;
		if (Math.abs(xdiff) < Math.abs(ydiff)) {
			diff = Math.abs(parseInt(xdiff, 10));
		}
		else {
			diff = Math.abs(parseInt(ydiff, 10));
		}
		//alert(xdiff);
		this.setAreaSize(this.currentid, diff, diff);
		//dragging up/left of the anchor point moves the area's origin
		if (xdiff < 0) {
			this.areas[this.currentid].style.left = (this.memory[this.currentid].downx + diff*-1) + 'px';
		}
		if (ydiff < 0) {
			this.areas[this.currentid].style.top = (this.memory[this.currentid].downy + diff*-1 + 1) + 'px';
		}
	}
	else if (this.is_drawing == this.DM_POLYGON_DRAW || this.is_drawing == this.DM_BEZIER_DRAW) {
		//polygon or bezier mode - just keep the bounding canvas large enough
		this.fireEvent('onDrawArea', this.currentid);
		this._polygongrow(this.areas[this.currentid], x, y);
	}
	else if (this.is_drawing == this.DM_RECTANGLE_MOVE || this.is_drawing == this.DM_SQUARE_MOVE) {
		this.fireEvent('onMoveArea', this.currentid);
		//rdownx/rdowny is the grab point relative to the area's corner
		x = x - this.memory[this.currentid].rdownx;
		y = y - this.memory[this.currentid].rdowny;
		//keep the area fully inside the image
		if (x + width > this.pic.width || y + height > this.pic.height) {return;}
		if (x < 0 || y < 0) {return;}
		//this.log(x + ' - '+width+ '+'+this.memory[this.currentid].rdownx +'='+xdiff );
		this.areas[this.currentid].style.left = x + 1 + 'px';
		this.areas[this.currentid].style.top = y + 1 + 'px';
	}
	else if (this.is_drawing == this.DM_POLYGON_MOVE || this.is_drawing == this.DM_BEZIER_MOVE) {
		this.fireEvent('onMoveArea', this.currentid);
		x = x - this.memory[this.currentid].rdownx;
		y = y - this.memory[this.currentid].rdowny;
		//keep the area fully inside the image
		if (x + width > this.pic.width || y + height > this.pic.height) {return;}
		if (x < 0 || y < 0) {return;}
		xdiff = x - left;
		ydiff = y - top;
		//shift every control point by the same delta, relative to the
		//positions remembered at mousedown
		if (this.areas[this.currentid].xpoints) {
			for (var i=0, le = this.areas[this.currentid].xpoints.length; i<le; i++) {
				this.areas[this.currentid].xpoints[i] = this.memory[this.currentid].xpoints[i] + xdiff;
				this.areas[this.currentid].ypoints[i] = this.memory[this.currentid].ypoints[i] + ydiff;
			}
		}
		this.areas[this.currentid].style.left = x + 'px';
		this.areas[this.currentid].style.top = y + 'px';
	}
	else if (this.is_drawing == this.DM_SQUARE_RESIZE_LEFT) {
		this.fireEvent('onResizeArea', this.currentid);
		diff = x - left;
		//alert(diff);
		if ((width + (-1 * diff)) > 0) {
			//real resize left - keep the square centered vertically
			this.areas[this.currentid].style.left = x + 1 + 'px';
			this.areas[this.currentid].style.top = (top + (diff/2)) + 'px';
			this.setAreaSize(this.currentid, parseInt(width + (-1 * diff), 10), parseInt(height + (-1 * diff), 10));
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].width = 0;
			this.memory[this.currentid].height = 0;
			this.memory[this.currentid].left = x;
			this.memory[this.currentid].top = y;
			this.is_drawing = this.DM_SQUARE_RESIZE_RIGHT;
		}
	}
	else if (this.is_drawing == this.DM_SQUARE_RESIZE_RIGHT) {
		this.fireEvent('onResizeArea', this.currentid);
		diff = x - left - width;
		if ((width + (diff)) - 1 > 0) {
			//real resize right - keep the square centered vertically
			this.areas[this.currentid].style.top = (top + (-1* diff/2)) + 'px';
			this.setAreaSize(this.currentid, (width + (diff)) - 1, (height + (diff)));
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].width = 0;
			this.memory[this.currentid].height = 0;
			this.memory[this.currentid].left = x;
			this.memory[this.currentid].top = y;
			this.is_drawing = this.DM_SQUARE_RESIZE_LEFT;
		}
	}
	else if (this.is_drawing == this.DM_SQUARE_RESIZE_TOP) {
		this.fireEvent('onResizeArea', this.currentid);
		diff = y - top;
		if ((width + (-1 * diff)) > 0) {
			//real resize top - keep the square centered horizontally
			this.areas[this.currentid].style.top = y + 1 + 'px';
			this.areas[this.currentid].style.left = (left + (diff/2)) + 'px';
			this.setAreaSize(this.currentid, (width + (-1 * diff)), (height + (-1 * diff)));
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].width = 0;
			this.memory[this.currentid].height = 0;
			this.memory[this.currentid].left = x;
			this.memory[this.currentid].top = y;
			this.is_drawing = this.DM_SQUARE_RESIZE_BOTTOM;
		}
	}
	else if (this.is_drawing == this.DM_SQUARE_RESIZE_BOTTOM) {
		this.fireEvent('onResizeArea', this.currentid);
		diff = y - top - height;
		if ((width + (diff)) - 1 > 0) {
			//real resize bottom - keep the square centered horizontally
			this.areas[this.currentid].style.left = (left + (-1* diff/2)) + 'px';
			this.setAreaSize(this.currentid, (width + (diff)) - 1 , (height + (diff)) - 1);
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].width = 0;
			this.memory[this.currentid].height = 0;
			this.memory[this.currentid].left = x;
			this.memory[this.currentid].top = y;
			this.is_drawing = this.DM_SQUARE_RESIZE_TOP;
		}
	}
	else if (this.is_drawing == this.DM_RECTANGLE_RESIZE_LEFT) {
		this.fireEvent('onResizeArea', this.currentid);
		xdiff = x - left;
		if (width + (-1 * xdiff) > 0) {
			//real resize left
			this.areas[this.currentid].style.left = x + 1 + 'px';
			this.setAreaSize(this.currentid, width + (-1 * xdiff), null);
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].width = 0;
			this.memory[this.currentid].left = x;
			this.is_drawing = this.DM_RECTANGLE_RESIZE_RIGHT;
		}
	}
	else if (this.is_drawing == this.DM_RECTANGLE_RESIZE_RIGHT) {
		this.fireEvent('onResizeArea', this.currentid);
		xdiff = x - left - width;
		if ((width + (xdiff)) - 1 > 0) {
			//real resize right
			this.setAreaSize(this.currentid, (width + (xdiff)) - 1, null);
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].width = 0;
			this.memory[this.currentid].left = x;
			this.is_drawing = this.DM_RECTANGLE_RESIZE_LEFT;
		}
	}
	else if (this.is_drawing == this.DM_RECTANGLE_RESIZE_TOP) {
		this.fireEvent('onResizeArea', this.currentid);
		ydiff = y - top;
		if ((height + (-1 * ydiff)) > 0) {
			//real resize top
			this.areas[this.currentid].style.top = y + 1 + 'px';
			this.setAreaSize(this.currentid, null, (height + (-1 * ydiff)));
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].height = 0;
			this.memory[this.currentid].top = y;
			this.is_drawing = this.DM_RECTANGLE_RESIZE_BOTTOM;
		}
	}
	else if (this.is_drawing == this.DM_RECTANGLE_RESIZE_BOTTOM) {
		this.fireEvent('onResizeArea', this.currentid);
		ydiff = y - top - height;
		if ((height + (ydiff)) - 1 > 0) {
			//real resize bottom
			this.setAreaSize(this.currentid, null, (height + (ydiff)) - 1);
		}
		else {
			//dragged past the opposite edge - flip to resizing the other side
			this.memory[this.currentid].height = 0;
			this.memory[this.currentid].top = y;
			this.is_drawing = this.DM_RECTANGLE_RESIZE_TOP;
		}
	}
	//repaint canvas elements after any state handled above
	if (this.is_drawing) {
		this._repaint(this.areas[this.currentid], this.config.CL_DRAW_SHAPE, x, y);
		this._updatecoords(this.currentid);
	}
};
/**
 * EVENT HANDLER: Handles mouseup on the image.
 * Finishes dragging and resizing operations; draw operations are finished
 * on mousedown instead, so all *_DRAW states are ignored here.
 * @param e The event object.
 */
imgmap.prototype.img_mouseup = function(e) {
	if (this.viewmode === 1) {return;}//preview mode is read-only
	//console.log('img_mouseup');
	var pos = this._getPos(this.pic);
	var x = (this.isMSIE) ? (window.event.x - this.pic.offsetLeft) : (e.pageX - pos.x);
	var y = (this.isMSIE) ? (window.event.y - this.pic.offsetTop) : (e.pageY - pos.y);
	x = x + this.pic_container.scrollLeft;
	y = y + this.pic_container.scrollTop;
	//draw states are completed by img_mousedown - nothing to do for them here
	var state = this.is_drawing;
	if (state == this.DM_RECTANGLE_DRAW || state == this.DM_SQUARE_DRAW ||
		state == this.DM_POLYGON_DRAW || state == this.DM_POLYGON_LASTDRAW ||
		state == this.DM_BEZIER_DRAW || state == this.DM_BEZIER_LASTDRAW) {
		return;
	}
	//end dragging / resizing
	this.draggedId = null;
	//finish state
	this.is_drawing = 0;
	this.statusMessage(this.strings.READY);
	this.relaxArea(this.currentid);
	if (this.areas[this.currentid] == this._getLastArea()) {
		//if (this.config.mode != "editor2") this.addNewArea();
		return;
	}
	//remember where the button was released
	this.memory[this.currentid].downx = x;
	this.memory[this.currentid].downy = y;
};
/**
* EVENT HANDLER: Handles mousedown on the image.
* Handles beggining or end of draw, or polygon/bezier point set.
* @param e The event object.
*/
imgmap.prototype.img_mousedown = function(e) {
if (this.viewmode === 1) {return;}//exit if preview mode
if (!this.areas[this.currentid] && this.config.mode != "editor2") {return;}
//console.log('img_mousedown');
var pos = this._getPos(this.pic);
var x = (this.isMSIE) ? (window.event.x - this.pic.offsetLeft) : (e.pageX - pos.x);
var y = (this.isMSIE) ? (window.event.y - this.pic.offsetTop) : (e.pageY - pos.y);
x = x + this.pic_container.scrollLeft;
y = y + this.pic_container.scrollTop;
// Handle the Shift state
if (!e) {
e = window.event;
}
if (e.shiftKey) {
if (this.is_drawing == this.DM_POLYGON_DRAW) {
this.is_drawing = this.DM_POLYGON_LASTDRAW;
}
else if (this.is_drawing == this.DM_BEZIER_DRAW) {
this.is_drawing = this.DM_BEZIER_LASTDRAW;
}
}
//console.log(this.is_drawing);
//this.statusMessage(x + ' - ' + y + ': ' + this.props[this.currentid].getElementsByTagName('select')[0].value);
if (this.is_drawing == this.DM_POLYGON_DRAW || this.is_drawing == this.DM_BEZIER_DRAW) {
//its not finish state yet
this.areas[this.currentid].xpoints[this.areas[this.currentid].xpoints.length] = x - 5;
this.areas[this.currentid].ypoints[this.areas[this.currentid].ypoints.length] = y - 5;
this.memory[this.currentid].downx = x;
this.memory[this.currentid].downy = y;
return;
}
else if (this.is_drawing && this.is_drawing != this.DM_POLYGON_DRAW && this.is_drawing != this.DM_BEZIER_DRAW) {
//finish any other state
if (this.is_drawing == this.DM_POLYGON_LASTDRAW || this.is_drawing == this.DM_BEZIER_LASTDRAW) {
//add last controlpoint and update coords
this.areas[this.currentid].xpoints[this.areas[this.currentid].xpoints.length] = x - 5;
this.areas[this.currentid].ypoints[this.areas[this.currentid].ypoints.length] = y - 5;
this._updatecoords(this.currentid);
this.is_drawing = 0;
this._polygonshrink(this.areas[this.currentid]);
}
this.is_drawing = 0;
this.statusMessage(this.strings.READY);
this.relaxArea(this.currentid);
if (this.areas[this.currentid] == this._getLastArea()) {
//editor mode adds next area automatically
if (this.config.mode != "editor2") {this.addNewArea();}
return;
}
return;
}
if (this.config.mode == "editor2") {
if (!this.nextShape) {return;}
this.addNewArea();
//console.log("init: " + this.nextShape);
this.initArea(this.currentid, this.nextShape);
}
else if (this.areas[this.currentid].shape == 'undefined' || this.areas[this.currentid].shape == 'poly') {
//var shape = (this.props[this.currentid]) ? this.props[this.currentid].getElementsByTagName('select')[0].value : this.nextShape;
var shape = this.nextShape;
if (!shape) {shape = 'rect';}
//console.log("init: " + shape);
this.initArea(this.currentid, shape);
}
if (this.areas[this.currentid].shape == 'poly') {
this.is_drawing = this.DM_POLYGON_DRAW;
this.statusMessage(this.strings.POLYGON_DRAW);
this.areas[this.currentid].style.left = x + 'px';
this.areas[this.currentid].style.top = y + 'px';
this.areas[this.currentid].style.width = 0;
this.areas[this.currentid].style.height = 0;
this.areas[this.currentid].xpoints = [];
this.areas[this.currentid].ypoints = [];
this.areas[this.currentid].xpoints[0] = x;
this.areas[this.currentid].ypoints[0] = y;
}
else if (this.areas[this.currentid].shape == 'bezier1') {
this.is_drawing = this.DM_BEZIER_DRAW;
this.statusMessage(this.strings.BEZIER_DRAW);
this.areas[this.currentid].style.left = x + 'px';
this.areas[this.currentid].style.top = y + 'px';
this.areas[this.currentid].style.width = 0;
this.areas[this.currentid].style.height = 0;
this.areas[this.currentid].xpoints = [];
this.areas[this.currentid].ypoints = [];
this.areas[this.currentid].xpoints[0] = x;
this.areas[this.currentid].ypoints[0] = y;
}
else if (this.areas[this.currentid].shape == 'rect') {
this.is_drawing = this.DM_RECTANGLE_DRAW;
this.statusMessage(this.strings.RECTANGLE_DRAW);
this.areas[this.currentid].style.left = x + 'px';
this.areas[this.currentid].style.top = y + 'px';
this.areas[this.currentid].style.width = 0;
this.areas[this.currentid].style.height = 0;
}
else if (this.areas[this.currentid].shape == 'circle') {
this.is_drawing = this.DM_SQUARE_DRAW;
this.statusMessage(this.strings.SQUARE_DRAW);
this.areas[this.currentid].style.left = x + 'px';
this.areas[this.currentid].style.top = y + 'px';
this.areas[this.currentid].style.width = 0;
this.areas[this.currentid].style.height = 0;
}
this._setBorder(this.currentid, 'DRAW');
this.memory[this.currentid].downx = x;
this.memory[this.currentid].downy = y;
};
/**
 * Highlights a given area.
 * Switches border, background and opacity to the highlight colors and
 * repaints. No-op while a drawing operation is in progress or when the
 * area does not exist / was never initialized.
 * @date 2007.12.28. 18:23:00
 * @param id The id of the area to highlight.
 * @param flag Modifier, possible values: grad - for gradual fade in
 */
imgmap.prototype.highlightArea = function(id, flag) {
	if (this.is_drawing) {return;}//no highlight while drawing
	var area = this.areas[id];
	if (!area || area.shape == 'undefined') {return;}
	this.fireEvent('onFocusArea', area);
	this._setBorder(id, 'HIGHLIGHT');
	var opacity = this.config.highlight_opacity;
	if (flag == 'grad') {
		//leading dash makes _setopacity fade gradually
		opacity = '-' + opacity;
	}
	this._setopacity(area, this.config.CL_HIGHLIGHT_BG, opacity);
	this._repaint(area, this.config.CL_HIGHLIGHT_SHAPE);
};
/**
 * Blurs a given area.
 * Switches border, background and opacity back to the normal colors and
 * repaints. No-op while a drawing operation is in progress or when the
 * area does not exist / was never initialized.
 * @date 2007.12.28. 18:23:26
 * @param id The id of the area to blur.
 * @param flag Modifier, possible values: grad - for gradual fade out
 */
imgmap.prototype.blurArea = function(id, flag) {
	if (this.is_drawing) {return;}//no blur while drawing
	var area = this.areas[id];
	if (!area || area.shape == 'undefined') {return;}
	this.fireEvent('onBlurArea', area);
	this._setBorder(id, 'NORM');
	var opacity = this.config.norm_opacity;
	if (flag == 'grad') {
		//leading dash makes _setopacity fade gradually
		opacity = '-' + opacity;
	}
	this._setopacity(area, this.config.CL_NORM_BG, opacity);
	this._repaint(area, this.config.CL_NORM_SHAPE);
};
/**
 * EVENT HANDLER: Handles event of mousemove on imgmap areas.
 * - changes cursor depending where we are inside the area (buggy in opera)
 * - handles area resize
 * - handles area move
 * @url http://evolt.org/article/Mission_Impossible_mouse_position/17/23335/index.html
 * @url http://my.opera.com/community/forums/topic.dml?id=239498&t=1217158015&page=1
 * @author adam
 * @param e The event object.
 */
imgmap.prototype.area_mousemove = function(e) {
	if (this.viewmode === 1) {return;}//exit if preview mode
	if (!this.is_drawing) {
		//no draw in progress: update the cursor, and if the mouse button is
		//down on this area (draggedId), enter the matching resize/move mode
		var obj = (this.isMSIE) ? window.event.srcElement : e.currentTarget;
		if (obj.tagName == 'DIV') {
			//do this because of label
			obj = obj.parentNode;
		}
		if (obj.tagName == 'image' || obj.tagName == 'group' ||
			obj.tagName == 'shape' || obj.tagName == 'stroke') {
			//do this because of excanvas
			obj = obj.parentNode.parentNode;
		}
		//opera fix - adam - 04-12-2007 23:14:05
		if (this.isOpera) {
			e.layerX = e.offsetX;
			e.layerY = e.offsetY;
		}
		//cursor position relative to the area's top-left corner
		var xdiff = (this.isMSIE) ? (window.event.offsetX) : (e.layerX);
		var ydiff = (this.isMSIE) ? (window.event.offsetY) : (e.layerY);
		//this.log(obj.aid + ' : ' + xdiff + ',' + ydiff);
		//only rects and circles have edges to drag; polys/beziers are move-only
		var resizable = (obj.shape == 'rect' || obj.shape == 'circle');
		//a 6px band along each edge selects the matching resize cursor
		if (resizable && xdiff < 6 && ydiff > 6) {
			//move left
			obj.style.cursor = 'w-resize';
		}
		else if (resizable && xdiff > parseInt(obj.style.width, 10) - 6 && ydiff > 6) {
			//move right
			obj.style.cursor = 'e-resize';
		}
		else if (resizable && xdiff > 6 && ydiff < 6) {
			//move top
			obj.style.cursor = 'n-resize';
		}
		else if (resizable && ydiff > parseInt(obj.style.height, 10) - 6 && xdiff > 6) {
			//move bottom
			obj.style.cursor = 's-resize';
		}
		else {
			//move all
			obj.style.cursor = 'move';
		}
		if (obj.aid != this.draggedId) {
			//not dragged or different
			if (obj.style.cursor == 'move') {obj.style.cursor = 'default';}
			return;
		}
		//moved here from mousedown
		//same edge-band test as above, but now it picks the drawing mode
		if (xdiff < 6 && ydiff > 6) {
			//move left
			if (this.areas[this.currentid].shape == 'circle') {
				this.is_drawing = this.DM_SQUARE_RESIZE_LEFT;
				this.statusMessage(this.strings.SQUARE_RESIZE_LEFT);
			}
			else if (this.areas[this.currentid].shape == 'rect') {
				this.is_drawing = this.DM_RECTANGLE_RESIZE_LEFT;
				this.statusMessage(this.strings.RECTANGLE_RESIZE_LEFT);
			}
		}
		else if (xdiff > parseInt(this.areas[this.currentid].style.width, 10) - 6 && ydiff > 6) {
			//move right
			if (this.areas[this.currentid].shape == 'circle') {
				this.is_drawing = this.DM_SQUARE_RESIZE_RIGHT;
				this.statusMessage(this.strings.SQUARE_RESIZE_RIGHT);
			}
			else if (this.areas[this.currentid].shape == 'rect') {
				this.is_drawing = this.DM_RECTANGLE_RESIZE_RIGHT;
				this.statusMessage(this.strings.RECTANGLE_RESIZE_RIGHT);
			}
		}
		else if (xdiff > 6 && ydiff < 6) {
			//move top
			if (this.areas[this.currentid].shape == 'circle') {
				this.is_drawing = this.DM_SQUARE_RESIZE_TOP;
				this.statusMessage(this.strings.SQUARE_RESIZE_TOP);
			}
			else if (this.areas[this.currentid].shape == 'rect') {
				this.is_drawing = this.DM_RECTANGLE_RESIZE_TOP;
				this.statusMessage(this.strings.RECTANGLE_RESIZE_TOP);
			}
		}
		else if (ydiff > parseInt(this.areas[this.currentid].style.height, 10) - 6 && xdiff > 6) {
			//move bottom
			if (this.areas[this.currentid].shape == 'circle') {
				this.is_drawing = this.DM_SQUARE_RESIZE_BOTTOM;
				this.statusMessage(this.strings.SQUARE_RESIZE_BOTTOM);
			}
			else if (this.areas[this.currentid].shape == 'rect') {
				this.is_drawing = this.DM_RECTANGLE_RESIZE_BOTTOM;
				this.statusMessage(this.strings.RECTANGLE_RESIZE_BOTTOM);
			}
		}
		else/*if (xdiff < 10 && ydiff < 10 ) */{
			//move all
			//rdownx/rdowny remember the grab point inside the area, so the
			//move handlers can keep the cursor anchored to the same spot
			if (this.areas[this.currentid].shape == 'circle') {
				this.is_drawing = this.DM_SQUARE_MOVE;
				this.statusMessage(this.strings.SQUARE_MOVE);
				this.memory[this.currentid].rdownx = xdiff;
				this.memory[this.currentid].rdowny = ydiff;
			}
			else if (this.areas[this.currentid].shape == 'rect') {
				this.is_drawing = this.DM_RECTANGLE_MOVE;
				this.statusMessage(this.strings.RECTANGLE_MOVE);
				this.memory[this.currentid].rdownx = xdiff;
				this.memory[this.currentid].rdowny = ydiff;
			}
			else if (this.areas[this.currentid].shape == 'poly' || this.areas[this.currentid].shape == 'bezier1') {
				//snapshot every vertex so the move can be applied as a delta
				if (this.areas[this.currentid].xpoints) {
					for (var i=0, le = this.areas[this.currentid].xpoints.length; i<le; i++) {
						this.memory[this.currentid].xpoints[i] = this.areas[this.currentid].xpoints[i];
						this.memory[this.currentid].ypoints[i] = this.areas[this.currentid].ypoints[i];
					}
				}
				if (this.areas[this.currentid].shape == 'poly') {
					this.is_drawing = this.DM_POLYGON_MOVE;
					this.statusMessage(this.strings.POLYGON_MOVE);
				}
				else if (this.areas[this.currentid].shape == 'bezier1') {
					this.is_drawing = this.DM_BEZIER_MOVE;
					this.statusMessage(this.strings.BEZIER_MOVE);
				}
				this.memory[this.currentid].rdownx = xdiff;
				this.memory[this.currentid].rdowny = ydiff;
			}
		}
		//common memory settings (preparing to move or resize)
		this.memory[this.currentid].width = parseInt(this.areas[this.currentid].style.width, 10);
		this.memory[this.currentid].height = parseInt(this.areas[this.currentid].style.height, 10);
		this.memory[this.currentid].top = parseInt(this.areas[this.currentid].style.top, 10);
		this.memory[this.currentid].left = parseInt(this.areas[this.currentid].style.left, 10);
		this._setBorder(this.currentid, 'DRAW');
		this._setopacity(this.areas[this.currentid], this.config.CL_DRAW_BG, this.config.draw_opacity);
	}
	else {
		//if drawing and not ie, have to propagate to image event
		this.img_mousemove(e);
	}
};
/**
 * EVENT HANDLER: Handles event of mouseup on imgmap areas.
 * Basically clears draggedId.
 * @author adam
 * @param e The event object
 */
imgmap.prototype.area_mouseup = function(e) {
	if (this.viewmode === 1) {return;}//exit if preview mode
	if (this.is_drawing) {
		//while drawing (non-IE) the event must reach the image handler
		this.img_mouseup(e);
		return;
	}
	//resolve the element that really fired the event
	var obj = (this.isMSIE) ? window.event.srcElement : e.currentTarget;
	if (obj.tagName == 'DIV') {
		//the label div fired the event - step up to the area itself
		obj = obj.parentNode;
	}
	if (obj.tagName == 'image' || obj.tagName == 'group' ||
		obj.tagName == 'shape' || obj.tagName == 'stroke') {
		//excanvas (VML) internals fired the event - step up to the area
		obj = obj.parentNode.parentNode;
	}
	if (this.areas[this.currentid] != obj) {
		//event came from a different canvas than the current one
		if (typeof obj.aid == 'undefined') {
			this.log('Cannot identify target area', 1);
			return;
		}
	}
	//the drag (if any) ends here
	this.draggedId = null;
};
/**
 * EVENT HANDLER: Handles event of mouseover on imgmap areas.
 * Calls gradual highlight on the given area.
 * @author adam
 * @param e The event object
 */
imgmap.prototype.area_mouseover = function(e) {
	if (this.viewmode === 1 && this.config.mode !== 'highlighter_spawn') {return;}//exit if preview mode
	if (this.is_drawing) {return;}
	//resolve the element that really fired the event
	var obj = (this.isMSIE) ? window.event.srcElement : e.currentTarget;
	if (obj.tagName == 'DIV') {
		//the label div fired the event - step up to the area itself
		obj = obj.parentNode;
	}
	if (obj.tagName == 'image' || obj.tagName == 'group' ||
		obj.tagName == 'shape' || obj.tagName == 'stroke') {
		//excanvas (VML) internals fired the event - step up to the area
		obj = obj.parentNode.parentNode;
	}
	//gradually fade the hovered area in
	this.highlightArea(obj.aid, 'grad');
};
/**
 * EVENT HANDLER: Handles event of mouseout on imgmap areas.
 * Calls gradient blur on the given area.
 * @author adam
 * @param e The event object
 */
imgmap.prototype.area_mouseout = function(e) {
	if (this.viewmode === 1 && this.config.mode !== 'highlighter_spawn') {return;}//exit if preview mode
	if (this.is_drawing) {return;}
	//resolve the element that really fired the event
	var obj = (this.isMSIE) ? window.event.srcElement : e.currentTarget;
	if (obj.tagName == 'DIV') {
		//the label div fired the event - step up to the area itself
		obj = obj.parentNode;
	}
	if (obj.tagName == 'image' || obj.tagName == 'group' ||
		obj.tagName == 'shape' || obj.tagName == 'stroke') {
		//excanvas (VML) internals fired the event - step up to the area
		obj = obj.parentNode.parentNode;
	}
	//gradually fade the area back to its normal state
	this.blurArea(obj.aid, 'grad');
};
/**
 * EVENT HANDLER: Handles event of double click on imgmap areas.
 * Basically only fires the custom callback.
 * @author Colin Bell
 * @param e The event object
 */
imgmap.prototype.area_dblclick = function(e) {
	if (this.viewmode === 1) {return;}//exit if preview mode
	if (this.is_drawing) {return;}
	//resolve the element that really fired the event
	var obj = (this.isMSIE) ? window.event.srcElement : e.currentTarget;
	if (obj.tagName == 'DIV') {
		//the label div fired the event - step up to the area itself
		obj = obj.parentNode;
	}
	if (obj.tagName == 'image' || obj.tagName == 'group' ||
		obj.tagName == 'shape' || obj.tagName == 'stroke') {
		//excanvas (VML) internals fired the event - step up to the area
		obj = obj.parentNode.parentNode;
	}
	if (this.areas[this.currentid] != obj) {
		//double-clicked a different area - make it current
		if (typeof obj.aid == 'undefined') {
			this.log('Cannot identify target area', 1);
			return;
		}
		this.currentid = obj.aid;
	}
	this.fireEvent('onDblClickArea', this.areas[this.currentid]);
	//stop event propagation to document level
	if (this.isMSIE) {
		window.event.cancelBubble = true;
	}
	else {
		e.stopPropagation();
	}
};
/**
 * EVENT HANDLER: Handles event of mousedown on imgmap areas.
 * Sets the variables draggedid, selectedid and currentid to the given area.
 * @author adam
 * @param e The event object
 */
imgmap.prototype.area_mousedown = function(e) {
	if (this.viewmode === 1 && this.config.mode !== 'highlighter_spawn') {return;}//exit if preview mode
	if (this.is_drawing) {
		//while drawing (non-IE) the event must reach the image handler
		this.img_mousedown(e);
		return;
	}
	//resolve the element that really fired the event
	var obj = (this.isMSIE) ? window.event.srcElement : e.currentTarget;
	if (obj.tagName == 'DIV') {
		//the label div fired the event - step up to the area itself
		obj = obj.parentNode;
	}
	if (obj.tagName == 'image' || obj.tagName == 'group' ||
		obj.tagName == 'shape' || obj.tagName == 'stroke') {
		//excanvas (VML) internals fired the event - step up to the area
		obj = obj.parentNode.parentNode;
	}
	if (this.areas[this.currentid] != obj) {
		//pressed on a different area - make it the current one
		if (typeof obj.aid == 'undefined') {
			this.log('Cannot identify target area', 1);
			return;
		}
		this.currentid = obj.aid;
	}
	//a drag may start from here; selection follows the press
	this.draggedId  = this.currentid;
	this.selectedId = this.currentid;
	this.fireEvent('onSelectArea', this.areas[this.currentid]);
	//stop event propagation to document level
	if (this.isMSIE) {
		window.event.cancelBubble = true;
	}
	else {
		e.stopPropagation();
	}
};
/**
 * EVENT HANDLER: Handles event 'keydown' on document.
 * Handles SHIFT hold while drawing.
 * Note: Safari doesn't generate keyboard events for modifiers:
 * @url http://bugs.webkit.org/show_bug.cgi?id=11696
 * @author adam
 * @param e The event object
 */
imgmap.prototype.doc_keydown = function(e) {
	if (this.viewmode === 1) {return;}//exit if preview mode
	var key = (this.isMSIE) ? event.keyCode : e.keyCode;
	switch (key) {
	case 46:
		//delete key - remove the selected area, unless a draw is in progress
		if (this.selectedId !== null && !this.is_drawing) {this.removeArea(this.selectedId);}
		break;
	case 16:
		//shift key - constrain an ongoing rectangle draw to a square
		if (this.is_drawing == this.DM_RECTANGLE_DRAW) {
			this.is_drawing = this.DM_SQUARE_DRAW;
			this.statusMessage(this.strings.SQUARE2_DRAW);
		}
		break;
	}
};
/**
 * EVENT HANDLER: Handles event 'keyup' on document.
 * Handles SHIFT release while drawing.
 * @author adam
 * @param e The event object
 */
imgmap.prototype.doc_keyup = function(e) {
	var key = (this.isMSIE) ? event.keyCode : e.keyCode;
	//alert(key);
	if (key == 16) {
		//shift key released - fall back from square to rectangle drawing.
		//Only for rect areas (circles must stay square), and only if the
		//current area still exists: it may have been deleted while SHIFT was
		//held (doc_keydown handles the delete key), in which case the
		//unguarded .shape access used to throw.
		if (this.is_drawing == this.DM_SQUARE_DRAW &&
			this.areas[this.currentid] && this.areas[this.currentid].shape == 'rect') {
			this.is_drawing = this.DM_RECTANGLE_DRAW;
			this.statusMessage(this.strings.RECTANGLE_DRAW);
		}
	}
};
/**
 * EVENT HANDLER: Handles event 'mousedown' on document.
 * @author adam
 * @param e The event object
 */
imgmap.prototype.doc_mousedown = function(e) {
	if (this.viewmode === 1) {return;}//exit if preview mode
	//a click that reaches the document (i.e. not on an area) drops the selection
	if (!this.is_drawing) {this.selectedId = null;}
};
/**
 * Get the real position of the element.
 * Deal with browser differences when trying to get the position of an area.
 * @param element The element you want the position of.
 * @return An object with x and y members.
 */
imgmap.prototype._getPos = function(element) {
	var pos = {x: 0, y: 0};
	if (element) {
		if (element.offsetParent) {
			//walk up the offsetParent chain, accumulating offsets
			var parent;
			while ((parent = element.offsetParent)) {
				//offset might give negative in opera when the image is scrolled
				if (element.offsetLeft > 0) {pos.x += element.offsetLeft;}
				if (element.offsetTop > 0) {pos.y += element.offsetTop;}
				element = parent;
			}
		}
		else {
			//no offset parent at all - the element's own offsets are absolute
			pos.x = element.offsetLeft;
			pos.y = element.offsetTop;
		}
	}
	return pos;
};
/**
 * Gets the last (visible and editable) area.
 * @author Adam Maschek (adam.maschek(at)gmail.com)
 * @date 2006-06-15 16:34:51
 * @returns The last area object or null.
 */
imgmap.prototype._getLastArea = function() {
	//scan backwards, skipping deleted (null) slots
	var i = this.areas.length;
	while (i--) {
		if (this.areas[i]) {return this.areas[i];}
	}
	return null;
};
/**
 * Parses cssText to single style declarations.
 * @author adam
 * @date 25-09-2007 18:19:51
 * @param obj The DOM object to apply styles on.
 * @param cssText The css declarations to apply.
 */
imgmap.prototype.assignCSS = function(obj, cssText) {
	var decls = cssText.split(';');
	for (var i = 0; i < decls.length; i++) {
		var pair = decls[i].split(':');
		//camelcase the property name on '-' boundaries (font-size -> fontSize)
		var words = this.trim(pair[0]).split('-');
		var prop = words[0];
		for (var j = 1; j < words.length; j++) {
			prop += words[j].replace(/^\w/, words[j].substring(0,1).toUpperCase());
		}
		obj.style[this.trim(prop)] = this.trim(pair[1]);
	}
};
/**
 * To fire callback hooks on custom events, passing them the object of the event.
 * @author adam
 * @date 13-10-2007 15:24:49
 * @param evt The type of event
 * @param obj The object of the event. (can be an id, a string, an object, whatever is most relevant)
 */
imgmap.prototype.fireEvent = function(evt, obj) {
	//invoke through the callbacks object so the hook's 'this' stays unchanged
	var hooks = this.config.custom_callbacks;
	if (typeof hooks[evt] == 'function') {
		return hooks[evt](obj);
	}
};
/**
 * To set area dimensions.
 * This is needed to achieve the same result in all browsers.
 * @author adam
 * @date 10-12-2007 22:29:41
 * @param id The id of the area (canvas) to resize; null means the current area.
 * @param w The desired width in pixels (null to leave unchanged).
 * @param h The desired height in pixels (null to leave unchanged).
 */
imgmap.prototype.setAreaSize = function(id, w, h) {
	if (id === null) {id = this.currentid;}
	var area = this.areas[id];
	//set property, style and attribute alike for cross-browser consistency
	if (w !== null) {
		area.width = w;
		area.style.width = (w) + 'px';
		area.setAttribute('width', w);
	}
	if (h !== null) {
		area.height = h;
		area.style.height = (h) + 'px';
		area.setAttribute('height', h);
	}
};
/**
 * Tries to detect preferred language of user.
 * @date 2007.12.28. 15:43:46
 * @return The two byte language code. (We dont care now for pt-br, etc.)
 */
imgmap.prototype.detectLanguage = function() {
	//userLanguage is the legacy IE name, language the standard one
	var lang = navigator.userLanguage || navigator.language;
	if (!lang) {return this.config.defaultLang;}
	lang = lang.toLowerCase();
	//keep only the primary subtag ('en-us' -> 'en')
	if (lang.length >= 2) {
		return lang.substring(0,2);
	}
	return this.config.defaultLang;
};
/**
 * Disable selection on a given object.
 * This is especially useful in Safari, where dragging around areas
 * keeps selecting all sorts of things.
 * @author Bret Taylor
 * @url http://ajaxcookbook.org/disable-text-selection/
 * @date 27-07-2008 1:57:45
 * @param element The DOM element on which you want to disable selection.
 */
imgmap.prototype.disableSelection = function(element) {
	if (!element) {return false;}
	//IE/Safari style event hook
	if (typeof element.onselectstart != "undefined") {
		element.onselectstart = function() {return false;};
	}
	//legacy IE attribute
	if (typeof element.unselectable != "undefined") {
		element.unselectable = "on";
	}
	//gecko style property
	if (typeof element.style.MozUserSelect != "undefined") {
		element.style.MozUserSelect = "none";
	}
};
/**
 * Method-reference helper: returns a function with 'this' bound to object.
 * Kept as a guarded fallback only - unconditionally overwriting the native
 * ES5 Function.prototype.bind (as the original did) breaks partial
 * application for every other script on the page. The fallback below also
 * forwards pre-bound arguments, matching native semantics; this file itself
 * only ever calls .bind(this), which behaves exactly as before.
 * @date 11-02-2007 19:57:05
 * @url http://www.deepwood.net/writing/method-references.html.utf8
 * @author Daniel Brockman
 * @addon
 */
if (!Function.prototype.bind) {
	Function.prototype.bind = function(object) {
		var method = this;
		var bound = Array.prototype.slice.call(arguments, 1);
		return function () {
			return method.apply(object, bound.concat(Array.prototype.slice.call(arguments)));
		};
	};
}
/**
 * Trims a string.
 * Changed not to extend String but use own function for better compatibility.
 * @param str The string to trim.
 */
imgmap.prototype.trim = function(str) {
	//strip leading, then trailing whitespace
	return str.replace(/^\s+/, '').replace(/\s+$/, '');
};
/**
 * Spawn an imgmap object for each imagemap found in the document.
 * This is used for highlighter mode only.
 * @param config An imgmap config object
 */
function imgmap_spawnObjects(config) {
	var maps = document.getElementsByTagName('map');
	var imgs = document.getElementsByTagName('img');
	var imaps = [];
	for (var i = 0, le = maps.length; i < le; i++) {
		//an img references a map through usemap="#<map name>"
		var needle = '#' + maps[i].name;
		for (var j = 0, le2 = imgs.length; j < le2; j++) {
			if (imgs[j].getAttribute('usemap') != needle) {continue;}
			//found a matching img+map pair - spawn a read-only editor on it
			config.mode = 'highlighter_spawn';
			var imapn = new imgmap(config);
			imapn.useImage(imgs[j]);
			imapn.setMapHTML(maps[i]);
			imapn.viewmode = 1;
			imaps.push(imapn);
		}
	}
}
//global instance?
//imgmap_spawnObjects();? | zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/skins/zopyx_tinymceplugins_imgmap/imgmap/jscripts/imgmap.js | imgmap.js |
// ---------------------------------------------------------------------------
// NOTE(review): from here on this appears to be a MINIFIED/PACKED build of
// the imgmap library (identifiers renamed to _1, _3, ... by a compressor) -
// presumably concatenated after the readable source above; confirm against
// the build setup before editing. Do not edit by hand; change the readable
// source and regenerate instead.
// This chunk: the imgmap constructor (version/config/state initialisation,
// drawing-mode constants, browser sniffing) ending in this.setup(_1).
// CAUTION: the config.hint string literal spans the next line break - do not
// rewrap these two lines.
// ---------------------------------------------------------------------------
function imgmap(_1){this.version="2.2";this.buildDate="2009/07/26 16:02";this.buildNumber="106";this.config={};this.is_drawing=0;this.strings=[];this.memory=[];this.areas=[];this.logStore=[];this.eventHandlers={};this.currentid=0;this.draggedId=null;this.selectedId=null;this.nextShape="rect";this.viewmode=0;this.loadedScripts=[];this.isLoaded=false;this.cntReloads=0;this.mapname="";this.mapid="";this.waterMark="<!-- Created by Online Image Map Editor (http://www.maschek.hu/imagemap/index) -->";this.globalscale=1;this.DM_RECTANGLE_DRAW=1;this.DM_RECTANGLE_MOVE=11;this.DM_RECTANGLE_RESIZE_TOP=12;this.DM_RECTANGLE_RESIZE_RIGHT=13;this.DM_RECTANGLE_RESIZE_BOTTOM=14;this.DM_RECTANGLE_RESIZE_LEFT=15;this.DM_SQUARE_DRAW=2;this.DM_SQUARE_MOVE=21;this.DM_SQUARE_RESIZE_TOP=22;this.DM_SQUARE_RESIZE_RIGHT=23;this.DM_SQUARE_RESIZE_BOTTOM=24;this.DM_SQUARE_RESIZE_LEFT=25;this.DM_POLYGON_DRAW=3;this.DM_POLYGON_LASTDRAW=30;this.DM_POLYGON_MOVE=31;this.DM_BEZIER_DRAW=4;this.DM_BEZIER_LASTDRAW=40;this.DM_BEZIER_MOVE=41;this.config.mode="editor";this.config.baseroot="";this.config.lang="";this.config.defaultLang="en";this.config.loglevel=0;this.config.custom_callbacks={};this.event_types=["onModeChanged","onHtmlChanged","onAddArea","onRemoveArea","onDrawArea","onResizeArea","onRelaxArea","onFocusArea","onBlurArea","onMoveArea","onSelectRow","onLoadImage","onSetMap","onGetMap","onSelectArea","onDblClickArea","onStatusMessage","onAreaChanged"];this.config.CL_DRAW_BOX="#E32636";this.config.CL_DRAW_SHAPE="#d00";this.config.CL_DRAW_BG="#fff";this.config.CL_NORM_BOX="#E32636";this.config.CL_NORM_SHAPE="#d00";this.config.CL_NORM_BG="#fff";this.config.CL_HIGHLIGHT_BOX="#E32636";this.config.CL_HIGHLIGHT_SHAPE="#d00";this.config.CL_HIGHLIGHT_BG="#fff";this.config.CL_KNOB="#555";this.config.bounding_box=true;this.config.label="%n";this.config.label_class="imgmap_label";this.config.label_style="font: bold 10px Arial";this.config.hint="#%n 
%h";this.config.draw_opacity="35";this.config.norm_opacity="50";this.config.highlight_opacity="70";this.config.cursor_default="crosshair";var ua=navigator.userAgent;this.isMSIE=(navigator.appName=="Microsoft Internet Explorer");this.isMSIE5=this.isMSIE&&(ua.indexOf("MSIE 5")!=-1);this.isMSIE5_0=this.isMSIE&&(ua.indexOf("MSIE 5.0")!=-1);this.isMSIE7=this.isMSIE&&(ua.indexOf("MSIE 7")!=-1);this.isGecko=ua.indexOf("Gecko")!=-1;this.isSafari=ua.indexOf("Safari")!=-1;this.isOpera=(typeof window.opera!="undefined");this.setup(_1);}
// Minified assignOID/setup/retryDelayed/onLoad (object-or-id resolution,
// config merge + global event wiring, delayed retry helper, language-strings
// bootstrap) - same API as the readable definitions earlier in this file.
imgmap.prototype.assignOID=function(_3){try{if(typeof _3=="undefined"){this.log("Undefined object passed to assignOID.");return null;}else{if(typeof _3=="object"){return _3;}else{if(typeof _3=="string"){return document.getElementById(_3);}}}}
catch(err){this.log("Error in assignOID",1);}
return null;};imgmap.prototype.setup=function(_4){for(var i in _4){if(_4.hasOwnProperty(i)){this.config[i]=_4[i];}}
this.addEvent(document,"keydown",this.eventHandlers.doc_keydown=this.doc_keydown.bind(this));this.addEvent(document,"keyup",this.eventHandlers.doc_keyup=this.doc_keyup.bind(this));this.addEvent(document,"mousedown",this.eventHandlers.doc_mousedown=this.doc_mousedown.bind(this));if(_4&&_4.pic_container){this.pic_container=this.assignOID(_4.pic_container);this.disableSelection(this.pic_container);}
if(!this.config.baseroot){var _6=document.getElementsByTagName("base");var _7="";for(i=0;i<_6.length;i++){if(_6[i].href){_7=_6[i].href;if(_7.charAt(_7.length-1)!="/"){_7+="/";}
break;}}
var _8=document.getElementsByTagName("script");for(i=0;i<_8.length;i++){if(_8[i].src&&_8[i].src.match(/imgmap\w*\.js(\?.*?)?$/)){var _9=_8[i].src;_9=_9.substring(0,_9.lastIndexOf("/")+1);if(_7&&_9.indexOf("://")==-1){this.config.baseroot=_7+_9;}else{this.config.baseroot=_9;}
break;}}}
if(this.isMSIE&&typeof window.CanvasRenderingContext2D=="undefined"&&typeof G_vmlCanvasManager=="undefined"){this.loadScript(this.config.baseroot+"excanvas.js");}
if(!this.config.lang){this.config.lang=this.detectLanguage();}
if(typeof imgmapStrings=="undefined"){this.loadScript(this.config.baseroot+"lang_"+this.config.lang+".js");}
var _a,j,le;for(i in this.config.custom_callbacks){if(this.config.custom_callbacks.hasOwnProperty(i)){_a=false;for(j=0,le=this.event_types.length;j<le;j++){if(i==this.event_types[j]){_a=true;break;}}
if(!_a){this.log("Unknown custom callback: "+i,1);}}}
this.addEvent(window,"load",this.onLoad.bind(this));return true;};imgmap.prototype.retryDelayed=function(fn,_e,_f){if(typeof fn.tries=="undefined"){fn.tries=0;}
if(fn.tries++<_f){window.setTimeout(function(){fn.apply(this);},_e);}};imgmap.prototype.onLoad=function(e){if(this.isLoaded){return true;}
var _11=this;if(typeof imgmapStrings=="undefined"){if(this.cntReloads++<5){window.setTimeout(function(){_11.onLoad(e);},1200);this.log("Delaying onload (language "+this.config.lang+" not loaded, try: "+this.cntReloads+")");return false;}else{if(this.config.lang!=this.config.defaultLang&&this.config.defaultLang!="en"){this.log("Falling back to default language: "+this.config.defaultLang);this.cntReloads=0;this.config.lang=this.config.defaultLang;this.loadScript(this.config.baseroot+"lang_"+this.config.lang+".js");window.setTimeout(function(){_11.onLoad(e);},1200);return false;}else{if(this.config.lang!="en"){this.log("Falling back to english language");this.cntReloads=0;this.config.lang="en";this.loadScript(this.config.baseroot+"lang_"+this.config.lang+".js");window.setTimeout(function(){_11.onLoad(e);},1200);return false;}}}}
try{this.loadStrings(imgmapStrings);}
catch(err){this.log("Unable to load language strings",1);}
if(this.isMSIE){if(typeof window.CanvasRenderingContext2D=="undefined"&&typeof G_vmlCanvasManager=="undefined"){this.log(this.strings.ERR_EXCANVAS_LOAD,2);}}
if(this.config.mode=="highlighter"){imgmap_spawnObjects(this.config);}
// Minified continuation: onLoad tail, cross-browser event helpers
// (addEvent/removeEvent/addLoadEvent), dynamic script loading
// (loadScript/script_load), loadStrings, image binding (loadImage/useImage),
// statusMessage/log, map HTML export (getMapHTML/getMapInnerHTML,
// getMapName/getMapId) and shape-name normalisation (_normShape).
// Same API as the readable source earlier in this file; generated by a
// compressor - do not edit by hand.
this.isLoaded=true;return true;};imgmap.prototype.addEvent=function(obj,evt,_14){if(obj.attachEvent){return obj.attachEvent("on"+evt,_14);}else{if(obj.addEventListener){obj.addEventListener(evt,_14,false);return true;}else{obj["on"+evt]=_14;}}};imgmap.prototype.removeEvent=function(obj,evt,_17){if(obj.detachEvent){return obj.detachEvent("on"+evt,_17);}else{if(obj.removeEventListener){obj.removeEventListener(evt,_17,false);return true;}else{obj["on"+evt]=null;}}};imgmap.prototype.addLoadEvent=function(obj,_19){if(obj.attachEvent){return obj.attachEvent("onreadystatechange",_19);}else{if(obj.addEventListener){obj.addEventListener("load",_19,false);return true;}else{obj.onload=_19;}}};imgmap.prototype.loadScript=function(url){if(url===""){return false;}
if(this.loadedScripts[url]==1){return true;}
this.log("Loading script: "+url);try{var _1b=document.getElementsByTagName("head")[0];var _1c=document.createElement("SCRIPT");_1c.setAttribute("language","javascript");_1c.setAttribute("type","text/javascript");_1c.setAttribute("src",url);_1b.appendChild(_1c);this.addLoadEvent(_1c,this.script_load.bind(this));}
catch(err){this.log("Error loading script: "+url);}
return true;};imgmap.prototype.script_load=function(e){var obj=(this.isMSIE)?window.event.srcElement:e.currentTarget;var url=obj.src;var _20=false;if(typeof obj.readyState!="undefined"){if(obj.readyState=="complete"){_20=true;}}else{_20=true;}
if(_20){this.loadedScripts[url]=1;this.log("Loaded script: "+url);return true;}};imgmap.prototype.loadStrings=function(obj){for(var key in obj){if(obj.hasOwnProperty(key)){this.strings[key]=obj[key];}}};imgmap.prototype.loadImage=function(img,_24,_25){if(typeof this.pic_container=="undefined"){this.log("You must have pic_container defined to use loadImage!",2);return false;}
this.removeAllAreas();this.globalscale=1;this.fireEvent("onHtmlChanged","");if(!this._getLastArea()){if(this.config.mode!="editor2"){this.addNewArea();}}
if(typeof img=="string"){if(typeof this.pic=="undefined"){this.pic=document.createElement("IMG");this.pic_container.appendChild(this.pic);this.addEvent(this.pic,"mousedown",this.eventHandlers.img_mousedown=this.img_mousedown.bind(this));this.addEvent(this.pic,"mouseup",this.eventHandlers.img_mouseup=this.img_mouseup.bind(this));this.addEvent(this.pic,"mousemove",this.eventHandlers.img_mousemove=this.img_mousemove.bind(this));this.pic.style.cursor=this.config.cursor_default;}
this.log("Loading image: "+img,0);var q="?";if(img.indexOf("?")>-1){q="&";}
this.pic.src=img+q+(new Date().getTime());if(_24&&_24>0){this.pic.setAttribute("width",_24);}
if(_25&&_25>0){this.pic.setAttribute("height",_25);}
this.fireEvent("onLoadImage",this.pic);return true;}else{if(typeof img=="object"){var src=img.src;if(src===""&&img.getAttribute("mce_src")!==""){src=img.getAttribute("mce_src");}else{if(src===""&&img.getAttribute("_fcksavedurl")!==""){src=img.getAttribute("_fcksavedurl");}}
if(!_24){_24=img.clientWidth;}
if(!_25){_25=img.clientHeight;}
return this.loadImage(src,_24,_25);}}};imgmap.prototype.useImage=function(img){this.removeAllAreas();if(!this._getLastArea()){if(this.config.mode!="editor2"){this.addNewArea();}}
img=this.assignOID(img);if(typeof img=="object"){if(typeof this.pic!="undefined"){this.removeEvent(this.pic,"mousedown",this.eventHandlers.img_mousedown);this.removeEvent(this.pic,"mouseup",this.eventHandlers.img_mouseup);this.removeEvent(this.pic,"mousemove",this.eventHandlers.img_mousemove);this.pic.style.cursor="";}
this.pic=img;this.addEvent(this.pic,"mousedown",this.eventHandlers.img_mousedown=this.img_mousedown.bind(this));this.addEvent(this.pic,"mouseup",this.eventHandlers.img_mouseup=this.img_mouseup.bind(this));this.addEvent(this.pic,"mousemove",this.eventHandlers.img_mousemove=this.img_mousemove.bind(this));this.pic.style.cursor=this.config.cursor_default;if(this.pic.parentNode.className=="pic_container"){this.pic_container=this.pic.parentNode;}else{this.pic_container=document.createElement("div");this.pic_container.className="pic_container";this.pic.parentNode.insertBefore(this.pic_container,this.pic);this.pic_container.appendChild(this.pic);}
this.fireEvent("onLoadImage",this.pic);return true;}};imgmap.prototype.statusMessage=function(str){this.fireEvent("onStatusMessage",str);};imgmap.prototype.log=function(obj,_2b){if(_2b===""||typeof _2b=="undefined"){_2b=0;}
if(this.config.loglevel!=-1&&_2b>=this.config.loglevel){this.logStore.push({level:_2b,obj:obj});}
if(typeof console=="object"){console.log(obj);}else{if(this.isOpera){opera.postError(_2b+": "+obj);}else{if(typeof air=="object"){if(typeof air.Introspector=="object"){air.Introspector.Console.log(obj);}else{air.trace(obj);}}else{if(_2b>1){var msg="";for(var i=0,le=this.logStore.length;i<le;i++){msg+=this.logStore[i].level+": "+this.logStore[i].obj+"\n";}
alert(msg);}else{window.defaultStatus=(_2b+": "+obj);}}}}};imgmap.prototype.getMapHTML=function(_2f){var _30="<map id=\""+this.getMapId()+"\" name=\""+this.getMapName()+"\">"+this.getMapInnerHTML(_2f)+this.waterMark+"</map>";this.fireEvent("onGetMap",_30);return _30;};imgmap.prototype.getMapInnerHTML=function(_31){var _32,_33;_32="";for(var i=0,le=this.areas.length;i<le;i++){if(this.areas[i]){if(this.areas[i].shape&&this.areas[i].shape!="undefined"){_33=this.areas[i].lastInput;if(_31&&_31.match(/noscale/)){var cs=_33.split(",");for(var j=0,le2=cs.length;j<le2;j++){cs[j]=Math.round(cs[j]*this.globalscale);}
_33=cs.join(",");}
_32+="<area shape=\""+this.areas[i].shape+"\""+" alt=\""+this.areas[i].aalt+"\""+" title=\""+this.areas[i].atitle+"\""+" coords=\""+_33+"\""+" href=\""+this.areas[i].ahref+"\""+" target=\""+this.areas[i].atarget+"\" />";}}}
return _32;};imgmap.prototype.getMapName=function(){if(this.mapname===""){if(this.mapid!==""){return this.mapid;}
var now=new Date();this.mapname="imgmap"+now.getFullYear()+(now.getMonth()+1)+now.getDate()+now.getHours()+now.getMinutes()+now.getSeconds();}
return this.mapname;};imgmap.prototype.getMapId=function(){if(this.mapid===""){this.mapid=this.getMapName();}
return this.mapid;};imgmap.prototype._normShape=function(_3a){if(!_3a){return"rect";}
_3a=this.trim(_3a).toLowerCase();if(_3a.substring(0,4)=="rect"){return"rect";}
if(_3a.substring(0,4)=="circ"){return"circle";}
if(_3a.substring(0,4)=="poly"){return"poly";}
return"rect";};imgmap.prototype._normCoords=function(_3b,_3c,_3d){var i;var sx;var sy;var gx;var gy;var _43,le;_3b=this.trim(_3b);if(_3b===""){return"";}
var _45=_3b;_3b=_3b.replace(/(\d)(\D)+(\d)/g,"$1,$3");_3b=_3b.replace(/,\D+(\d)/g,",$1");_3b=_3b.replace(/,0+(\d)/g,",$1");_3b=_3b.replace(/(\d)(\D)+,/g,"$1,");_3b=_3b.replace(/^\D+(\d)/g,"$1");_3b=_3b.replace(/^0+(\d)/g,"$1");_3b=_3b.replace(/(\d)(\D)+$/g,"$1");var _46=_3b.split(",");if(_3c=="rect"){if(_3d=="fromcircle"){var r=_46[2];_46[0]=_46[0]-r;_46[1]=_46[1]-r;_46[2]=parseInt(_46[0],10)+2*r;_46[3]=parseInt(_46[1],10)+2*r;}else{if(_3d=="frompoly"){sx=parseInt(_46[0],10);gx=parseInt(_46[0],10);sy=parseInt(_46[1],10);gy=parseInt(_46[1],10);for(i=0,le=_46.length;i<le;i++){if(i%2===0&&parseInt(_46[i],10)<sx){sx=parseInt(_46[i],10);}
if(i%2===1&&parseInt(_46[i],10)<sy){sy=parseInt(_46[i],10);}
if(i%2===0&&parseInt(_46[i],10)>gx){gx=parseInt(_46[i],10);}
if(i%2===1&&parseInt(_46[i],10)>gy){gy=parseInt(_46[i],10);}}
_46[0]=sx;_46[1]=sy;_46[2]=gx;_46[3]=gy;}}
if(!(parseInt(_46[1],10)>=0)){_46[1]=_46[0];}
if(!(parseInt(_46[2],10)>=0)){_46[2]=parseInt(_46[0],10)+10;}
if(!(parseInt(_46[3],10)>=0)){_46[3]=parseInt(_46[1],10)+10;}
if(parseInt(_46[0],10)>parseInt(_46[2],10)){_43=_46[0];_46[0]=_46[2];_46[2]=_43;}
if(parseInt(_46[1],10)>parseInt(_46[3],10)){_43=_46[1];_46[1]=_46[3];_46[3]=_43;}
_3b=_46[0]+","+_46[1]+","+_46[2]+","+_46[3];}else{if(_3c=="circle"){if(_3d=="fromrect"){sx=parseInt(_46[0],10);gx=parseInt(_46[2],10);sy=parseInt(_46[1],10);gy=parseInt(_46[3],10);_46[2]=(gx-sx<gy-sy)?gx-sx:gy-sy;_46[2]=Math.floor(_46[2]/2);_46[0]=sx+_46[2];_46[1]=sy+_46[2];}else{if(_3d=="frompoly"){sx=parseInt(_46[0],10);gx=parseInt(_46[0],10);sy=parseInt(_46[1],10);gy=parseInt(_46[1],10);for(i=0,le=_46.length;i<le;i++){if(i%2===0&&parseInt(_46[i],10)<sx){sx=parseInt(_46[i],10);}
if(i%2===1&&parseInt(_46[i],10)<sy){sy=parseInt(_46[i],10);}
if(i%2===0&&parseInt(_46[i],10)>gx){gx=parseInt(_46[i],10);}
if(i%2===1&&parseInt(_46[i],10)>gy){gy=parseInt(_46[i],10);}}
_46[2]=(gx-sx<gy-sy)?gx-sx:gy-sy;_46[2]=Math.floor(_46[2]/2);_46[0]=sx+_46[2];_46[1]=sy+_46[2];}}
if(!(parseInt(_46[1],10)>0)){_46[1]=_46[0];}
if(!(parseInt(_46[2],10)>0)){_46[2]=10;}
_3b=_46[0]+","+_46[1]+","+_46[2];}else{if(_3c=="poly"){if(_3d=="fromrect"){_46[4]=_46[2];_46[5]=_46[3];_46[2]=_46[0];_46[6]=_46[4];_46[7]=_46[1];}else{if(_3d=="fromcircle"){var _48=parseInt(_46[0],10);var _49=parseInt(_46[1],10);var _4a=parseInt(_46[2],10);var j=0;_46[j++]=_48+_4a;_46[j++]=_49;var _4c=60;for(i=0;i<=_4c;i++){var _4d=i/_4c;var _4e=Math.cos(_4d*2*Math.PI);var _4f=Math.sin(_4d*2*Math.PI);var _50=_48+_4e*_4a;var _51=_49+_4f*_4a;_46[j++]=Math.round(_50);_46[j++]=Math.round(_51);}}}
_3b=_46.join(",");}else{if(_3c=="bezier1"){_3b=_46.join(",");}}}}
if(_3d=="preserve"&&_45!=_3b){return _45;}
// End of _normCoords(): return the normalized coords string.
return _3b;};
// setMapHTML(map): replaces the whole editor state from a <map> element or its HTML
// string. No-op in preview mode (viewmode===1). Fires "onSetMap", clears all areas,
// then re-creates one editor area per <area> child (shape, coords, href — honoring a
// "_fcksavedurl" attribute if present —, alt, title, target), recalculates geometry,
// and fires "onAreaChanged" per area plus a final "onHtmlChanged".
// Returns true on success, false when the parsed node is not a <map>.
imgmap.prototype.setMapHTML=function(map){if(this.viewmode===1){return;}
this.fireEvent("onSetMap",map);this.removeAllAreas();var _53;if(typeof map=="string"){var _54=document.createElement("DIV");_54.innerHTML=map;_53=_54.firstChild;}else{if(typeof map=="object"){_53=map;}}
if(!_53||_53.nodeName.toLowerCase()!=="map"){return false;}
this.mapname=_53.name;this.mapid=_53.id;var _55=_53.getElementsByTagName("area");var _56,_57,_58,alt,_5a,_5b,id;for(var i=0,le=_55.length;i<le;i++){_56=_57=_58=alt=_5a=_5b="";id=this.addNewArea();_56=this._normShape(_55[i].getAttribute("shape",2));this.initArea(id,_56);if(_55[i].getAttribute("coords",2)){_57=this._normCoords(_55[i].getAttribute("coords",2),_56);this.areas[id].lastInput=_57;}
_58=_55[i].getAttribute("href",2);var _5f=_55[i].getAttribute("_fcksavedurl");if(_5f){_58=_5f;}
if(_58){this.areas[id].ahref=_58;}
alt=_55[i].getAttribute("alt");if(alt){this.areas[id].aalt=alt;}
// title falls back to alt when absent.
_5a=_55[i].getAttribute("title");if(!_5a){_5a=alt;}
if(_5a){this.areas[id].atitle=_5a;}
_5b=_55[i].getAttribute("target");if(_5b){_5b=_5b.toLowerCase();}
this.areas[id].atarget=_5b;this._recalculate(id,_57);this.relaxArea(id);this.fireEvent("onAreaChanged",this.areas[id]);}
// togglePreview() (body continues in the following lines) starts here.
this.fireEvent("onHtmlChanged",this.getMapHTML());return true;};imgmap.prototype.togglePreview=function(){var i,le;if(!this.pic){return false;}
// togglePreview() body: switches between design mode (viewmode 0) and preview mode
// (viewmode 1). Lazily creates a hidden DIV to host the generated <map> markup.
if(!this.preview){this.preview=document.createElement("DIV");this.preview.style.display="none";this.pic_container.appendChild(this.preview);}
// Entering preview: hide all editor overlays (and labels), inject the real map HTML
// ("noscale" coords), attach usemap to the picture. Leaving preview: restore the
// overlays, clear the injected map, remove usemap, and reset drawing state.
if(this.viewmode===0){for(i=0,le=this.areas.length;i<le;i++){if(this.areas[i]){this.areas[i].style.display="none";if(this.areas[i].label){this.areas[i].label.style.display="none";}}}
this.preview.innerHTML=this.getMapHTML("noscale");this.pic.setAttribute("border","0",0);this.pic.setAttribute("usemap","#"+this.mapname,0);this.pic.style.cursor="auto";this.viewmode=1;this.statusMessage(this.strings.PREVIEW_MODE);}else{for(i=0,le=this.areas.length;i<le;i++){if(this.areas[i]){this.areas[i].style.display="";if(this.areas[i].label&&this.config.label){this.areas[i].label.style.display="";}}}
this.preview.innerHTML="";this.pic.style.cursor=this.config.cursor_default;this.pic.removeAttribute("usemap",0);this.viewmode=0;this.statusMessage(this.strings.DESIGN_MODE);this.is_drawing=0;}
// Fires "onModeChanged" and returns the new viewmode. addNewArea() starts here.
this.fireEvent("onModeChanged",this.viewmode);return this.viewmode;};imgmap.prototype.addNewArea=function(){if(this.viewmode===1){return;}
// addNewArea() body: allocates the next area id (last area's aid + 1, or 0), creates
// a placeholder DIV with shape "undefined", makes it current, fires "onAddArea",
// and returns the new id.
var _62=this._getLastArea();var id=(_62)?_62.aid+1:0;this.areas[id]=document.createElement("DIV");this.areas[id].id=this.mapname+"area"+id;this.areas[id].aid=id;this.areas[id].shape="undefined";this.currentid=id;this.fireEvent("onAddArea",id);return id;};
// initArea(id, shape): replaces the placeholder with a CANVAS overlay for the given
// shape. Detaches any previous element/label first.
imgmap.prototype.initArea=function(id,_65){if(!this.areas[id]){return false;}
if(this.areas[id].parentNode){this.areas[id].parentNode.removeChild(this.areas[id]);}
if(this.areas[id].label){this.areas[id].label.parentNode.removeChild(this.areas[id].label);}
// G_vmlCanvasManager (excanvas) is used when present so canvases work on legacy IE.
this.areas[id]=null;this.areas[id]=document.createElement("CANVAS");this.pic_container.appendChild(this.areas[id]);this.pic_container.style.position="relative";if(typeof G_vmlCanvasManager!="undefined"){this.areas[id]=G_vmlCanvasManager.initElement(this.areas[id]);}
// Initializes area metadata (href/title/alt/target), absolute positioning over the
// picture, draw-state opacity, all mouse handlers (bound to this), a per-area
// "memory" record used while drawing/moving/resizing, and the floating label DIV.
// relaxArea(id) starts at the end of this line.
this.areas[id].id=this.mapname+"area"+id;this.areas[id].aid=id;this.areas[id].shape=_65;this.areas[id].ahref="";this.areas[id].atitle="";this.areas[id].aalt="";this.areas[id].atarget="";this.areas[id].style.position="absolute";this.areas[id].style.top=this.pic.offsetTop+"px";this.areas[id].style.left=this.pic.offsetLeft+"px";this._setopacity(this.areas[id],this.config.CL_DRAW_BG,this.config.draw_opacity);this.areas[id].ondblclick=this.area_dblclick.bind(this);this.areas[id].onmousedown=this.area_mousedown.bind(this);this.areas[id].onmouseup=this.area_mouseup.bind(this);this.areas[id].onmousemove=this.area_mousemove.bind(this);this.areas[id].onmouseover=this.area_mouseover.bind(this);this.areas[id].onmouseout=this.area_mouseout.bind(this);this.memory[id]={};this.memory[id].downx=0;this.memory[id].downy=0;this.memory[id].left=0;this.memory[id].top=0;this.memory[id].width=0;this.memory[id].height=0;this.memory[id].xpoints=[];this.memory[id].ypoints=[];this.areas[id].label=document.createElement("DIV");this.pic_container.appendChild(this.areas[id].label);this.areas[id].label.className=this.config.label_class;this.assignCSS(this.areas[id].label,this.config.label_style);this.areas[id].label.style.position="absolute";};imgmap.prototype.relaxArea=function(id){if(!this.areas[id]){return;}
// relaxArea(id) body: fires "onRelaxArea", then restores normal border and opacity.
this.fireEvent("onRelaxArea",id);this._setBorder(id,"NORM");this._setopacity(this.areas[id],this.config.CL_NORM_BG,this.config.norm_opacity);};
// relaxAllAreas(): relaxArea() over every defined area.
imgmap.prototype.relaxAllAreas=function(){for(var i=0,le=this.areas.length;i<le;i++){if(this.areas[i]){this.relaxArea(i);}}};
// _setBorder(id, state): draws a 1px border for rects (or any shape when
// config.bounding_box is on); color comes from config["CL_<state>_SHAPE|BOX"],
// dotted while in "DRAW" state; other shapes get no border.
imgmap.prototype._setBorder=function(id,_6a){if(this.areas[id].shape=="rect"||this.config.bounding_box){this.areas[id].style.borderWidth="1px";this.areas[id].style.borderStyle=(_6a=="DRAW"?"dotted":"solid");this.areas[id].style.borderColor=this.config["CL_"+_6a+"_"+(this.areas[id].shape=="rect"?"SHAPE":"BOX")];}else{this.areas[id].style.border="";}};
// _setopacity(el, color, pct): sets background color and opacity. pct may be a
// "start-end" string, which triggers a gradual fade via setTimeout steps (+5/-3 per
// 20ms tick) toward the end value.
imgmap.prototype._setopacity=function(_6b,_6c,pct){if(_6c){_6b.style.backgroundColor=_6c;}
if(pct&&typeof pct=="string"&&pct.match(/^\d*\-\d+$/)){var _6e=pct.split("-");if(typeof _6e[0]!="undefined"){_6e[0]=parseInt(_6e[0],10);this._setopacity(_6b,_6c,_6e[0]);}
if(typeof _6e[1]!="undefined"){_6e[1]=parseInt(_6e[1],10);var _6f=this._getopacity(_6b);var _70=this;var _71=Math.round(_6e[1]-_6f);if(_71>5){window.setTimeout(function(){_70._setopacity(_6b,null,"-"+_6e[1]);},20);pct=1*_6f+5;}else{if(_71<-3){window.setTimeout(function(){_70._setopacity(_6b,null,"-"+_6e[1]);},20);pct=1*_6f-3;}else{pct=_6e[1];}}}}
// Applies the final numeric pct via style.opacity (0..1) and the legacy IE
// alpha(opacity=N) filter. _getopacity(el) reads it back the same two ways.
if(!isNaN(pct)){pct=Math.round(parseInt(pct,10));_6b.style.opacity=pct/100;_6b.style.filter="alpha(opacity="+pct+")";}};imgmap.prototype._getopacity=function(_72){if(_72.style.opacity<=1){return _72.style.opacity*100;}
if(_72.style.filter){return parseInt(_72.style.filter.replace(/alpha\(opacity\=([^\)]*)\)/ig,"$1"),10);}
// Default opacity when nothing is set. removeArea(id, silent) starts here.
return 100;};imgmap.prototype.removeArea=function(id,_74){if(this.viewmode===1){return;}
// removeArea(id, silent) body: detaches the overlay and its label and clears all
// handlers; the empty catch deliberately ignores DOM errors (element may be gone).
if(id===null||typeof id=="undefined"){return;}
try{this.areas[id].label.parentNode.removeChild(this.areas[id].label);this.areas[id].parentNode.removeChild(this.areas[id]);this.areas[id].label.className=null;this.areas[id].label=null;this.areas[id].onmouseover=null;this.areas[id].onmouseout=null;this.areas[id].onmouseup=null;this.areas[id].onmousedown=null;this.areas[id].onmousemove=null;}
catch(err){}
// Fires "onRemoveArea" and, unless silent, "onHtmlChanged". removeAllAreas() removes
// every area silently, then fires a single "onHtmlChanged".
this.areas[id]=null;this.fireEvent("onRemoveArea",id);if(!_74){this.fireEvent("onHtmlChanged",this.getMapHTML());}};imgmap.prototype.removeAllAreas=function(){for(var i=0,le=this.areas.length;i<le;i++){if(this.areas[i]){this.removeArea(i,true);}}
// scaleAllAreas(scale): rescales every shaped area by the ratio of the new scale to
// the previous this.globalscale, then records the new global scale.
this.fireEvent("onHtmlChanged",this.getMapHTML());};imgmap.prototype.scaleAllAreas=function(_77){var _78=1;try{_78=_77/this.globalscale;}
catch(err){this.log("Invalid (global)scale",1);}
// scaleArea(id, ratio) (starting mid-line) multiplies position, size and — for
// poly/bezier1 — every stored point by the ratio; repaint happens on the next line.
this.globalscale=_77;for(var i=0,le=this.areas.length;i<le;i++){if(this.areas[i]&&this.areas[i].shape!="undefined"){this.scaleArea(i,_78);}}};imgmap.prototype.scaleArea=function(id,_7c){this.areas[id].style.top=parseInt(this.areas[id].style.top,10)*_7c+"px";this.areas[id].style.left=parseInt(this.areas[id].style.left,10)*_7c+"px";this.setAreaSize(id,this.areas[id].width*_7c,this.areas[id].height*_7c);if(this.areas[id].shape=="poly"||this.areas[id].shape=="bezier1"){for(var i=0,le=this.areas[id].xpoints.length;i<le;i++){this.areas[id].xpoints[i]*=_7c;this.areas[id].ypoints[i]*=_7c;}}
// End of scaleArea(): repaint the shape and refresh its serialized coords.
this._repaint(this.areas[id],this.config.CL_NORM_SHAPE);this._updatecoords(id);};
// _putlabel(id): renders the area's floating label from the config.label template
// (placeholders: %n id, %c coords, %h href, %a alt, %t title), positioned over the
// area; hidden when labels are disabled. Errors are only logged.
imgmap.prototype._putlabel=function(id){if(this.viewmode===1){return;}
if(!this.areas[id].label){return;}
try{if(!this.config.label){this.areas[id].label.innerHTML="";this.areas[id].label.style.display="none";}else{this.areas[id].label.style.display="";var _80=this.config.label;_80=_80.replace(/%n/g,String(id));_80=_80.replace(/%c/g,String(this.areas[id].lastInput));_80=_80.replace(/%h/g,String(this.areas[id].ahref));_80=_80.replace(/%a/g,String(this.areas[id].aalt));_80=_80.replace(/%t/g,String(this.areas[id].atitle));this.areas[id].label.innerHTML=_80;}
this.areas[id].label.style.top=this.areas[id].style.top;this.areas[id].label.style.left=this.areas[id].style.left;}
// _puthint(id): same template expansion as _putlabel but written into the canvas
// element's title/alt (config.hint template).
catch(err){this.log("Error putting label",1);}};imgmap.prototype._puthint=function(id){try{if(!this.config.hint){this.areas[id].title="";this.areas[id].alt="";}else{var _82=this.config.hint;_82=_82.replace(/%n/g,String(id));_82=_82.replace(/%c/g,String(this.areas[id].lastInput));_82=_82.replace(/%h/g,String(this.areas[id].ahref));_82=_82.replace(/%a/g,String(this.areas[id].aalt));_82=_82.replace(/%t/g,String(this.areas[id].atitle));this.areas[id].title=_82;this.areas[id].alt=_82;}}
// _repaintAll(): repaint every area. _repaint(area, color, x, y): redraws one area's
// canvas — circle via ctx.arc with a center knob; rect via border only; poly via
// lineTo through xpoints/ypoints; bezier1 via quadraticCurveTo pairs. While drawing
// (DM_*_DRAW states) a rubber-band segment to the cursor (x,y) is appended.
catch(err){this.log("Error putting hint",1);}};imgmap.prototype._repaintAll=function(){for(var i=0,le=this.areas.length;i<le;i++){if(this.areas[i]){this._repaint(this.areas[i],this.config.CL_NORM_SHAPE);}}};imgmap.prototype._repaint=function(_85,_86,x,y){var ctx;var _8a,_8b,_8c,top;var i,le;if(_85.shape=="circle"){_8a=parseInt(_85.style.width,10);var _90=Math.floor(_8a/2)-1;ctx=_85.getContext("2d");ctx.clearRect(0,0,_8a,_8a);ctx.beginPath();ctx.strokeStyle=_86;ctx.arc(_90,_90,_90,0,Math.PI*2,0);ctx.stroke();ctx.closePath();ctx.strokeStyle=this.config.CL_KNOB;ctx.strokeRect(_90,_90,1,1);this._putlabel(_85.aid);this._puthint(_85.aid);}else{if(_85.shape=="rect"){this._putlabel(_85.aid);this._puthint(_85.aid);}else{if(_85.shape=="poly"){_8a=parseInt(_85.style.width,10);_8b=parseInt(_85.style.height,10);_8c=parseInt(_85.style.left,10);top=parseInt(_85.style.top,10);if(_85.xpoints){ctx=_85.getContext("2d");ctx.clearRect(0,0,_8a,_8b);ctx.beginPath();ctx.strokeStyle=_86;ctx.moveTo(_85.xpoints[0]-_8c,_85.ypoints[0]-top);for(i=1,le=_85.xpoints.length;i<le;i++){ctx.lineTo(_85.xpoints[i]-_8c,_85.ypoints[i]-top);}
if(this.is_drawing==this.DM_POLYGON_DRAW||this.is_drawing==this.DM_POLYGON_LASTDRAW){ctx.lineTo(x-_8c-5,y-top-5);}
ctx.lineTo(_85.xpoints[0]-_8c,_85.ypoints[0]-top);ctx.stroke();ctx.closePath();}
this._putlabel(_85.aid);this._puthint(_85.aid);}else{if(_85.shape=="bezier1"){_8a=parseInt(_85.style.width,10);_8b=parseInt(_85.style.height,10);_8c=parseInt(_85.style.left,10);top=parseInt(_85.style.top,10);if(_85.xpoints){ctx=_85.getContext("2d");ctx.clearRect(0,0,_8a,_8b);ctx.beginPath();ctx.strokeStyle=_86;ctx.moveTo(_85.xpoints[0]-_8c,_85.ypoints[0]-top);for(i=2,le=_85.xpoints.length;i<le;i+=2){ctx.quadraticCurveTo(_85.xpoints[i-1]-_8c,_85.ypoints[i-1]-top,_85.xpoints[i]-_8c,_85.ypoints[i]-top);}
if(this.is_drawing==this.DM_BEZIER_DRAW||this.is_drawing==this.DM_BEZIER_LASTDRAW){if(_85.xpoints.length%2===0&&_85.xpoints.length>1){ctx.quadraticCurveTo(_85.xpoints[_85.xpoints.length-1]-_8c-5,_85.ypoints[_85.ypoints.length-1]-top-5,x-_8c-5,y-top-5);}else{ctx.lineTo(x-_8c-5,y-top-5);}}
ctx.lineTo(_85.xpoints[0]-_8c,_85.ypoints[0]-top);ctx.stroke();ctx.closePath();}
// _updatecoords(id) (starting mid-line) rebuilds the area's lastInput coords string
// from the live DOM geometry, dividing by globalscale; continues in the next block.
this._putlabel(_85.aid);this._puthint(_85.aid);}}}}};imgmap.prototype._updatecoords=function(id){var _92=Math.round(parseInt(this.areas[id].style.left,10)/this.globalscale);var top=Math.round(parseInt(this.areas[id].style.top,10)/this.globalscale);var _94=Math.round(parseInt(this.areas[id].style.height,10)/this.globalscale);var _95=Math.round(parseInt(this.areas[id].style.width,10)/this.globalscale);var _96="";if(this.areas[id].shape=="rect"){_96=_92+","+top+","+(_92+_95)+","+(top+_94);this.areas[id].lastInput=_96;}else{if(this.areas[id].shape=="circle"){var _97=Math.floor(_95/2)-1;_96=(_92+_97)+","+(top+_97)+","+_97;this.areas[id].lastInput=_96;}else{if(this.areas[id].shape=="poly"||this.areas[id].shape=="bezier1"){if(this.areas[id].xpoints){for(var i=0,le=this.areas[id].xpoints.length;i<le;i++){_96+=Math.round(this.areas[id].xpoints[i]/this.globalscale)+","+Math.round(this.areas[id].ypoints[i]/this.globalscale)+",";}
// End of _updatecoords(): strip the trailing comma from the poly/bezier point list,
// store lastInput, and notify listeners.
_96=_96.substring(0,_96.length-1);}
this.areas[id].lastInput=_96;}}}
// _recalculate(id, coords): the inverse of _updatecoords — parses a coords string
// (normalized with "preserve", or falling back to lastInput) and repositions/resizes
// the overlay accordingly. Throws "invalid coords" for malformed input, which is
// caught below, logged, and reported via ERR_INVALID_COORDS.
this.fireEvent("onAreaChanged",this.areas[id]);this.fireEvent("onHtmlChanged",this.getMapHTML());};imgmap.prototype._recalculate=function(id,_9b){try{if(_9b){_9b=this._normCoords(_9b,this.areas[id].shape,"preserve");}else{_9b=this.areas[id].lastInput||"";}
var _9c=_9b.split(",");if(this.areas[id].shape=="rect"){if(_9c.length!=4||parseInt(_9c[0],10)>parseInt(_9c[2],10)||parseInt(_9c[1],10)>parseInt(_9c[3],10)){throw"invalid coords";}
this.areas[id].style.left=this.globalscale*(this.pic.offsetLeft+parseInt(_9c[0],10))+"px";this.areas[id].style.top=this.globalscale*(this.pic.offsetTop+parseInt(_9c[1],10))+"px";this.setAreaSize(id,this.globalscale*(_9c[2]-_9c[0]),this.globalscale*(_9c[3]-_9c[1]));this._repaint(this.areas[id],this.config.CL_NORM_SHAPE);}else{if(this.areas[id].shape=="circle"){if(_9c.length!=3||parseInt(_9c[2],10)<0){throw"invalid coords";}
var _9d=2*(_9c[2]);this.setAreaSize(id,this.globalscale*_9d,this.globalscale*_9d);this.areas[id].style.left=this.globalscale*(this.pic.offsetLeft+parseInt(_9c[0],10)-_9d/2)+"px";this.areas[id].style.top=this.globalscale*(this.pic.offsetTop+parseInt(_9c[1],10)-_9d/2)+"px";this._repaint(this.areas[id],this.config.CL_NORM_SHAPE);}else{if(this.areas[id].shape=="poly"||this.areas[id].shape=="bezier1"){if(_9c.length<2){throw"invalid coords";}
// poly/bezier1: rebuild xpoints/ypoints from the pairs, growing the bounding box
// per point, then shrink-wrap it.
this.areas[id].xpoints=[];this.areas[id].ypoints=[];for(var i=0,le=_9c.length;i<le;i+=2){this.areas[id].xpoints[this.areas[id].xpoints.length]=this.globalscale*(this.pic.offsetLeft+parseInt(_9c[i],10));this.areas[id].ypoints[this.areas[id].ypoints.length]=this.globalscale*(this.pic.offsetTop+parseInt(_9c[i+1],10));this._polygongrow(this.areas[id],this.globalscale*_9c[i],this.globalscale*_9c[i+1]);}
this._polygonshrink(this.areas[id]);}}}}
catch(err){var msg=(err.message)?err.message:"error calculating coordinates";this.log(msg,1);this.statusMessage(this.strings.ERR_INVALID_COORDS);if(this.areas[id].lastInput){this.fireEvent("onAreaChanged",this.areas[id]);}
this._repaint(this.areas[id],this.config.CL_NORM_SHAPE);return;}
this.areas[id].lastInput=_9b;};
// _polygongrow(area, x, y): expands the area's bounding DIV so the point (x,y) fits,
// moving the left/top edge or widening/heightening as needed.
imgmap.prototype._polygongrow=function(_a1,_a2,_a3){var _a4=_a2-parseInt(_a1.style.left,10);var _a5=_a3-parseInt(_a1.style.top,10);var pad=0;var _a7=0;if(_a2<parseInt(_a1.style.left,10)){_a1.style.left=(_a2-pad)+"px";this.setAreaSize(_a1.aid,parseInt(_a1.style.width,10)+Math.abs(_a4)+_a7,null);}else{if(_a2>parseInt(_a1.style.left,10)+parseInt(_a1.style.width,10)){this.setAreaSize(_a1.aid,_a2-parseInt(_a1.style.left,10)+_a7,null);}}
// _polygonshrink(area): resets the box to the first point then re-grows it over all
// points, yielding a tight bounding box; repaints at the end.
if(_a3<parseInt(_a1.style.top,10)){_a1.style.top=(_a3-pad)+"px";this.setAreaSize(_a1.aid,null,parseInt(_a1.style.height,10)+Math.abs(_a5)+_a7);}else{if(_a3>parseInt(_a1.style.top,10)+parseInt(_a1.style.height,10)){this.setAreaSize(_a1.aid,null,_a3-parseInt(_a1.style.top,10)+_a7);}}};imgmap.prototype._polygonshrink=function(_a8){_a8.style.left=(_a8.xpoints[0])+"px";_a8.style.top=(_a8.ypoints[0])+"px";this.setAreaSize(_a8.aid,0,0);for(var i=0,le=_a8.xpoints.length;i<le;i++){this._polygongrow(_a8,_a8.xpoints[i],_a8.ypoints[i]);}
// img_mousemove(e) (starting mid-line): main mouse-move dispatcher; see next block.
this._repaint(_a8,this.config.CL_NORM_SHAPE);};imgmap.prototype.img_mousemove=function(e){var x;var y;var _ae;var _af;var _b0;if(this.viewmode===1){return;}
// img_mousemove(e) body: computes picture-relative cursor coordinates (IE uses
// window.event.x/y minus offsets, others pageX/Y minus _getPos), adds container
// scroll, and ignores moves outside the picture.
var pos=this._getPos(this.pic);x=(this.isMSIE)?(window.event.x-this.pic.offsetLeft):(e.pageX-pos.x);y=(this.isMSIE)?(window.event.y-this.pic.offsetTop):(e.pageY-pos.y);x=x+this.pic_container.scrollLeft;y=y+this.pic_container.scrollTop;if(x<0||y<0||x>this.pic.width||y>this.pic.height){return;}
if(this.memory[this.currentid]){var top=this.memory[this.currentid].top;var _b3=this.memory[this.currentid].left;var _b4=this.memory[this.currentid].height;var _b5=this.memory[this.currentid].width;}
// Safari-specific: shift toggles rectangle draw into square draw and back.
if(this.isSafari){if(e.shiftKey){if(this.is_drawing==this.DM_RECTANGLE_DRAW){this.is_drawing=this.DM_SQUARE_DRAW;this.statusMessage(this.strings.SQUARE2_DRAW);}}else{if(this.is_drawing==this.DM_SQUARE_DRAW&&this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_DRAW;this.statusMessage(this.strings.RECTANGLE_DRAW);}}}
// Dispatch on the current drawing state (this.is_drawing): rectangle/square draw,
// polygon/bezier rubber-banding, move, and the 8 square/rectangle resize states
// (each resize flips to its opposite state when dragged through zero size).
if(this.is_drawing==this.DM_RECTANGLE_DRAW){this.fireEvent("onDrawArea",this.currentid);_ae=x-this.memory[this.currentid].downx;_af=y-this.memory[this.currentid].downy;this.setAreaSize(this.currentid,Math.abs(_ae),Math.abs(_af));if(_ae<0){this.areas[this.currentid].style.left=(x+1)+"px";}
if(_af<0){this.areas[this.currentid].style.top=(y+1)+"px";}}else{if(this.is_drawing==this.DM_SQUARE_DRAW){this.fireEvent("onDrawArea",this.currentid);_ae=x-this.memory[this.currentid].downx;_af=y-this.memory[this.currentid].downy;if(Math.abs(_ae)<Math.abs(_af)){_b0=Math.abs(parseInt(_ae,10));}else{_b0=Math.abs(parseInt(_af,10));}
this.setAreaSize(this.currentid,_b0,_b0);if(_ae<0){this.areas[this.currentid].style.left=(this.memory[this.currentid].downx+_b0*-1)+"px";}
if(_af<0){this.areas[this.currentid].style.top=(this.memory[this.currentid].downy+_b0*-1+1)+"px";}}else{if(this.is_drawing==this.DM_POLYGON_DRAW||this.is_drawing==this.DM_BEZIER_DRAW){this.fireEvent("onDrawArea",this.currentid);this._polygongrow(this.areas[this.currentid],x,y);}else{if(this.is_drawing==this.DM_RECTANGLE_MOVE||this.is_drawing==this.DM_SQUARE_MOVE){this.fireEvent("onMoveArea",this.currentid);x=x-this.memory[this.currentid].rdownx;y=y-this.memory[this.currentid].rdowny;if(x+_b5>this.pic.width||y+_b4>this.pic.height){return;}
if(x<0||y<0){return;}
this.areas[this.currentid].style.left=x+1+"px";this.areas[this.currentid].style.top=y+1+"px";}else{if(this.is_drawing==this.DM_POLYGON_MOVE||this.is_drawing==this.DM_BEZIER_MOVE){this.fireEvent("onMoveArea",this.currentid);x=x-this.memory[this.currentid].rdownx;y=y-this.memory[this.currentid].rdowny;if(x+_b5>this.pic.width||y+_b4>this.pic.height){return;}
if(x<0||y<0){return;}
_ae=x-_b3;_af=y-top;if(this.areas[this.currentid].xpoints){for(var i=0,le=this.areas[this.currentid].xpoints.length;i<le;i++){this.areas[this.currentid].xpoints[i]=this.memory[this.currentid].xpoints[i]+_ae;this.areas[this.currentid].ypoints[i]=this.memory[this.currentid].ypoints[i]+_af;}}
this.areas[this.currentid].style.left=x+"px";this.areas[this.currentid].style.top=y+"px";}else{if(this.is_drawing==this.DM_SQUARE_RESIZE_LEFT){this.fireEvent("onResizeArea",this.currentid);_b0=x-_b3;if((_b5+(-1*_b0))>0){this.areas[this.currentid].style.left=x+1+"px";this.areas[this.currentid].style.top=(top+(_b0/2))+"px";this.setAreaSize(this.currentid,parseInt(_b5+(-1*_b0),10),parseInt(_b4+(-1*_b0),10));}else{this.memory[this.currentid].width=0;this.memory[this.currentid].height=0;this.memory[this.currentid].left=x;this.memory[this.currentid].top=y;this.is_drawing=this.DM_SQUARE_RESIZE_RIGHT;}}else{if(this.is_drawing==this.DM_SQUARE_RESIZE_RIGHT){this.fireEvent("onResizeArea",this.currentid);_b0=x-_b3-_b5;if((_b5+(_b0))-1>0){this.areas[this.currentid].style.top=(top+(-1*_b0/2))+"px";this.setAreaSize(this.currentid,(_b5+(_b0))-1,(_b4+(_b0)));}else{this.memory[this.currentid].width=0;this.memory[this.currentid].height=0;this.memory[this.currentid].left=x;this.memory[this.currentid].top=y;this.is_drawing=this.DM_SQUARE_RESIZE_LEFT;}}else{if(this.is_drawing==this.DM_SQUARE_RESIZE_TOP){this.fireEvent("onResizeArea",this.currentid);_b0=y-top;if((_b5+(-1*_b0))>0){this.areas[this.currentid].style.top=y+1+"px";this.areas[this.currentid].style.left=(_b3+(_b0/2))+"px";this.setAreaSize(this.currentid,(_b5+(-1*_b0)),(_b4+(-1*_b0)));}else{this.memory[this.currentid].width=0;this.memory[this.currentid].height=0;this.memory[this.currentid].left=x;this.memory[this.currentid].top=y;this.is_drawing=this.DM_SQUARE_RESIZE_BOTTOM;}}else{if(this.is_drawing==this.DM_SQUARE_RESIZE_BOTTOM){this.fireEvent("onResizeArea",this.currentid);_b0=y-top-_b4;if((_b5+(_b0))-1>0){this.areas[this.currentid].style.left=(_b3+(-1*_b0/2))+"px";this.setAreaSize(this.currentid,(_b5+(_b0))-1,(_b4+(_b0))-1);}else{this.memory[this.currentid].width=0;this.memory[this.currentid].height=0;this.memory[this.currentid].left=x;this.memory[this.currentid].top=y;this.is_drawing=this.DM_SQUARE_RESIZE_TOP;}}else{if(this.is_drawing==this.DM_RECTANGLE_RESIZE_LEFT){this.fireEvent("onResizeArea",this.currentid);_ae=x-_b3;if(_b5+(-1*_ae)>0){this.areas[this.currentid].style.left=x+1+"px";this.setAreaSize(this.currentid,_b5+(-1*_ae),null);}else{this.memory[this.currentid].width=0;this.memory[this.currentid].left=x;this.is_drawing=this.DM_RECTANGLE_RESIZE_RIGHT;}}else{if(this.is_drawing==this.DM_RECTANGLE_RESIZE_RIGHT){this.fireEvent("onResizeArea",this.currentid);_ae=x-_b3-_b5;if((_b5+(_ae))-1>0){this.setAreaSize(this.currentid,(_b5+(_ae))-1,null);}else{this.memory[this.currentid].width=0;this.memory[this.currentid].left=x;this.is_drawing=this.DM_RECTANGLE_RESIZE_LEFT;}}else{if(this.is_drawing==this.DM_RECTANGLE_RESIZE_TOP){this.fireEvent("onResizeArea",this.currentid);_af=y-top;if((_b4+(-1*_af))>0){this.areas[this.currentid].style.top=y+1+"px";this.setAreaSize(this.currentid,null,(_b4+(-1*_af)));}else{this.memory[this.currentid].height=0;this.memory[this.currentid].top=y;this.is_drawing=this.DM_RECTANGLE_RESIZE_BOTTOM;}}else{if(this.is_drawing==this.DM_RECTANGLE_RESIZE_BOTTOM){this.fireEvent("onResizeArea",this.currentid);_af=y-top-_b4;if((_b4+(_af))-1>0){this.setAreaSize(this.currentid,null,(_b4+(_af))-1);}else{this.memory[this.currentid].height=0;this.memory[this.currentid].top=y;this.is_drawing=this.DM_RECTANGLE_RESIZE_TOP;}}}}}}}}}}}}}}
// While any drawing state is active, repaint with the draw color and refresh coords.
// img_mouseup(e) starts at the end of this line.
if(this.is_drawing){this._repaint(this.areas[this.currentid],this.config.CL_DRAW_SHAPE,x,y);this._updatecoords(this.currentid);}};imgmap.prototype.img_mouseup=function(e){if(this.viewmode===1){return;}
// img_mouseup(e) body: for non-draw states (i.e. move/resize just finished) it ends
// the interaction — clears draggedId and is_drawing, restores the status line,
// relaxes the current area, and records the release point unless this is the
// trailing placeholder area.
var pos=this._getPos(this.pic);var x=(this.isMSIE)?(window.event.x-this.pic.offsetLeft):(e.pageX-pos.x);var y=(this.isMSIE)?(window.event.y-this.pic.offsetTop):(e.pageY-pos.y);x=x+this.pic_container.scrollLeft;y=y+this.pic_container.scrollTop;if(this.is_drawing!=this.DM_RECTANGLE_DRAW&&this.is_drawing!=this.DM_SQUARE_DRAW&&this.is_drawing!=this.DM_POLYGON_DRAW&&this.is_drawing!=this.DM_POLYGON_LASTDRAW&&this.is_drawing!=this.DM_BEZIER_DRAW&&this.is_drawing!=this.DM_BEZIER_LASTDRAW){this.draggedId=null;this.is_drawing=0;this.statusMessage(this.strings.READY);this.relaxArea(this.currentid);if(this.areas[this.currentid]==this._getLastArea()){return;}
// img_mousedown(e): starts or continues an interaction on the picture itself.
this.memory[this.currentid].downx=x;this.memory[this.currentid].downy=y;}};imgmap.prototype.img_mousedown=function(e){if(this.viewmode===1){return;}
if(!this.areas[this.currentid]&&this.config.mode!="editor2"){return;}
this.areas[this.currentid].shape=="poly"?0:0;/*no-op*/var pos=this._getPos(this.pic);var x=(this.isMSIE)?(window.event.x-this.pic.offsetLeft):(e.pageX-pos.x);var y=(this.isMSIE)?(window.event.y-this.pic.offsetTop):(e.pageY-pos.y);x=x+this.pic_container.scrollLeft;y=y+this.pic_container.scrollTop;if(!e){e=window.event;}
// Shift promotes an in-progress polygon/bezier draw to its LASTDRAW (closing) state.
if(e.shiftKey){if(this.is_drawing==this.DM_POLYGON_DRAW){this.is_drawing=this.DM_POLYGON_LASTDRAW;}else{if(this.is_drawing==this.DM_BEZIER_DRAW){this.is_drawing=this.DM_BEZIER_LASTDRAW;}}}
// Poly/bezier draw: each click appends a point (offset by -5 for the cursor hotspot);
// LASTDRAW appends the final point, shrink-wraps, and ends the draw.
if(this.is_drawing==this.DM_POLYGON_DRAW||this.is_drawing==this.DM_BEZIER_DRAW){this.areas[this.currentid].xpoints[this.areas[this.currentid].xpoints.length]=x-5;this.areas[this.currentid].ypoints[this.areas[this.currentid].ypoints.length]=y-5;this.memory[this.currentid].downx=x;this.memory[this.currentid].downy=y;return;}else{if(this.is_drawing&&this.is_drawing!=this.DM_POLYGON_DRAW&&this.is_drawing!=this.DM_BEZIER_DRAW){if(this.is_drawing==this.DM_POLYGON_LASTDRAW||this.is_drawing==this.DM_BEZIER_LASTDRAW){this.areas[this.currentid].xpoints[this.areas[this.currentid].xpoints.length]=x-5;this.areas[this.currentid].ypoints[this.areas[this.currentid].ypoints.length]=y-5;this._updatecoords(this.currentid);this.is_drawing=0;this._polygonshrink(this.areas[this.currentid]);}
this.is_drawing=0;this.statusMessage(this.strings.READY);this.relaxArea(this.currentid);if(this.areas[this.currentid]==this._getLastArea()){if(this.config.mode!="editor2"){this.addNewArea();}
return;}
return;}}
// Not drawing yet: in "editor2" mode a click creates a new area of this.nextShape;
// otherwise an undefined/poly current area is (re)initialized to nextShape or rect.
if(this.config.mode=="editor2"){if(!this.nextShape){return;}
this.addNewArea();this.initArea(this.currentid,this.nextShape);}else{if(this.areas[this.currentid].shape=="undefined"||this.areas[this.currentid].shape=="poly"){var _c0=this.nextShape;if(!_c0){_c0="rect";}
this.initArea(this.currentid,_c0);}}
// Begin the draw state matching the shape (poly/bezier seed their point lists;
// rect/circle start a zero-size box at the click point).
if(this.areas[this.currentid].shape=="poly"){this.is_drawing=this.DM_POLYGON_DRAW;this.statusMessage(this.strings.POLYGON_DRAW);this.areas[this.currentid].style.left=x+"px";this.areas[this.currentid].style.top=y+"px";this.areas[this.currentid].style.width=0;this.areas[this.currentid].style.height=0;this.areas[this.currentid].xpoints=[];this.areas[this.currentid].ypoints=[];this.areas[this.currentid].xpoints[0]=x;this.areas[this.currentid].ypoints[0]=y;}else{if(this.areas[this.currentid].shape=="bezier1"){this.is_drawing=this.DM_BEZIER_DRAW;this.statusMessage(this.strings.BEZIER_DRAW);this.areas[this.currentid].style.left=x+"px";this.areas[this.currentid].style.top=y+"px";this.areas[this.currentid].style.width=0;this.areas[this.currentid].style.height=0;this.areas[this.currentid].xpoints=[];this.areas[this.currentid].ypoints=[];this.areas[this.currentid].xpoints[0]=x;this.areas[this.currentid].ypoints[0]=y;}else{if(this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_DRAW;this.statusMessage(this.strings.RECTANGLE_DRAW);this.areas[this.currentid].style.left=x+"px";this.areas[this.currentid].style.top=y+"px";this.areas[this.currentid].style.width=0;this.areas[this.currentid].style.height=0;}else{if(this.areas[this.currentid].shape=="circle"){this.is_drawing=this.DM_SQUARE_DRAW;this.statusMessage(this.strings.SQUARE_DRAW);this.areas[this.currentid].style.left=x+"px";this.areas[this.currentid].style.top=y+"px";this.areas[this.currentid].style.width=0;this.areas[this.currentid].style.height=0;}}}}
// Switch to the dotted DRAW border and remember the press point.
// highlightArea(id, mode) starts at the end of this line.
this._setBorder(this.currentid,"DRAW");this.memory[this.currentid].downx=x;this.memory[this.currentid].downy=y;};imgmap.prototype.highlightArea=function(id,_c2){if(this.is_drawing){return;}
// highlightArea(id, mode) body: fires "onFocusArea", applies HIGHLIGHT border,
// background and shape colors; mode "grad" prefixes the opacity with "-" to request
// a gradual fade in _setopacity.
if(this.areas[id]&&this.areas[id].shape!="undefined"){this.fireEvent("onFocusArea",this.areas[id]);this._setBorder(id,"HIGHLIGHT");var _c3=this.config.highlight_opacity;if(_c2=="grad"){_c3="-"+_c3;}
// blurArea(id, mode): the symmetric counterpart — fires "onBlurArea" and restores
// NORM border/colors, with the same "grad" fade convention.
this._setopacity(this.areas[id],this.config.CL_HIGHLIGHT_BG,_c3);this._repaint(this.areas[id],this.config.CL_HIGHLIGHT_SHAPE);}};imgmap.prototype.blurArea=function(id,_c5){if(this.is_drawing){return;}
if(this.areas[id]&&this.areas[id].shape!="undefined"){this.fireEvent("onBlurArea",this.areas[id]);this._setBorder(id,"NORM");var _c6=this.config.norm_opacity;if(_c5=="grad"){_c6="-"+_c6;}
// area_mousemove(e) begins at the end of this line; its body continues past this
// chunk and is left untouched here.
this._setopacity(this.areas[id],this.config.CL_NORM_BG,_c6);this._repaint(this.areas[id],this.config.CL_NORM_SHAPE);}};imgmap.prototype.area_mousemove=function(e){if(this.viewmode===1){return;}
if(!this.is_drawing){var obj=(this.isMSIE)?window.event.srcElement:e.currentTarget;if(obj.tagName=="DIV"){obj=obj.parentNode;}
if(obj.tagName=="image"||obj.tagName=="group"||obj.tagName=="shape"||obj.tagName=="stroke"){obj=obj.parentNode.parentNode;}
if(this.isOpera){e.layerX=e.offsetX;e.layerY=e.offsetY;}
var _c9=(this.isMSIE)?(window.event.offsetX):(e.layerX);var _ca=(this.isMSIE)?(window.event.offsetY):(e.layerY);var _cb=(obj.shape=="rect"||obj.shape=="circle");if(_cb&&_c9<6&&_ca>6){obj.style.cursor="w-resize";}else{if(_cb&&_c9>parseInt(obj.style.width,10)-6&&_ca>6){obj.style.cursor="e-resize";}else{if(_cb&&_c9>6&&_ca<6){obj.style.cursor="n-resize";}else{if(_cb&&_ca>parseInt(obj.style.height,10)-6&&_c9>6){obj.style.cursor="s-resize";}else{obj.style.cursor="move";}}}}
if(obj.aid!=this.draggedId){if(obj.style.cursor=="move"){obj.style.cursor="default";}
return;}
if(_c9<6&&_ca>6){if(this.areas[this.currentid].shape=="circle"){this.is_drawing=this.DM_SQUARE_RESIZE_LEFT;this.statusMessage(this.strings.SQUARE_RESIZE_LEFT);}else{if(this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_RESIZE_LEFT;this.statusMessage(this.strings.RECTANGLE_RESIZE_LEFT);}}}else{if(_c9>parseInt(this.areas[this.currentid].style.width,10)-6&&_ca>6){if(this.areas[this.currentid].shape=="circle"){this.is_drawing=this.DM_SQUARE_RESIZE_RIGHT;this.statusMessage(this.strings.SQUARE_RESIZE_RIGHT);}else{if(this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_RESIZE_RIGHT;this.statusMessage(this.strings.RECTANGLE_RESIZE_RIGHT);}}}else{if(_c9>6&&_ca<6){if(this.areas[this.currentid].shape=="circle"){this.is_drawing=this.DM_SQUARE_RESIZE_TOP;this.statusMessage(this.strings.SQUARE_RESIZE_TOP);}else{if(this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_RESIZE_TOP;this.statusMessage(this.strings.RECTANGLE_RESIZE_TOP);}}}else{if(_ca>parseInt(this.areas[this.currentid].style.height,10)-6&&_c9>6){if(this.areas[this.currentid].shape=="circle"){this.is_drawing=this.DM_SQUARE_RESIZE_BOTTOM;this.statusMessage(this.strings.SQUARE_RESIZE_BOTTOM);}else{if(this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_RESIZE_BOTTOM;this.statusMessage(this.strings.RECTANGLE_RESIZE_BOTTOM);}}}else{if(this.areas[this.currentid].shape=="circle"){this.is_drawing=this.DM_SQUARE_MOVE;this.statusMessage(this.strings.SQUARE_MOVE);this.memory[this.currentid].rdownx=_c9;this.memory[this.currentid].rdowny=_ca;}else{if(this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_MOVE;this.statusMessage(this.strings.RECTANGLE_MOVE);this.memory[this.currentid].rdownx=_c9;this.memory[this.currentid].rdowny=_ca;}else{if(this.areas[this.currentid].shape=="poly"||this.areas[this.currentid].shape=="bezier1"){if(this.areas[this.currentid].xpoints){for(var 
i=0,le=this.areas[this.currentid].xpoints.length;i<le;i++){this.memory[this.currentid].xpoints[i]=this.areas[this.currentid].xpoints[i];this.memory[this.currentid].ypoints[i]=this.areas[this.currentid].ypoints[i];}}
if(this.areas[this.currentid].shape=="poly"){this.is_drawing=this.DM_POLYGON_MOVE;this.statusMessage(this.strings.POLYGON_MOVE);}else{if(this.areas[this.currentid].shape=="bezier1"){this.is_drawing=this.DM_BEZIER_MOVE;this.statusMessage(this.strings.BEZIER_MOVE);}}
this.memory[this.currentid].rdownx=_c9;this.memory[this.currentid].rdowny=_ca;}}}}}}}
this.memory[this.currentid].width=parseInt(this.areas[this.currentid].style.width,10);this.memory[this.currentid].height=parseInt(this.areas[this.currentid].style.height,10);this.memory[this.currentid].top=parseInt(this.areas[this.currentid].style.top,10);this.memory[this.currentid].left=parseInt(this.areas[this.currentid].style.left,10);this._setBorder(this.currentid,"DRAW");this._setopacity(this.areas[this.currentid],this.config.CL_DRAW_BG,this.config.draw_opacity);}else{this.img_mousemove(e);}};imgmap.prototype.area_mouseup=function(e){if(this.viewmode===1){return;}
if(!this.is_drawing){var obj=(this.isMSIE)?window.event.srcElement:e.currentTarget;if(obj.tagName=="DIV"){obj=obj.parentNode;}
if(obj.tagName=="image"||obj.tagName=="group"||obj.tagName=="shape"||obj.tagName=="stroke"){obj=obj.parentNode.parentNode;}
if(this.areas[this.currentid]!=obj){if(typeof obj.aid=="undefined"){this.log("Cannot identify target area",1);return;}}
this.draggedId=null;}else{this.img_mouseup(e);}};imgmap.prototype.area_mouseover=function(e){if(this.viewmode===1&&this.config.mode!=="highlighter_spawn"){return;}
if(!this.is_drawing){var obj=(this.isMSIE)?window.event.srcElement:e.currentTarget;if(obj.tagName=="DIV"){obj=obj.parentNode;}
if(obj.tagName=="image"||obj.tagName=="group"||obj.tagName=="shape"||obj.tagName=="stroke"){obj=obj.parentNode.parentNode;}
this.highlightArea(obj.aid,"grad");}};imgmap.prototype.area_mouseout=function(e){if(this.viewmode===1&&this.config.mode!=="highlighter_spawn"){return;}
if(!this.is_drawing){var obj=(this.isMSIE)?window.event.srcElement:e.currentTarget;if(obj.tagName=="DIV"){obj=obj.parentNode;}
if(obj.tagName=="image"||obj.tagName=="group"||obj.tagName=="shape"||obj.tagName=="stroke"){obj=obj.parentNode.parentNode;}
this.blurArea(obj.aid,"grad");}};imgmap.prototype.area_dblclick=function(e){if(this.viewmode===1){return;}
if(!this.is_drawing){var obj=(this.isMSIE)?window.event.srcElement:e.currentTarget;if(obj.tagName=="DIV"){obj=obj.parentNode;}
if(obj.tagName=="image"||obj.tagName=="group"||obj.tagName=="shape"||obj.tagName=="stroke"){obj=obj.parentNode.parentNode;}
if(this.areas[this.currentid]!=obj){if(typeof obj.aid=="undefined"){this.log("Cannot identify target area",1);return;}
this.currentid=obj.aid;}
this.fireEvent("onDblClickArea",this.areas[this.currentid]);if(this.isMSIE){window.event.cancelBubble=true;}else{e.stopPropagation();}}};imgmap.prototype.area_mousedown=function(e){if(this.viewmode===1&&this.config.mode!=="highlighter_spawn"){return;}
if(!this.is_drawing){var obj=(this.isMSIE)?window.event.srcElement:e.currentTarget;if(obj.tagName=="DIV"){obj=obj.parentNode;}
if(obj.tagName=="image"||obj.tagName=="group"||obj.tagName=="shape"||obj.tagName=="stroke"){obj=obj.parentNode.parentNode;}
if(this.areas[this.currentid]!=obj){if(typeof obj.aid=="undefined"){this.log("Cannot identify target area",1);return;}
this.currentid=obj.aid;}
this.draggedId=this.currentid;this.selectedId=this.currentid;this.fireEvent("onSelectArea",this.areas[this.currentid]);if(this.isMSIE){window.event.cancelBubble=true;}else{e.stopPropagation();}}else{this.img_mousedown(e);}};imgmap.prototype.doc_keydown=function(e){if(this.viewmode===1){return;}
var key=(this.isMSIE)?event.keyCode:e.keyCode;if(key==46){if(this.selectedId!==null&&!this.is_drawing){this.removeArea(this.selectedId);}}else{if(key==16){if(this.is_drawing==this.DM_RECTANGLE_DRAW){this.is_drawing=this.DM_SQUARE_DRAW;this.statusMessage(this.strings.SQUARE2_DRAW);}}}};imgmap.prototype.doc_keyup=function(e){var key=(this.isMSIE)?event.keyCode:e.keyCode;if(key==16){if(this.is_drawing==this.DM_SQUARE_DRAW&&this.areas[this.currentid].shape=="rect"){this.is_drawing=this.DM_RECTANGLE_DRAW;this.statusMessage(this.strings.RECTANGLE_DRAW);}}};imgmap.prototype.doc_mousedown=function(e){if(this.viewmode===1){return;}
if(!this.is_drawing){this.selectedId=null;}};imgmap.prototype._getPos=function(_dd){var _de=0;var _df=0;if(_dd){var _e0=_dd.offsetParent;if(_e0){while((_e0=_dd.offsetParent)){if(_dd.offsetLeft>0){_de+=_dd.offsetLeft;}
if(_dd.offsetTop>0){_df+=_dd.offsetTop;}
_dd=_e0;}}else{_de=_dd.offsetLeft;_df=_dd.offsetTop;}}
return{x:_de,y:_df};};imgmap.prototype._getLastArea=function(){for(var i=this.areas.length-1;i>=0;i--){if(this.areas[i]){return this.areas[i];}}
return null;};imgmap.prototype.assignCSS=function(obj,_e3){var _e4=_e3.split(";");for(var i=0;i<_e4.length;i++){var p=_e4[i].split(":");var pp=this.trim(p[0]).split("-");var _e8=pp[0];for(var j=1;j<pp.length;j++){_e8+=pp[j].replace(/^\w/,pp[j].substring(0,1).toUpperCase());}
obj.style[this.trim(_e8)]=this.trim(p[1]);}};imgmap.prototype.fireEvent=function(evt,obj){if(typeof this.config.custom_callbacks[evt]=="function"){return this.config.custom_callbacks[evt](obj);}};imgmap.prototype.setAreaSize=function(id,w,h){if(id===null){id=this.currentid;}
if(w!==null){this.areas[id].width=w;this.areas[id].style.width=(w)+"px";this.areas[id].setAttribute("width",w);}
if(h!==null){this.areas[id].height=h;this.areas[id].style.height=(h)+"px";this.areas[id].setAttribute("height",h);}};imgmap.prototype.detectLanguage=function(){var _ef;if(navigator.userLanguage){_ef=navigator.userLanguage.toLowerCase();}else{if(navigator.language){_ef=navigator.language.toLowerCase();}else{return this.config.defaultLang;}}
if(_ef.length>=2){_ef=_ef.substring(0,2);return _ef;}
return this.config.defaultLang;};imgmap.prototype.disableSelection=function(_f0){if(typeof _f0=="undefined"||!_f0){return false;}
if(typeof _f0.onselectstart!="undefined"){_f0.onselectstart=function(){return false;};}
if(typeof _f0.unselectable!="undefined"){_f0.unselectable="on";}
if(typeof _f0.style.MozUserSelect!="undefined"){_f0.style.MozUserSelect="none";}};Function.prototype.bind=function(_f1){var _f2=this;return function(){return _f2.apply(_f1,arguments);};};imgmap.prototype.trim=function(str){return str.replace(/^\s+|\s+$/g,"");};function imgmap_spawnObjects(_f4){var _f5=document.getElementsByTagName("map");var _f6=document.getElementsByTagName("img");var _f7=[];var _f8;for(var i=0,le=_f5.length;i<le;i++){for(var j=0,le2=_f6.length;j<le2;j++){if("#"+_f5[i].name==_f6[j].getAttribute("usemap")){_f4.mode="highlighter_spawn";_f8=new imgmap(_f4);_f8.useImage(_f6[j]);_f8.setMapHTML(_f5[i]);_f8.viewmode=1;_f7.push(_f8);}}}} | zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/skins/zopyx_tinymceplugins_imgmap/imgmap/jscripts/imgmap_packed.js | imgmap_packed.js |
document.createElement("canvas").getContext||(function(){var s=Math,j=s.round,F=s.sin,G=s.cos,V=s.abs,W=s.sqrt,k=10,v=k/2;function X(){return this.context_||(this.context_=new H(this))}var L=Array.prototype.slice;function Y(b,a){var c=L.call(arguments,2);return function(){return b.apply(a,c.concat(L.call(arguments)))}}var M={init:function(b){if(/MSIE/.test(navigator.userAgent)&&!window.opera){var a=b||document;a.createElement("canvas");a.attachEvent("onreadystatechange",Y(this.init_,this,a))}},init_:function(b){b.namespaces.g_vml_||
b.namespaces.add("g_vml_","urn:schemas-microsoft-com:vml","#default#VML");b.namespaces.g_o_||b.namespaces.add("g_o_","urn:schemas-microsoft-com:office:office","#default#VML");if(!b.styleSheets.ex_canvas_){var a=b.createStyleSheet();a.owningElement.id="ex_canvas_";a.cssText="canvas{display:inline-block;overflow:hidden;text-align:left;width:300px;height:150px}g_vml_\\:*{behavior:url(#default#VML)}g_o_\\:*{behavior:url(#default#VML)}"}var c=b.getElementsByTagName("canvas"),d=0;for(;d<c.length;d++)this.initElement(c[d])},
initElement:function(b){if(!b.getContext){b.getContext=X;b.innerHTML="";b.attachEvent("onpropertychange",Z);b.attachEvent("onresize",$);var a=b.attributes;if(a.width&&a.width.specified)b.style.width=a.width.nodeValue+"px";else b.width=b.clientWidth;if(a.height&&a.height.specified)b.style.height=a.height.nodeValue+"px";else b.height=b.clientHeight}return b}};function Z(b){var a=b.srcElement;switch(b.propertyName){case "width":a.style.width=a.attributes.width.nodeValue+"px";a.getContext().clearRect();
break;case "height":a.style.height=a.attributes.height.nodeValue+"px";a.getContext().clearRect();break}}function $(b){var a=b.srcElement;if(a.firstChild){a.firstChild.style.width=a.clientWidth+"px";a.firstChild.style.height=a.clientHeight+"px"}}M.init();var N=[],B=0;for(;B<16;B++){var C=0;for(;C<16;C++)N[B*16+C]=B.toString(16)+C.toString(16)}function I(){return[[1,0,0],[0,1,0],[0,0,1]]}function y(b,a){var c=I(),d=0;for(;d<3;d++){var f=0;for(;f<3;f++){var h=0,g=0;for(;g<3;g++)h+=b[d][g]*a[g][f];c[d][f]=
h}}return c}function O(b,a){a.fillStyle=b.fillStyle;a.lineCap=b.lineCap;a.lineJoin=b.lineJoin;a.lineWidth=b.lineWidth;a.miterLimit=b.miterLimit;a.shadowBlur=b.shadowBlur;a.shadowColor=b.shadowColor;a.shadowOffsetX=b.shadowOffsetX;a.shadowOffsetY=b.shadowOffsetY;a.strokeStyle=b.strokeStyle;a.globalAlpha=b.globalAlpha;a.arcScaleX_=b.arcScaleX_;a.arcScaleY_=b.arcScaleY_;a.lineScale_=b.lineScale_}function P(b){var a,c=1;b=String(b);if(b.substring(0,3)=="rgb"){var d=b.indexOf("(",3),f=b.indexOf(")",d+
1),h=b.substring(d+1,f).split(",");a="#";var g=0;for(;g<3;g++)a+=N[Number(h[g])];if(h.length==4&&b.substr(3,1)=="a")c=h[3]}else a=b;return{color:a,alpha:c}}function aa(b){switch(b){case "butt":return"flat";case "round":return"round";case "square":default:return"square"}}function H(b){this.m_=I();this.mStack_=[];this.aStack_=[];this.currentPath_=[];this.fillStyle=this.strokeStyle="#000";this.lineWidth=1;this.lineJoin="miter";this.lineCap="butt";this.miterLimit=k*1;this.globalAlpha=1;this.canvas=b;
var a=b.ownerDocument.createElement("div");a.style.width=b.clientWidth+"px";a.style.height=b.clientHeight+"px";a.style.overflow="hidden";a.style.position="absolute";b.appendChild(a);this.element_=a;this.lineScale_=this.arcScaleY_=this.arcScaleX_=1}var i=H.prototype;i.clearRect=function(){this.element_.innerHTML=""};i.beginPath=function(){this.currentPath_=[]};i.moveTo=function(b,a){var c=this.getCoords_(b,a);this.currentPath_.push({type:"moveTo",x:c.x,y:c.y});this.currentX_=c.x;this.currentY_=c.y};
i.lineTo=function(b,a){var c=this.getCoords_(b,a);this.currentPath_.push({type:"lineTo",x:c.x,y:c.y});this.currentX_=c.x;this.currentY_=c.y};i.bezierCurveTo=function(b,a,c,d,f,h){var g=this.getCoords_(f,h),l=this.getCoords_(b,a),e=this.getCoords_(c,d);Q(this,l,e,g)};function Q(b,a,c,d){b.currentPath_.push({type:"bezierCurveTo",cp1x:a.x,cp1y:a.y,cp2x:c.x,cp2y:c.y,x:d.x,y:d.y});b.currentX_=d.x;b.currentY_=d.y}i.quadraticCurveTo=function(b,a,c,d){var f=this.getCoords_(b,a),h=this.getCoords_(c,d),g={x:this.currentX_+
0.6666666666666666*(f.x-this.currentX_),y:this.currentY_+0.6666666666666666*(f.y-this.currentY_)};Q(this,g,{x:g.x+(h.x-this.currentX_)/3,y:g.y+(h.y-this.currentY_)/3},h)};i.arc=function(b,a,c,d,f,h){c*=k;var g=h?"at":"wa",l=b+G(d)*c-v,e=a+F(d)*c-v,m=b+G(f)*c-v,r=a+F(f)*c-v;if(l==m&&!h)l+=0.125;var n=this.getCoords_(b,a),o=this.getCoords_(l,e),q=this.getCoords_(m,r);this.currentPath_.push({type:g,x:n.x,y:n.y,radius:c,xStart:o.x,yStart:o.y,xEnd:q.x,yEnd:q.y})};i.rect=function(b,a,c,d){this.moveTo(b,
a);this.lineTo(b+c,a);this.lineTo(b+c,a+d);this.lineTo(b,a+d);this.closePath()};i.strokeRect=function(b,a,c,d){var f=this.currentPath_;this.beginPath();this.moveTo(b,a);this.lineTo(b+c,a);this.lineTo(b+c,a+d);this.lineTo(b,a+d);this.closePath();this.stroke();this.currentPath_=f};i.fillRect=function(b,a,c,d){var f=this.currentPath_;this.beginPath();this.moveTo(b,a);this.lineTo(b+c,a);this.lineTo(b+c,a+d);this.lineTo(b,a+d);this.closePath();this.fill();this.currentPath_=f};i.createLinearGradient=function(b,
a,c,d){var f=new D("gradient");f.x0_=b;f.y0_=a;f.x1_=c;f.y1_=d;return f};i.createRadialGradient=function(b,a,c,d,f,h){var g=new D("gradientradial");g.x0_=b;g.y0_=a;g.r0_=c;g.x1_=d;g.y1_=f;g.r1_=h;return g};i.drawImage=function(b){var a,c,d,f,h,g,l,e,m=b.runtimeStyle.width,r=b.runtimeStyle.height;b.runtimeStyle.width="auto";b.runtimeStyle.height="auto";var n=b.width,o=b.height;b.runtimeStyle.width=m;b.runtimeStyle.height=r;if(arguments.length==3){a=arguments[1];c=arguments[2];h=g=0;l=d=n;e=f=o}else if(arguments.length==
5){a=arguments[1];c=arguments[2];d=arguments[3];f=arguments[4];h=g=0;l=n;e=o}else if(arguments.length==9){h=arguments[1];g=arguments[2];l=arguments[3];e=arguments[4];a=arguments[5];c=arguments[6];d=arguments[7];f=arguments[8]}else throw Error("Invalid number of arguments");var q=this.getCoords_(a,c),t=[];t.push(" <g_vml_:group",' coordsize="',k*10,",",k*10,'"',' coordorigin="0,0"',' style="width:',10,"px;height:",10,"px;position:absolute;");if(this.m_[0][0]!=1||this.m_[0][1]){var E=[];E.push("M11=",
this.m_[0][0],",","M12=",this.m_[1][0],",","M21=",this.m_[0][1],",","M22=",this.m_[1][1],",","Dx=",j(q.x/k),",","Dy=",j(q.y/k),"");var p=q,z=this.getCoords_(a+d,c),w=this.getCoords_(a,c+f),x=this.getCoords_(a+d,c+f);p.x=s.max(p.x,z.x,w.x,x.x);p.y=s.max(p.y,z.y,w.y,x.y);t.push("padding:0 ",j(p.x/k),"px ",j(p.y/k),"px 0;filter:progid:DXImageTransform.Microsoft.Matrix(",E.join(""),", sizingmethod='clip');")}else t.push("top:",j(q.y/k),"px;left:",j(q.x/k),"px;");t.push(' ">','<g_vml_:image src="',b.src,
'"',' style="width:',k*d,"px;"," height:",k*f,'px;"',' cropleft="',h/n,'"',' croptop="',g/o,'"',' cropright="',(n-h-l)/n,'"',' cropbottom="',(o-g-e)/o,'"'," />","</g_vml_:group>");this.element_.insertAdjacentHTML("BeforeEnd",t.join(""))};i.stroke=function(b){var a=[],c=P(b?this.fillStyle:this.strokeStyle),d=c.color,f=c.alpha*this.globalAlpha;a.push("<g_vml_:shape",' filled="',!!b,'"',' style="position:absolute;width:',10,"px;height:",10,'px;"',' coordorigin="0 0" coordsize="',k*10," ",k*10,'"',' stroked="',
!b,'"',' path="');var h={x:null,y:null},g={x:null,y:null},l=0;for(;l<this.currentPath_.length;l++){var e=this.currentPath_[l];switch(e.type){case "moveTo":a.push(" m ",j(e.x),",",j(e.y));break;case "lineTo":a.push(" l ",j(e.x),",",j(e.y));break;case "close":a.push(" x ");e=null;break;case "bezierCurveTo":a.push(" c ",j(e.cp1x),",",j(e.cp1y),",",j(e.cp2x),",",j(e.cp2y),",",j(e.x),",",j(e.y));break;case "at":case "wa":a.push(" ",e.type," ",j(e.x-this.arcScaleX_*e.radius),",",j(e.y-this.arcScaleY_*e.radius),
" ",j(e.x+this.arcScaleX_*e.radius),",",j(e.y+this.arcScaleY_*e.radius)," ",j(e.xStart),",",j(e.yStart)," ",j(e.xEnd),",",j(e.yEnd));break}if(e){if(h.x==null||e.x<h.x)h.x=e.x;if(g.x==null||e.x>g.x)g.x=e.x;if(h.y==null||e.y<h.y)h.y=e.y;if(g.y==null||e.y>g.y)g.y=e.y}}a.push(' ">');if(b)if(typeof this.fillStyle=="object"){var m=this.fillStyle,r=0,n={x:0,y:0},o=0,q=1;if(m.type_=="gradient"){var t=m.x1_/this.arcScaleX_,E=m.y1_/this.arcScaleY_,p=this.getCoords_(m.x0_/this.arcScaleX_,m.y0_/this.arcScaleY_),
z=this.getCoords_(t,E);r=Math.atan2(z.x-p.x,z.y-p.y)*180/Math.PI;if(r<0)r+=360;if(r<1.0E-6)r=0}else{var p=this.getCoords_(m.x0_,m.y0_),w=g.x-h.x,x=g.y-h.y;n={x:(p.x-h.x)/w,y:(p.y-h.y)/x};w/=this.arcScaleX_*k;x/=this.arcScaleY_*k;var R=s.max(w,x);o=2*m.r0_/R;q=2*m.r1_/R-o}var u=m.colors_;u.sort(function(ba,ca){return ba.offset-ca.offset});var J=u.length,da=u[0].color,ea=u[J-1].color,fa=u[0].alpha*this.globalAlpha,ga=u[J-1].alpha*this.globalAlpha,S=[],l=0;for(;l<J;l++){var T=u[l];S.push(T.offset*q+
o+" "+T.color)}a.push('<g_vml_:fill type="',m.type_,'"',' method="none" focus="100%"',' color="',da,'"',' color2="',ea,'"',' colors="',S.join(","),'"',' opacity="',ga,'"',' g_o_:opacity2="',fa,'"',' angle="',r,'"',' focusposition="',n.x,",",n.y,'" />')}else a.push('<g_vml_:fill color="',d,'" opacity="',f,'" />');else{var K=this.lineScale_*this.lineWidth;if(K<1)f*=K;a.push("<g_vml_:stroke",' opacity="',f,'"',' joinstyle="',this.lineJoin,'"',' miterlimit="',this.miterLimit,'"',' endcap="',aa(this.lineCap),
'"',' weight="',K,'px"',' color="',d,'" />')}a.push("</g_vml_:shape>");this.element_.insertAdjacentHTML("beforeEnd",a.join(""))};i.fill=function(){this.stroke(true)};i.closePath=function(){this.currentPath_.push({type:"close"})};i.getCoords_=function(b,a){var c=this.m_;return{x:k*(b*c[0][0]+a*c[1][0]+c[2][0])-v,y:k*(b*c[0][1]+a*c[1][1]+c[2][1])-v}};i.save=function(){var b={};O(this,b);this.aStack_.push(b);this.mStack_.push(this.m_);this.m_=y(I(),this.m_)};i.restore=function(){O(this.aStack_.pop(),
this);this.m_=this.mStack_.pop()};function ha(b){var a=0;for(;a<3;a++){var c=0;for(;c<2;c++)if(!isFinite(b[a][c])||isNaN(b[a][c]))return false}return true}function A(b,a,c){if(!!ha(a)){b.m_=a;if(c)b.lineScale_=W(V(a[0][0]*a[1][1]-a[0][1]*a[1][0]))}}i.translate=function(b,a){A(this,y([[1,0,0],[0,1,0],[b,a,1]],this.m_),false)};i.rotate=function(b){var a=G(b),c=F(b);A(this,y([[a,c,0],[-c,a,0],[0,0,1]],this.m_),false)};i.scale=function(b,a){this.arcScaleX_*=b;this.arcScaleY_*=a;A(this,y([[b,0,0],[0,a,
0],[0,0,1]],this.m_),true)};i.transform=function(b,a,c,d,f,h){A(this,y([[b,a,0],[c,d,0],[f,h,1]],this.m_),true)};i.setTransform=function(b,a,c,d,f,h){A(this,[[b,a,0],[c,d,0],[f,h,1]],true)};i.clip=function(){};i.arcTo=function(){};i.createPattern=function(){return new U};function D(b){this.type_=b;this.r1_=this.y1_=this.x1_=this.r0_=this.y0_=this.x0_=0;this.colors_=[]}D.prototype.addColorStop=function(b,a){a=P(a);this.colors_.push({offset:b,color:a.color,alpha:a.alpha})};function U(){}G_vmlCanvasManager=
M;CanvasRenderingContext2D=H;CanvasGradient=D;CanvasPattern=U})(); | zopyx.tinymceplugins.imgmap | /zopyx.tinymceplugins.imgmap-0.3.2.1.zip/zopyx.tinymceplugins.imgmap-0.3.2.1/zopyx/tinymceplugins/imgmap/skins/zopyx_tinymceplugins_imgmap/imgmap/jscripts/excanvas.js | excanvas.js |
(function(){var r="function",f="string",i="unload",d=true,q="OK",t="TinyAutoSave",b=null,a=false,M="2.1.2",c="tinyautosave",h=a,n=a,l=a,E={"%":"%1","&":"%2",";":"%3","=":"%4","<":"%5"},D={"%1":"%","%2":"&","%3":";","%4":"=","%5":"<"},s=[],o={},u={},m="TinyAutoSave_Test_",g=b,p={dataKey:t,cookieFilter:b,saveDelegate:b,saveFinalDelegate:b,restoreDelegate:b,disposeDelegate:b,restoreImage:"",progressImage:"progress.gif",intervalSeconds:60,retentionMinutes:20,minSaveLength:50,askBeforeUnload:a,canRestore:a,busy:a,timer:b};try{localStorage.setItem(m,q);if(localStorage.getItem(m)===q){localStorage.removeItem(m);h=d}}catch(A){try{sessionStorage.setItem(m,q);if(sessionStorage.getItem(m)===q){sessionStorage.removeItem(m);n=d}}catch(A){l=tinymce.isIE}}tinymce.PluginManager.requireLangPack(c);tinymce.create("tinymce.plugins.TinyAutoSavePlugin",{editor:b,url:"",key:"",onPreSave:b,onPostSave:b,onSaveError:b,onPreRestore:b,onPostRestore:b,onRestoreError:b,showSaveProgress:d,progressDisplayTime:1200,init:function(d,m){var 
h="mceTinyAutoSaveRestore",e=this,o=tinymce.is,q=tinymce.resolve,a,k,n;if(l){if(!g)g=d.getElement();g.style.behavior="url('#default#userData')"}e.editor=d;e.id=d.id;e.url=m;e.key=d.getParam(c+"_key",d.id);a=C(e);a.restoreImage=m+"/images/restore."+(tinymce.isIE6?"gif":"png");e.setProgressImage(m+"/images/"+p.progressImage);a.intervalSeconds=Math.max(1,parseInt(d.getParam(c+"_interval_seconds",b)||d.getParam(c+"_interval",a.intervalSeconds)));a.retentionMinutes=Math.max(1,parseInt(d.getParam(c+"_retention_minutes",b)||d.getParam(c+"_retention",a.retentionMinutes)));a.minSaveLength=Math.max(1,parseInt(d.getParam(c+"_minlength",a.minSaveLength)));e.showSaveProgress=d.getParam(c+"_showsaveprogress",e.showSaveProgress);a.askBeforeUnload=d.getParam(c+"_ask_beforeunload",a.askBeforeUnload);a.canRestore=e.hasSavedContent();a.saveDelegate=j(e,z);a.saveFinalDelegate=j(e,x);a.restoreDelegate=j(e,y);d.addCommand("mceTinyAutoSave",a.saveDelegate);d.addCommand(h,a.restoreDelegate);d.addButton(c,{title:c+".restore_content",cmd:h,image:a.restoreImage});a.timer=window.setInterval(a.saveDelegate,a.intervalSeconds*1e3);tinymce.dom.Event.add(window,i,a.saveFinalDelegate);d.onRemove.add(a.saveFinalDelegate);d.onPostRender.add(function(b){b.controlManager.setDisabled(c,!a.canRestore)});k=d.getParam(c+"_oninit",b);if(o(k,f)){n=q(k);o(n,r)&&n.apply(e)}a.askBeforeUnload&&tinymce.dom.Event.add(window,i,tinymce.plugins.AutoSavePlugin._beforeUnloadHandler)},getInfo:function(){return {longname:t,author:"Speednet",authorurl:"http://www.speednet.biz/",infourl:"http://tinyautosave.googlecode.com/",version:M}},clear:function(){var d=this,f=d.editor,b=e(d);if(h)localStorage.removeItem(b.dataKey);else if(n)sessionStorage.removeItem(b.dataKey);else if(l)F(d);else tinymce.util.Cookie.remove(b.dataKey);b.canRestore=a;f.controlManager.setDisabled(c,d)},hasSavedContent:function(){var g=this,b=e(g),i=new 
Date,c,f;try{if(h||n){c=((h?localStorage.getItem(b.dataKey):sessionStorage.getItem(b.dataKey))||"").toString(),f=c.indexOf(",");if(f>8&&f<c.length-1){if(new Date(c.slice(0,f))>i)return d;if(h)localStorage.removeItem(b.dataKey);else sessionStorage.removeItem(b.dataKey)}return a}else if(l)return (w(g)||"").length>0;return (tinymce.util.Cookie.get(b.dataKey)||"").length>0}catch(j){return a}},setProgressImage:function(a){tinymce.is(a,f)&&J(e(this).progressImage=a)},"static":{_beforeUnloadHandler:function(){var b;tinymce.each(tinyMCE.editors,function(c){if(c.getParam("fullscreen_is_enabled"))return;if(c.isDirty()){b=c.getLang("autosave.unload_msg");return a}});return b}}});function L(){var b=this,a=e(b);a.timer&&window.clearInterval(a.timer);tinymce.dom.Event.remove(window,i,a.saveFinalDelegate);a.askBeforeUnload&&tinymce.dom.Event.remove(window,i,tinymce.plugins.AutoSavePlugin._beforeUnloadHandler);b.editor.onRemove.remove(a.saveFinalDelegate);B(b)}function k(a){if(!a)return d;var c,b,e=tinymce.is;if(e(a,f)){c=u[a];if(c)b=c[a];else u[a]=b=tinymce.resolve(a)}else if(e(a,r))b=a;else return d;return b.apply(this)}function x(){var a=e(this);a.saveDelegate();a.disposeDelegate()}function z(){var g=this,q=g.editor,b=e(g),u=tinymce.is,o=a,t=new Date,i,m,r,j,p,s;if(q&&!b.busy){b.busy=d;i=q.getContent();if(u(i,f)&&i.length>=b.minSaveLength){if(!k.call(g,g.onPreSave)){b.busy=a;return a}m=new Date(t.getTime()+b.retentionMinutes*60*1e3);try{if(h)localStorage.setItem(b.dataKey,m.toString()+","+v(i));else if(n)sessionStorage.setItem(b.dataKey,m.toString()+","+v(i));else if(l)K(g,i,m);else{r=b.dataKey+"=";j="; 
expires="+m.toUTCString();document.cookie=r+I(i).slice(0,4096-r.length-j.length)+j}o=d}catch(w){k.call(g,g.onSaveError)}if(o){p=q.controlManager;b.canRestore=d;p.setDisabled(c,a);if(g.showSaveProgress){j=tinymce.DOM.get(p.get(c).id);if(j){s=b.restoreImage;j.firstChild.src=b.progressImage;window.setTimeout(function(){j.firstChild.src=s},Math.min(g.progressDisplayTime,b.intervalSeconds*1e3-100))}}k.call(g,g.onPostSave)}}b.busy=a}return o}function y(){var g=this,m=g.editor,j=e(g),i=b,q=tinymce.is,o,p;if(m&&j.canRestore&&!j.busy){j.busy=d;if(!k.call(g,g.onPreRestore)){j.busy=a;return}try{if(h||n){i=((h?localStorage.getItem(j.dataKey):sessionStorage.getItem(j.dataKey))||"").toString();o=i.indexOf(",");if(o==-1)i=b;else i=G(i.slice(o+1,i.length))}else if(l)i=w(g);else{p=j.cookieFilter.exec(document.cookie);if(p)i=H(p[1])}if(!q(i,f))m.windowManager.alert(c+".no_content");else if(m.getContent().replace(/\s| |<\/?p[^>]*>|<br[^>]*>/gi,"").length===0){m.setContent(i);k.call(g,g.onPostRestore)}else m.windowManager.confirm(c+".warning_message",function(b){if(b){m.setContent(i);k.call(g,g.onPostRestore)}j.busy=a},g)}catch(r){k.call(g,g.onRestoreError)}j.busy=a}}function K(a,c,b){g.setAttribute(e(a).dataKey,c);g.expires=b.toUTCString();g.save("TinyMCE")}function w(a){g.load("TinyMCE");return g.getAttribute(e(a).dataKey)}function F(a){g.removeAttribute(e(a).dataKey)}function I(a){return a.replace(/[\x00-\x1f]+| | /gi," ").replace(/(.)\1{5,}|[%&;=<]/g,function(a){if(a.length>1)return "%0"+a.charAt(0)+a.length.toString()+"%";return E[a]})}function H(a){return a.replace(/%[1-5]|%0(.)(\d+)%/g,function(c,f,e){var a,b,d;if(c.length==2)return D[c];for(a=[],b=0,d=parseInt(e);b<d;b++)a.push(f);return a.join("")})}function v(a){return a.replace(/,/g,",")}function G(a){return a.replace(/,/g,",")}function J(b){var a=s.length;s[a]=new Image;s[a].src=b}function j(b,a){return function(){return a.apply(b)}}function C(a){var 
b=a.key,c=o[b];if(!c)c=o[b]=tinymce.extend({},p,{dataKey:p.dataKey+b,saveDelegate:j(a,z),saveFinalDelegate:j(a,x),restoreDelegate:j(a,y),disposeDelegate:j(a,L),cookieFilter:new RegExp("(?:^|;\\s*)"+p.dataKey+b+"=([^;]*)(?:;|$)","i")});return c}function e(a){return o[a.key]}function B(a){delete o[a.key]}tinymce.PluginManager.add(c,tinymce.plugins.TinyAutoSavePlugin)})(); | zopyx.tinymceplugins.tinyautosave | /zopyx.tinymceplugins.tinyautosave-1.0.1.tar.gz/zopyx.tinymceplugins.tinyautosave-1.0.1/zopyx/tinymceplugins/tinyautosave/skins/autosave/tinyautosave/editor_plugin.js | editor_plugin.js |
(function() {
//************************************************************************
// PRIVATE VARIABLES
var version = "2.1.2",
// The name of the plugin, as specified to TinyMCE
pluginName = "tinyautosave",
// Specifies if localStorage (HTML 5) is available
useLocalStorage = false,
// Specifies if sessionStorage (HTML 5) is available
useSessionStorage = false,
// Specifies if UserData (IE client storage) is available
useUserData = false,
// Translation keys for encoding/decoding cookie values
cookieEncodeKey = {"%": "%1", "&": "%2", ";": "%3", "=": "%4", "<": "%5"},
cookieDecodeKey = {"%1": "%", "%2": "&", "%3": ";", "%4": "=", "%5": "<"},
// Internal storage for preloaded images
preloadImages = [],
// Internal storage of settings for each plugin instance
instanceSettings = {},
// Cached storage of callback function resolution, for performance
callbackLookup = {},
// Unique key used to test if HTML 5 storage methods are available
testKey = "TinyAutoSave_Test_",
// The HTML element that IE's UserData will be attached to
userDataElement = null,
// Default settings for each plugin instance
settingsTemplate = {
dataKey: "TinyAutoSave",
cookieFilter: null,
saveDelegate: null,
saveFinalDelegate: null,
restoreDelegate: null,
disposeDelegate: null,
restoreImage: "",
progressImage: "progress.gif",
intervalSeconds: 60,
retentionMinutes: 20,
minSaveLength: 50,
askBeforeUnload: false,
canRestore: false,
busy: false,
timer: null
};
//************************************************************************
// TEST STORAGE METHODS
// Determine best storage method by storing and retrieving test data
try {
localStorage.setItem(testKey, "OK");
if (localStorage.getItem(testKey) === "OK") {
localStorage.removeItem(testKey);
useLocalStorage = true;
}
}
catch (e) {
try {
sessionStorage.setItem(testKey, "OK");
if (sessionStorage.getItem(testKey) === "OK") {
sessionStorage.removeItem(testKey);
useSessionStorage = true;
}
}
catch (e) {
useUserData = tinymce.isIE;
}
}
//************************************************************************
// TINYMCE INTEROP
tinymce.PluginManager.requireLangPack(pluginName);
tinymce.create("tinymce.plugins.TinyAutoSavePlugin", {
/// <summary>
/// Automatically saves the editor contents periodically and just before leaving the current page.
/// Allows the user to rescue the contents of the last autosave, in case they did not intend to
/// navigate away from the current page or the browser window was closed before posting the content.
/// </summary>
/// <field name="editor" type="Object" mayBeNull="false">
/// A reference to the TinyMCE editor instance that contains this TinyAutoSave plugin instance.
/// </field>
/// <field name="url" type="String" mayBeNull="false">
/// The URL of the folder containing the TinyAutoSave plugin. Does not include a trailing slash.
/// </field>
/// <field name="key" type="String" mayBeNull="false">
/// A string value identifying the storage and settings for the plugin, as set by tinyautosave_key.
/// </field>
/// <field name="onPreSave" type="String or Function" mayBeNull="false">
/// (String) Name of a callback function that gets called before each auto-save is performed.
/// (Function) A function that gets called before each auto-save is performed.
/// The callback function must return a Boolean value of true if the auto-save is to proceed
/// normally, or false if the auto-save is to be canceled. The editor instance is the context of the
/// callback (assigned to 'this').
/// </field>
/// <field name="onPostSave" type="String or Function" mayBeNull="false">
/// (String) Name of a callback function that gets called after each auto-save is performed.
/// (Function) A function that gets called after each auto-save is performed.
/// Any return value from the callback function is ignored. The editor instance is the context of
/// the callback (assigned to 'this').
/// </field>
/// <field name="onSaveError" type="String or Function" mayBeNull="false">
/// (String) Name of a callback function that gets called each time an auto-save fails in an error condition.
/// (Function) A function that gets called each time an auto-save fails in an error condition.
/// The editor instance is the context of the callback (assigned to 'this').
/// </field>
/// <field name="onPreRestore" type="String or Function" mayBeNull="false">
/// (String) Name of a callback function that gets called before a restore request is performed.
/// (Function) A function that gets called before a restore request is performed.
/// The callback function must return a Boolean value of true if the restore is to proceed normally,
/// or false if the restore is to be canceled. The editor instance is the context of the callback
/// (assigned to 'this').
/// </field>
/// <field name="onPostRestore" type="String or Function" mayBeNull="false">
/// (String) Name of a callback function that gets called after a restore request is performed.
/// (Function) A function that gets called after a restore request is performed.
/// Any return value from the callback function is ignored. The editor instance is the context of
/// the callback (assigned to 'this').
/// </field>
/// <field name="onRestoreError" type="String or Function" mayBeNull="false">
/// (String) Name of a callback function that gets called each time a restore request fails in an error condition.
/// (Function) A function that gets called each time a restore request fails in an error condition.
/// The editor instance is the context of the callback (assigned to 'this').
/// </field>
/// <field name="progressDisplayTime" type="Number" integer="true" mayBeNull="false">
/// Number of milliseconds that the progress image is displayed after an auto-save. The default is
/// 1200, which is the equivalent of 1.2 seconds.
/// </field>
/// <field name="showSaveProgress" type="Boolean" mayBeNull="false">
/// Receives the Boolean value specified in the tinyautosave_showsaveprogress configuration option,
/// or true if none is specified. This is a public read/write property, and the behavior of the
/// toolbar button throbber/progress can be altered dynamically by changing this property.
/// </field>
/// <remarks>
///
/// CONFIGURATION OPTIONS:
///
/// tinyautosave_key - (String, default = editor id) A string value used to identify the autosave
/// storage and settings to use for the plugin instance. If tinyautosave_key is not specified, then
/// the editor's id property is used. If you set the tinyautosave_key for all editors to the same value,
/// that would create a single autosave storage instance and a single set of autosave settings to use
/// with all editors. Because each key maintains its own plugin settings, tinyautosave_key can also be
/// used to apply a different UI or behavior to individual editors. For example, two editors on the same
/// page could use different progress images, or they could autosave at different intervals.
///
/// tinyautosave_interval_seconds - (Number, default = 60) The number of seconds between automatic saves.
/// When the editor is first displayed, an autosave will not occur for at least this amount of time.
///
/// tinyautosave_minlength - (Number, default = 50) The minimum number of characters that must be in the
/// editor before an autosave will occur. The character count includes all non-visible characters,
/// such as HTML tags. Although this can be set to 0 (zero), it is not recommended. Doing so would
/// open the possibility that if the user accidentally refreshes the page, the empty editor contents
/// would overwrite the rescue content, effectively defeating the purpose of the plugin.
///
/// tinyautosave_retention_minutes - (Number, default = 20) The number of minutes since the last autosave
/// that content will remain in the rescue storage space before it is automatically expired.
///
/// tinyautosave_oninit - (String, default = null) The name of a function to call immediately after the
/// TinyAutoSave plugin instance is initialized. Can include dot-notation, e.g., "myObject.myFunction".
/// The context of the function call (the value of 'this') is the plugin instance. This function is
/// a good place to set any of the public properties that you want to configure.
///
/// tinyautosave_showsaveprogress - (Boolean, default = true) When true, the toolbar button will show a
/// brief animation every time an autosave occurs.
///
/// COMMANDS:
///
/// Available TinyMCE commands are:
/// mceTinyAutoSave - Perform an auto-save
/// mceTinyAutoSaveRestore - Restore auto-saved content into the editor
///
/// PUBLIC PROPERTIES:
///
/// Available public properties of the TinyAutoSave plugin are:
/// editor (Object)
/// url (String)
/// key (String)
/// onPreSave (String)
/// onPostSave (String)
/// onSaveError (String)
/// onPreRestore (String)
/// onPostRestore (String)
/// onRestoreError (String)
/// progressDisplayTime (Number)
/// showSaveProgress (Boolean)
///
/// See [field] definitions above for detailed descriptions of the public properties.
///
/// PUBLIC METHODS:
///
/// Available public methods of the TinyAutoSave plugin are:
/// init() - [Called by TinyMCE]
/// getInfo() - [Called by TinyMCE]
/// clear() - Clears any auto-saved content currently stored, and "dims" the Restore toolbar button.
/// hasSavedContent() - Returns true if there is auto-save content available to be restored, or false if not.
/// setProgressImage() - Sets the URL of the image that will be displayed every time an auto-save occurs.
///
/// TECHNOLOGY DISCUSSION:
///
/// The plugin attempts to use the most advanced features available in the current browser to save
/// as much content as possible. There are a total of four different methods used to autosave the
/// content. In order of preference, they are:
///
/// 1. localStorage - A new feature of HTML 5, localStorage can store megabytes of data per domain
/// on the client computer. Data stored in the localStorage area has no expiration date, so we must
/// manage expiring the data ourselves. localStorage is fully supported by IE8, and it is supposed
/// to be working in Firefox 3 and Safari 3.2, but in reality it is flaky in those browsers. As
/// HTML 5 gets wider support, the TinyAutoSave plugin will use it automatically. In Windows Vista/7,
/// localStorage is stored in the following folder:
/// C:\Users\[username]\AppData\Local\Microsoft\Internet Explorer\DOMStore\[tempFolder]
///
/// 2. sessionStorage - A new feature of HTML 5, sessionStorage works similarly to localStorage,
/// except it is designed to expire after a certain amount of time. Because the specification
/// around expiration date/time is very loosely-described, it is preferable to use localStorage and
/// manage the expiration ourselves. sessionStorage has similar storage characteristics to
/// localStorage, although it seems to have better support by Firefox 3 at the moment. (That will
/// certainly change as Firefox continues getting better at HTML 5 adoption.)
///
/// 3. UserData - A very under-exploited feature of Microsoft Internet Explorer, UserData is a
/// way to store up to 128K of data per "document", or up to 1MB of data per domain, on the client
/// computer. The feature is available for IE 5+, which makes it available for every version of IE
/// supported by TinyMCE. The content is persistent across browser restarts and expires on the
/// date/time specified, just like a cookie. However, the data is not cleared when the user clears
/// cookies on the browser, which makes it well-suited for rescuing autosaved content. UserData,
/// like other Microsoft IE browser technologies, is implemented as a behavior attached to a
/// specific DOM object, so in this case we attach the behavior to the same DOM element that the
/// TinyMCE editor instance is attached to.
///
/// 4. Cookies - When none of the above methods is available, the autosave content is stored in a
/// cookie. This limits the total saved content to around 4,000 characters, but we use every bit
/// of that space as we can. To maximize space utilization, before saving the content, we remove
/// all newlines and other control characters less than ASCII code 32, change &nbsp; instances to
/// a regular space character, and do some minor compression techniques. (TO-DO: add more
/// compression techniques.) Unfortunately, because the data is stored in a cookie, we have to
/// waste some space encoding certain characters to avoid server warnings about dangerous content
/// (as well as overcoming some browser bugs in Safari). Instead of using the built-in escape()
/// function, we do a proprietary encoding that only encodes the bare minimum characters, and uses
/// only two bytes per encoded character, rather than 3 bytes like escape() does. escape() encodes
/// most non-alpha-numeric characters because it is designed for encoding URLs, not for encoding
/// cookies. It is a huge space-waster in cookies, and in this case would have cut the amount
/// of autosaved content by at least half.
///
/// </remarks>
//************************************************************************
// PUBLIC PROPERTIES
editor: null,
url: "",
key: "",
onPreSave: null,
onPostSave: null,
onSaveError: null,
onPreRestore: null,
onPostRestore: null,
onRestoreError: null,
showSaveProgress: true,
progressDisplayTime: 1200, // Milliseconds
//************************************************************************
// PUBLIC METHODS
init: function (ed, url) {
/// <summary>
/// Initialization function called by TinyMCE.
/// </summary>
var t = this,
is = tinymce.is,
resolve = tinymce.resolve,
s, onInit, f;
if (useUserData) {
if (!userDataElement) {
userDataElement = ed.getElement();
}
userDataElement.style.behavior = "url('#default#userData')";
}
t.editor = ed;
t.id = ed.id;
t.url = url;
t.key = ed.getParam(pluginName + "_key", ed.id);
s = newInstanceSettings(t);
s.restoreImage = url + "/images/restore." + (tinymce.isIE6? "gif" : "png");
t.setProgressImage(url + "/images/" + settingsTemplate.progressImage);
// Get the auto-save interval from the TinyMCE config. (i.e., auto-save every 'x' seconds.)
// Integer value. If not specified in config, default is 60 seconds; minimum is 1 second.
// Either 'tinyautosave_interval_seconds' or 'tinyautosave_interval' can be used, but 'tinyautosave_interval_seconds' provides better clarity.
s.intervalSeconds = Math.max(1, parseInt(ed.getParam(pluginName + "_interval_seconds", null) || ed.getParam(pluginName + "_interval", s.intervalSeconds)));
// Get the rescue content retention time from the TinyMCE config. (i.e., rescue content available for 'x' minutes after navigating from page.)
// Integer value. If not specified in config, default is 20 minutes; minimum is 1 minute.
// Don't make this too long; users will get weirded out if content from long ago is still hanging around.
// Either 'tinyautosave_retention_minutes' or 'tinyautosave_retention' can be used, but 'tinyautosave_retention_minutes' provides better clarity.
s.retentionMinutes = Math.max(1, parseInt(ed.getParam(pluginName + "_retention_minutes", null) || ed.getParam(pluginName + "_retention", s.retentionMinutes)));
// Get the minimum content length from the TinyMCE config. (i.e., minimum number of characters in the editor before an auto-save can occur.)
// Integer value. If not specified in config, default is 50 characters; minimum is 1 character.
// Prevents situation where user accidentally hits Refresh, then their rescue content is wiped out when the editor auto-saves the blank editor on the refreshed page. No need to auto-save a few characters.
// Specified as 'tinyautosave_minlength' in the config.
s.minSaveLength = Math.max(1, parseInt(ed.getParam(pluginName + "_minlength", s.minSaveLength)));
// Determine if progress animation should occur by reading TinyMCE config.
// Boolean value. If not specified in config, default is true, progress animation will be displayed after each auto-save.
// Specified as 'tinyautosave_showsaveprogress' in the config.
t.showSaveProgress = ed.getParam(pluginName + "_showsaveprogress", t.showSaveProgress);
s.askBeforeUnload = ed.getParam(pluginName + "_ask_beforeunload", s.askBeforeUnload);
s.canRestore = t.hasSavedContent();
// Save action delegates with context
s.saveDelegate = createDelegate(t, save);
s.saveFinalDelegate = createDelegate(t, saveFinal);
s.restoreDelegate = createDelegate(t, restore);
// Register commands
ed.addCommand("mceTinyAutoSave", s.saveDelegate);
ed.addCommand("mceTinyAutoSaveRestore", s.restoreDelegate);
// Register restore button
ed.addButton(pluginName, {
title: pluginName + ".restore_content",
cmd: "mceTinyAutoSaveRestore",
image: s.restoreImage
});
// Set save interval
s.timer = window.setInterval(s.saveDelegate, s.intervalSeconds * 1000);
// Ensures content is autosaved before window closes or navigates to new page
tinymce.dom.Event.add(window, "unload", s.saveFinalDelegate);
// Save when editor is removed (may be different than window's onunload event, so we need to do both)
ed.onRemove.add(s.saveFinalDelegate);
// Set initial state of restore button
ed.onPostRender.add(function (ed, cm) {
ed.controlManager.setDisabled(pluginName, !s.canRestore);
});
// Call tinyautosave_oninit, if specified
// This config option is a String value specifying the name of a function to call. Can include dot-notation, e.g., "myObject.myFunction".
// The context of the function call (the value of 'this') is the plugin instance.
onInit = ed.getParam(pluginName + "_oninit", null);
if (is(onInit, "string")) {
f = resolve(onInit);
if (is(f, "function")) {
f.apply(t);
}
}
// Add ask before unload dialog
if (s.askBeforeUnload) {
tinymce.dom.Event.add(window, "unload", tinymce.plugins.AutoSavePlugin._beforeUnloadHandler);
}
},
getInfo: function() {
/// <summary>
/// Called by TinyMCE, returns standard information about the plugin
/// to display in the About box.
/// </summary>
return {
longname: "TinyAutoSave",
author: "Speednet",
authorurl: "http://www.speednet.biz/",
infourl: "http://tinyautosave.googlecode.com/",
version: version
};
},
clear: function () {
/// <summary>
/// Removes the autosave content from storage. Disables the 'tinyautosave' toolbar button.
/// </summary>
var t = this,
ed = t.editor,
s = getInstanceSettings(t);
if (useLocalStorage) {
localStorage.removeItem(s.dataKey);
}
else if (useSessionStorage) {
sessionStorage.removeItem(s.dataKey);
}
else if (useUserData) {
removeUserData(t);
}
else {
tinymce.util.Cookie.remove(s.dataKey);
}
s.canRestore = false;
ed.controlManager.setDisabled(pluginName, t);
},
hasSavedContent: function () {
/// <summary>
/// Returns true if there is unexpired autosave content available to be restored.
/// </summary>
/// <returns type="Boolean"></returns>
var t = this,
s = getInstanceSettings(t),
now = new Date(),
content, i;
try {
if (useLocalStorage || useSessionStorage) {
content = ((useLocalStorage? localStorage.getItem(s.dataKey) : sessionStorage.getItem(s.dataKey)) || "").toString(),
i = content.indexOf(",");
if ((i > 8) && (i < content.length - 1)) {
if ((new Date(content.slice(0, i))) > now) {
return true;
}
// Remove expired content
if (useLocalStorage) {
localStorage.removeItem(s.dataKey);
}
else {
sessionStorage.removeItem(s.dataKey);
}
}
return false;
}
else if (useUserData) {
return ((getUserData(t) || "").length > 0);
}
return ((tinymce.util.Cookie.get(s.dataKey) || "").length > 0);
}
catch (e) {
return false;
}
},
setProgressImage: function (url) {
/// <summary>
/// Sets the progress image/throbber to a specified URL. The progress image
/// temporarily replaces the image on the TinyAutoSave toolbar button every
/// time an auto-save occurs. The default value is
/// "[tinymce]/plugins/tinyautosave/images/progress.gif". Can be set any time
/// after the plugin initializes. The progress image is normally an animated GIF,
/// but it can be any image type. Because the image will be displayed on a toolbar
/// button, so the recommended size is 20 x 20 (using a centered 16 x 16 image).
/// </summary>
/// <param name="url" type="String" optional="false" mayBeNull="false">
/// The URL of the image that will be displayed on the restore toolbar button
/// every time an auto-save occurs.
/// </param>
if (tinymce.is(url, "string")) {
preloadImage(getInstanceSettings(this).progressImage = url);
}
},
"static": {
_beforeUnloadHandler: function () {
var msg;
tinymce.each(tinyMCE.editors, function (ed) {
if (ed.getParam("fullscreen_is_enabled")) {
return;
}
if (ed.isDirty()) {
msg = ed.getLang("autosave.unload_msg");
return false;
}
});
return msg;
}
}
});
//************************************************************************
// PRIVATE FUNCTIONS
function dispose() {
/// <summary>
/// Called just before the current page unloads. Cleans up memory, releases
/// timers and events.
/// </summary>
/// <remarks>
/// Must be called with context ("this" keyword) set to plugin instance
/// </remarks>
var t = this,
s = getInstanceSettings(t);
if (s.timer) {
window.clearInterval(s.timer);
}
tinymce.dom.Event.remove(window, "unload", s.saveFinalDelegate);
if (s.askBeforeUnload) {
tinymce.dom.Event.remove(window, "unload", tinymce.plugins.AutoSavePlugin._beforeUnloadHandler);
}
t.editor.onRemove.remove(s.saveFinalDelegate);
removeInstanceSettings(t);
}
function execCallback(n) {
/// <summary>
/// Executes a callback function. The callback function can be specified
/// either as a string or a function.
/// </summary>
/// <remarks>
/// Must be called with context ("this" keyword) set to plugin instance
/// </remarks>
if (!n) {
return true;
}
var c, f,
is = tinymce.is;
if (is(n, "string")) {
c = callbackLookup[n];
if (c) {
f = c[n];
}
else {
callbackLookup[n] = f = tinymce.resolve(n);
}
}
else if (is(n, "function")) {
f = n;
}
else {
return true;
}
return f.apply(this);
}
function saveFinal() {
/// <summary>
/// Called just before the current page is unloaded. Performs a final save, then
/// cleans up memory to prevent leaks.
/// </summary>
/// <remarks>
/// Must be called with context ("this" keyword) set to plugin instance
/// </remarks>
var s = getInstanceSettings(this);
s.saveDelegate();
s.disposeDelegate();
}
function save() {
/// <summary>
/// Performs a single, one-time autosave. Checks to be sure there is at least the
/// specified minimum number of characters in the editor before saving. Briefly
/// animates the toolbar button. Enables the 'tinyautosave' button to indicate
/// autosave content is available.
/// </summary>
/// <returns type="Boolean">
/// Returns true if content was saved, or false if not.
/// </returns>
/// <remarks>
/// Must be called with context ("this" keyword) set to plugin instance
/// </remarks>
var t = this,
ed = t.editor,
s = getInstanceSettings(t),
is = tinymce.is,
saved = false,
now = new Date(),
content, exp, a, b, cm, img;
if ((ed) && (!s.busy)) {
s.busy = true;
content = ed.getContent();
if (is(content, "string") && (content.length >= s.minSaveLength)) {
if (!execCallback.call(t, t.onPreSave)) {
s.busy = false;
return false;
}
exp = new Date(now.getTime() + (s.retentionMinutes * 60 * 1000));
try {
if (useLocalStorage) {
localStorage.setItem(s.dataKey, exp.toString() + "," + encodeStorage(content)); // Uses local time for expiration
}
else if (useSessionStorage) {
sessionStorage.setItem(s.dataKey, exp.toString() + "," + encodeStorage(content)); // Uses local time for expiration
}
else if (useUserData) {
setUserData(t, content, exp);
}
else {
a = s.dataKey + "=";
b = "; expires=" + exp.toUTCString();
document.cookie = a + encodeCookie(content).slice(0, 4096 - a.length - b.length) + b;
}
saved = true;
}
catch (e) {
execCallback.call(t, t.onSaveError);
}
if (saved) {
cm = ed.controlManager;
s.canRestore = true;
cm.setDisabled(pluginName, false);
if (t.showSaveProgress) {
b = tinymce.DOM.get(cm.get(pluginName).id);
if (b) {
img = s.restoreImage;
b.firstChild.src = s.progressImage;
window.setTimeout(
function () {
b.firstChild.src = img;
},
Math.min(t.progressDisplayTime, s.intervalSeconds * 1000 - 100)
);
}
}
execCallback.call(t, t.onPostSave);
}
}
s.busy = false;
}
return saved;
}
function restore() {
/// <summary>
/// Called when the user clicks the 'tinyautosave' button on the toolbar.
/// Replaces the contents of the editor with the autosaved content. If the editor
/// contains more than just whitespace, the user is warned and given the option
/// to abort. The autosaved content remains in storage.
/// </summary>
/// <remarks>
/// Must be called with context ("this" keyword) set to plugin instance
/// </remarks>
var t = this,
ed = t.editor,
s = getInstanceSettings(t),
content = null,
is = tinymce.is,
i, m;
if ((ed) && (s.canRestore) && (!s.busy)) {
s.busy = true;
if (!execCallback.call(t, t.onPreRestore)) {
s.busy = false;
return;
}
try {
if (useLocalStorage || useSessionStorage) {
content = ((useLocalStorage? localStorage.getItem(s.dataKey) : sessionStorage.getItem(s.dataKey)) || "").toString();
i = content.indexOf(",");
if (i == -1) {
content = null;
}
else {
content = decodeStorage(content.slice(i + 1, content.length));
}
}
else if (useUserData) {
content = getUserData(t);
}
else {
m = s.cookieFilter.exec(document.cookie);
if (m) {
content = decodeCookie(m[1]);
}
}
if (!is(content, "string")) {
ed.windowManager.alert(pluginName + ".no_content");
}
else {
// If current contents are empty or whitespace, the confirmation is unnecessary
if (ed.getContent().replace(/\s| |<\/?p[^>]*>|<br[^>]*>/gi, "").length === 0) {
ed.setContent(content);
execCallback.call(t, t.onPostRestore);
}
else {
ed.windowManager.confirm(
pluginName + ".warning_message",
function (ok) {
if (ok) {
ed.setContent(content);
execCallback.call(t, t.onPostRestore);
}
s.busy = false;
},
t
);
}
}
}
catch (e) {
execCallback.call(t, t.onRestoreError);
}
s.busy = false;
}
}
function setUserData(inst, str, exp) {
/// <summary>
/// IE browsers only. Saves a string to the 'UserData' storage area.
/// </summary>
/// <param name="inst" type="Object" optional="false" mayBeNull="false">
/// Plugin instance for which to set the UserData
/// </param>
/// <param name="str" type="String" optional="false" mayBeNull="false">
/// String value to save.
/// </param>
/// <param name="exp" type="Date" optional="false" mayBeNull="false">
/// Date object specifying the expiration date of the content
/// </param>
/// <remarks>
/// Maximum size of the autosave data is 128K for regular Internet Web sites or
/// 512KB for intranet sites. Total size of all data for one domain is 1MB for
/// Internet sites and 10MB for intranet sites.
/// </remarks>
userDataElement.setAttribute(getInstanceSettings(inst).dataKey, str);
userDataElement.expires = exp.toUTCString();
userDataElement.save("TinyMCE");
}
function getUserData(inst) {
/// <summary>
/// IE browsers only. Retrieves a string from the 'UserData' storage area.
/// </summary>
/// <param name="inst" type="Object" optional="false" mayBeNull="false">
/// Plugin instance from which to get the UserData
/// </param>
/// <returns type="String"></returns>
userDataElement.load("TinyMCE");
return userDataElement.getAttribute(getInstanceSettings(inst).dataKey);
}
function removeUserData(inst) {
/// <summary>
/// IE browsers only. Removes a string from the 'UserData' storage area.
/// </summary>
/// <param name="inst" type="Object" optional="false" mayBeNull="false">
/// Plugin instance from which to remove the UserData
/// </param>
userDataElement.removeAttribute(getInstanceSettings(inst).dataKey);
}
function encodeCookie(str) {
/// <summary>
/// Encodes a string value intended for storage in a cookie. Used instead of
/// escape() to be more space-efficient and to apply some minor compression.
/// </summary>
/// <param name="str" type="String" optional="false" mayBeNull="false">
/// String to encode for cookie storage
/// </param>
/// <returns type="String"></returns>
/// <remarks>
/// Depends on the existence of the cookieEncodeKey property. Used as a lookup table.
/// TO DO: Implement additional compression techniques.
/// </remarks>
return str.replace(/[\x00-\x1f]+| | /gi, " ")
.replace(/(.)\1{5,}|[%&;=<]/g,
function (c) {
if (c.length > 1) {
return ("%0" + c.charAt(0) + c.length.toString() + "%");
}
return cookieEncodeKey[c];
}
);
}
function decodeCookie(str) {
/// <summary>
/// Decodes a string value that was previously encoded with encodeCookie().
/// </summary>
/// <param name="str" type="String" optional="false" mayBeNull="false">
/// String that was previously encoded with encodeCookie()
/// </param>
/// <returns type="String"></returns>
/// <remarks>
/// Depends on the existence of the cookieDecodeKey property. Used as a lookup table.
/// TO DO: Implement additional compression techniques.
/// </remarks>
return str.replace(/%[1-5]|%0(.)(\d+)%/g,
function (c, m, d) {
var a, i, l;
if (c.length == 2) {
return cookieDecodeKey[c];
}
for (a=[], i=0, l=parseInt(d); i<l; i++) {
a.push(m);
}
return a.join("");
});
}
function encodeStorage(str) {
/// <summary>
/// Encodes a string value intended for storage in either localStorage or sessionStorage.
/// </summary>
/// <param name="str" type="String" optional="false" mayBeNull="false">
/// String to encode for localStorage or sessionStorage
/// </param>
/// <returns type="String"></returns>
/// <remarks>
/// Necessary because a bug in Safari truncates the string at the first comma.
/// </remarks>
return str.replace(/,/g, ",");
}
function decodeStorage(str) {
/// <summary>
/// Decodes a string value that was previously encoded with encodeStorage().
/// </summary>
/// <param name="str" type="String" optional="false" mayBeNull="false">
/// String that was previously encoded with encodeStorage()
/// </param>
/// <returns type="String"></returns>
return str.replace(/,/g, ",");
}
function preloadImage(imageURL) {
/// <summary>
/// Preloads an image so it will be instantly displayed the first time it's needed.
/// </summary>
var i = preloadImages.length;
preloadImages[i] = new Image();
preloadImages[i].src = imageURL;
}
function createDelegate(t, method) {
/// <summary>
/// Returns a delegate function, used for callbacks. Ensures 'this' refers
/// to the desired object.
/// </summary>
/// <param name="t" type="Object" optional="false" mayBeNull="true">
/// Object that will be 'this' within the callback function.
/// </param>
/// <param name="method" type="Function" optional="false" mayBeNull="false">
/// Callback function
/// </param>
/// <returns type="Function"></returns>
return function () {
return method.apply(t);
};
}
function newInstanceSettings(inst) {
/// <summary>
/// Creates new settings storage for a plugin instance.
/// </summary>
/// <param name="inst" type="Object" optional="false" mayBeNull="true">
/// The plugin instance for which to create the settings storage.
/// </param>
/// <returns type="Object"></returns>
var key = inst.key,
s = instanceSettings[key];
if (!s) {
s = instanceSettings[key] = tinymce.extend({}, settingsTemplate, {
dataKey: settingsTemplate.dataKey + key,
saveDelegate: createDelegate(inst, save),
saveFinalDelegate: createDelegate(inst, saveFinal),
restoreDelegate: createDelegate(inst, restore),
disposeDelegate: createDelegate(inst, dispose),
cookieFilter: new RegExp("(?:^|;\\s*)" + settingsTemplate.dataKey + key + "=([^;]*)(?:;|$)", "i")
});
}
return s;
}
function getInstanceSettings(inst) {
/// <summary>
/// Retrieves the settings for a plugin instance.
/// </summary>
/// <param name="inst" type="Object" optional="false" mayBeNull="true">
/// The plugin instance for which to retrieve the settings.
/// </param>
/// <returns type="Object"></returns>
return instanceSettings[inst.key];
}
function removeInstanceSettings(inst) {
/// <summary>
/// Deletes the settings for a plugin instance.
/// </summary>
/// <param name="inst" type="Object" optional="false" mayBeNull="true">
/// The plugin instance for which to delete the settings.
/// </param>
delete instanceSettings[inst.key];
}
//************************************************************************
// REGISTER PLUGIN
tinymce.PluginManager.add(pluginName, tinymce.plugins.TinyAutoSavePlugin);
})(); | zopyx.tinymceplugins.tinyautosave | /zopyx.tinymceplugins.tinyautosave-1.0.1.tar.gz/zopyx.tinymceplugins.tinyautosave-1.0.1/zopyx/tinymceplugins/tinyautosave/skins/autosave/tinyautosave/editor_plugin_src.js | editor_plugin_src.js |
zopyx.together
==============
Experimental integration of Plone 4 with TogetherJS
Installation
------------
Add ``zopyx.together`` to the ``eggs`` and ``zcml`` option of your buildout
configuration, re-run buildout and activate ``zopyx.together`` within Plone.
A new button ``Start TogetherJS`` will appear below the content headline.
Clicking the button will activate the TogetherJS functionality.
Requirements
------------
* Plone 4.x (tested with Plone 4.3 only)
Changes
-------
0.2.0 (2014-02-14)
------------------
- support for custom or self-hosted hub servers
- added dedicated control panel to the Plone site setup screen
0.1.0 (2014-02-13)
------------------
- Initial release
Author
------
| ZOPYX
| Andreas Jung
| [email protected]
| www.zopyx.com
| zopyx.together | /zopyx.together-0.2.1.zip/zopyx.together-0.2.1/README.rst | README.rst |
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
dest='accept_buildout_test_releases',
action="store_true", default=False,
help=("Normally, if you do not specify a --version, the "
"bootstrap script and buildout gets the newest "
"*final* versions of zc.buildout and its recipes and "
"extensions for you. If you use this flag, "
"bootstrap and buildout will get the newest releases "
"even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
help=("Specify the path to the buildout configuration "
"file to be used."))
parser.add_option("-f", "--find-links",
help=("Specify a URL to search for buildout releases"))
options, args = parser.parse_args()
######################################################################
# load/install setuptools
to_reload = False
try:
import pkg_resources
import setuptools
except ImportError:
ez = {}
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# XXX use a more permanent ez_setup.py URL when available.
exec(urlopen('https://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py'
).read(), ez)
setup_args = dict(to_dir=tmpeggs, download_delay=0)
ez['use_setuptools'](**setup_args)
if to_reload:
reload(pkg_resources)
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
if path not in pkg_resources.working_set.entries:
pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
cmd = [sys.executable, '-c',
'from setuptools.command.easy_install import main; main()',
'-mZqNxd', tmpeggs]
find_links = os.environ.get(
'bootstrap-testing-find-links',
options.find_links or
('http://downloads.buildout.org/'
if options.accept_buildout_test_releases else None)
)
if find_links:
cmd.extend(['-f', find_links])
setuptools_path = ws.find(
pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
# Figure out the most recent final version of zc.buildout.
import setuptools.package_index
_final_parts = '*final-', '*final'
def _final_version(parsed_version):
for part in parsed_version:
if (part[:1] == '*') and (part not in _final_parts):
return False
return True
index = setuptools.package_index.PackageIndex(
search_path=[setuptools_path])
if find_links:
index.add_find_links((find_links,))
req = pkg_resources.Requirement.parse(requirement)
if index.obtain(req) is not None:
best = []
bestv = None
for dist in index[req.project_name]:
distv = dist.parsed_version
if _final_version(distv):
if bestv is None or distv > bestv:
best = [dist]
bestv = distv
elif distv == bestv:
best.append(dist)
if best:
best.sort()
version = best[-1].version
if version:
requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
raise Exception(
"Failed to execute command:\n%s",
repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
if not [a for a in args if '=' not in a]:
args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs) | zopyx.together | /zopyx.together-0.2.1.zip/zopyx.together-0.2.1/bootstrap.py | bootstrap.py |
Introduction
============
``zopyx.trashfinder`` performs some sanity checks on packages
registered on PyPI (metadata and release files). A proper package
should contain at least name and email of the author or maintainer
and a reasonable description.
Installation
------------
easy_install zopyx.trashfinder
Usage
-----
pypi-trashfinder <package-prefix>
e.g.
pypi-trashfinder collective.
Author
------
Andreas Jung
[email protected]
License
-------
Published under the Zope Public License (ZPL)
| zopyx.trashfinder | /zopyx.trashfinder-0.1.4.tar.gz/zopyx.trashfinder-0.1.4/README.txt | README.txt |
import sys
from xmlrpclib import Server
def main():
    """Scan PyPI via XML-RPC for packages with broken metadata or
    suspicious release files and print a 'CRAP' line per finding.

    An optional first command line argument restricts the scan to
    packages whose name starts with that prefix.
    """
    prefix = ''
    if len(sys.argv) > 1:
        prefix = sys.argv[1]
    server = Server('http://pypi.python.org/pypi')
    packages = server.list_packages()
    if prefix:
        packages = [p for p in packages if p.startswith(prefix)]
    num_packages = len(packages)
    for i, package in enumerate(packages):
        print 'Processing %r (%d/%d)' % (package, i+1, num_packages)
        versions = server.package_releases(package)
        versions.sort()
        for version in versions:
            print ' ', version
            urls = server.release_urls(package, version)
            metadata = server.release_data(package, version)
            # PyPI hosted packages
            if urls:
                have_eggs = False
                have_sdist = False
                for url in urls:
                    url = url['url']
                    print ' ', url
                    # '.tar.gz'/'.tgz' both end in 'gz' and count as sdists
                    if url.endswith('.bz2') or url.endswith('.zip') or url.endswith('gz'):
                        have_sdist = True
                    if url.endswith('egg'):
                        have_eggs = True
                if have_eggs and not have_sdist:
                    print 'CRAP: %s has only egg release files but no sdist release file' % package
                if have_eggs and have_sdist:
                    print 'CRAP(possible): %s has egg *and* sdist release file' % package
            # externally hosted packages
            else:
                download_url = metadata['download_url']
                if download_url == 'UNKNOWN':
                    print 'CRAP: %s==%s - no release files, no valid download_url' % (package, version)
            # metadata sanity checks (apply to every release)
            if len(metadata['description'] or '') < 40:
                print 'CRAP: %s==%s - description < 40 chars' % (package, version)
            if len(metadata['summary'] or '') < 10:
                print 'CRAP: %s==%s - summary < 10 chars' % (package, version)
            if not metadata['author_email'] and not metadata['maintainer_email']:
                print 'CRAP: %s==%s - no author and no maintainer email given' % (package, version)
            if not metadata['author'] and not metadata['maintainer']:
                print 'CRAP: %s==%s - no author and no maintainer name given' % (package, version)

if __name__ == '__main__':
    main()
# Bootstrap a zc.buildout based project: install setuptools (if missing)
# and zc.buildout into a throw-away egg directory, then run buildout.
import os
import shutil
import sys
import tempfile
from optparse import OptionParser

# temporary directory for all eggs downloaded during bootstrap;
# removed at the very end of this script
tmpeggs = tempfile.mkdtemp()

usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''

parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))

options, args = parser.parse_args()

######################################################################
# load/install setuptools

to_reload = False
try:
    import pkg_resources
    import setuptools
except ImportError:
    ez = {}
    # urlopen moved between Python 2 and 3
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    # XXX use a more permanent ez_setup.py URL when available.
    exec(urlopen('https://bitbucket.org/pypa/setuptools/raw/0.7.2/ez_setup.py'
                 ).read(), ez)
    setup_args = dict(to_dir=tmpeggs, download_delay=0)
    ez['use_setuptools'](**setup_args)
    if to_reload:
        reload(pkg_resources)
    import pkg_resources
    # This does not (always?) update the default working set. We will
    # do it.
    for path in sys.path:
        if path not in pkg_resources.working_set.entries:
            pkg_resources.working_set.add_entry(path)

######################################################################
# Install buildout

ws = pkg_resources.working_set

# easy_install invocation: -m (no site), -Z (unzipped), -q (quiet),
# -N (no dependency scan), -x (no scripts), -d (target directory)
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]

find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])

setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location

requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # a version is "final" when its parsed form contains no
        # pre-release markers (any '*'-part other than the final tags)
        for part in parsed_version:
            if (part[:1] == '*') and (part not in _final_parts):
                return False
        return True
    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)

import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
    # NOTE(review): the '%s' is never interpolated -- the tuple is passed
    # as Exception args; message prints unformatted.
    raise Exception(
        "Failed to execute command:\n%s",
        repr(cmd)[1:-1])

######################################################################
# Import and run buildout

ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout

# default to the 'bootstrap' command when no command was given
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')

# if -c was provided, we push it back into args for buildout's main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]

zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
from zope.interface import implements
from zopyx.txng3.core.interfaces import IParseTreeNode
class BaseNode:
    """ Abstract base class for all parse tree nodes. """

    implements(IParseTreeNode)

    def __init__(self, v, field=None):
        """v     -- node value (term, child node or sequence of child nodes)
        field -- optional name of the index field the node is bound to
        """
        # Bugfix: the original guard `isinstance(self.__class__, BaseNode)`
        # was always False (a class object is not an instance of BaseNode)
        # and raised the undefined name 'ImplementationError'.
        if self.__class__ is BaseNode:
            raise NotImplementedError("Can't instantiate BaseNode")
        self._field = field
        self._parent = None
        self.setValue(v)

    def getType(self):
        """Return the node's type name (its class name)."""
        return self.__class__.__name__

    def getValue(self):
        return self._value

    def getField(self):
        return self._field

    def setValue(self, value):
        """Set the node value and re-parent any contained child nodes."""
        if isinstance(value, (list, tuple)):
            for v in value:
                if issubclass(v.__class__, BaseNode):
                    v._parent = self
        elif issubclass(value.__class__, BaseNode):
            value._parent = self
        self._value = value

    def sameAs(self, node):
        """True if *node* has the same type and the same value."""
        return bool(self.getType()==node.getType() and self.getValue()==node.getValue())

    def __repr__(self):
        if self._field:
            return "%s(%s::%r)" % (self.__class__.__name__, self.getField(), self.getValue())
        else:
            return "%s(%r)" % (self.__class__.__name__, self.getValue())
# Concrete node types produced by the query parser. They differ only by
# type; the evaluator dispatches on the class name.
class WordNode(BaseNode): pass
class GlobNode(BaseNode): pass
class TruncNode(BaseNode): pass
class SubstringNode(BaseNode): pass
class LTruncNode(BaseNode): pass
class SimNode(BaseNode): pass
class NotNode(BaseNode): pass
class AndNode(BaseNode): pass
class OrNode(BaseNode): pass
class NearNode(BaseNode): pass
class PhraseNode(BaseNode): pass
class RangeNode(BaseNode): pass
def stopword_remover(node, stopwords):
    """Recursively remove WordNodes whose lowercased value appears in
    *stopwords* from the query tree rooted at *node*."""
    value = node.getValue()
    if isinstance(value, (list, tuple)):
        kept = []
        for child in value:
            drop = isinstance(child, WordNode) and child.getValue().lower() in stopwords
            if not drop:
                kept.append(child)
        node.setValue(kept)
        for child in node.getValue():
            stopword_remover(child, stopwords)
    elif isinstance(value, BaseNode):
        stopword_remover(value, stopwords)
def node_splitter(node, splitter):
    """Split word nodes with splitter.

    When a node's unicode value splits into several terms, the node is
    replaced by an AndNode of WordNodes and the parent is re-wired to
    point at the replacement. Returns the (possibly replaced) node.
    """
    v = node.getValue()
    if isinstance(v, (list, tuple)):
        for child in v:
            node_splitter(child, splitter)
    elif isinstance(v, BaseNode):
        node_splitter(v, splitter)
    elif isinstance(v, unicode):
        split = splitter.split(v)
        if len(split) == 1:
            # single term: just normalize the node's value
            node.setValue(split[0])
        elif len(split) > 1:
            # several terms: replace this node by AndNode(WordNode, ...)
            original_node = node
            nodes = [WordNode(v) for v in split]
            node = AndNode(nodes)
            if original_node._parent:
                # re-wire the parent's value to reference the new node
                parent_value = original_node._parent.getValue()
                if isinstance(parent_value, BaseNode):
                    parent_value = node
                elif isinstance(parent_value, (tuple, list)):
                    parent_value = list(parent_value)
                    parent_value[parent_value.index(original_node)] = node
                original_node._parent.setValue(parent_value)
    return node
def flatten_NearNode(node, lst=None):
    """ return a sequence of all WordNodes within a NearNode subtree. We need this
    because the current query parser creates only a nested tree of NearNodes
    instead of a flat list.

    Bugfix: the accumulator previously defaulted to a *mutable* default
    argument ([]), so repeated calls without an explicit 'lst' shared and
    kept growing one list. A fresh list is now created per call.
    """
    if lst is None:
        lst = []
    if isinstance(node, WordNode):
        lst.append(node)
    elif isinstance(node, NearNode):
        flatten_NearNode(node.getValue(), lst)
    elif isinstance(node, (list, tuple)):
        for child in node:
            flatten_NearNode(child, lst)
    return lst
from zopyx.txng3.core.parsetree import *
from docidlist import DocidList
from resultset import ResultSet, unionResultSets, intersectionResultSets, inverseResultSet
from stemmer import getStemmer
from logger import LOG
class Evaluator:
    """ evaluator for ParseTree instances

    Walks a parsed query tree and translates every node type into the
    corresponding low-level index lookup, combining the partial
    ResultSets with set operations.
    """

    def __init__(self, searchrequest):
        self.searchrequest = searchrequest
        self.fields = self.searchrequest.index.fields

    def _getField(self, node):
        """ return field for a given node + some checking """
        # first look in the field node directly
        field = node.getField()
        # if not, look at the first parent node
        if not field:
            parent = node._parent
            if parent:
                field = parent.getField()
        # we got something, now check if the index is configured for this field
        if field and not field in self.fields:
            raise ValueError("Index not configured for field '%s'" % field)
        # Bugfix: honour an explicitly given (and configured) field. The
        # previous code always returned the default field, silently
        # discarding the field found above.
        if field:
            return field
        # otherwise return the default fieldname as given through the query options
        return self.searchrequest.field

    def WordNode(self, node):
        return lookup_word(self.searchrequest,
                           node.getValue(),
                           self._getField(node))

    def GlobNode(self, node):
        return lookup_by_pattern(self.searchrequest,
                                 node.getValue(),
                                 self._getField(node))

    def TruncNode(self, node):
        return lookup_by_right_truncation(self.searchrequest,
                                          node.getValue(),
                                          self._getField(node))

    def LTruncNode(self, node):
        return lookup_by_left_truncation(self.searchrequest,
                                         node.getValue(),
                                         self._getField(node))

    def SimNode(self, node):
        return lookup_by_similarity(self.searchrequest,
                                    node.getValue(),
                                    self._getField(node))

    def SubstringNode(self, node):
        return lookup_by_substring(self.searchrequest,
                                   node.getValue(),
                                   self._getField(node))

    def RangeNode(self, node):
        return lookup_by_range(self.searchrequest,
                               node.getValue()[0],
                               node.getValue()[1],
                               self._getField(node))

    def AndNode(self, node):
        sets = [self(n) for n in node.getValue()]
        return intersectionResultSets(sets)

    def OrNode(self, node):
        sets = [self(n) for n in node.getValue()]
        return unionResultSets(sets)

    def NotNode(self, node):
        return inverseResultSet(self.searchrequest.index.getStorage(self.searchrequest.field).getDocIds(), self(node.getValue()))

    def PhraseNode(self, node):
        # Dealing with PhraseNodes is somewhat tricky:
        # node.getValue() returns the ordered(!) sequence of WordNodes
        # representing the terms of the phrase
        words = [n.getValue() for n in node.getValue()]
        # first perform a simple word search for all terms
        sets = [self(n) for n in node.getValue()]
        # Now intersect the results (AND). This decreases the number of
        # documents to be checked.
        rs = intersectionResultSets(sets)
        # Now check if the found documents really contain the words as phrase
        return lookup_by_phrase(self.searchrequest,
                                rs.getDocids(),
                                words,
                                self._getField(node))

    def NearNode(self, node):
        """ near search isn't working yet """
        word_nodes = []
        word_nodes = flatten_NearNode(node.getValue(), word_nodes)
        sets = [self(n) for n in word_nodes]
        rs = intersectionResultSets(sets)
        raise NotImplementedError('Near search not implemented yet')

    def run(self):
        return self(self.searchrequest.parsetree)

    def __call__(self, node):
        # dispatch on the node's class name (WordNode -> self.WordNode, ...)
        return getattr(self, node.__class__.__name__)(node)
################################################################
# helper methods to perform a low-level word lookup
# within the index
################################################################
def lookup_word(SR, word, field):
    """Return a ResultSet for a single word.

    On top of the raw lexicon lookup this handles:
    - stemming (when the index was built with use_stemmer),
    - automatic right-truncation expansion ('autoexpand'),
    - thesaurus enrichment (SR.thesaurus names configured thesauri).
    """
    index = SR.getIndex()
    lexicon = index.getLexicon()
    if index.use_stemmer:
        # Stemmer support only works with disabled autoexpansion
        S = getStemmer(SR.language)
        if S:
            word = S.stem([word])[0]
        wordid = lexicon.getWordId(word, SR.language)
        if SR.autoexpand != 'off':
            raise ValueError('auto expansion is only available without enabled stemmer support')
        _words, _wids = [word], [wordid]
    else:
        wordid = lexicon.getWordId(word, SR.language)
        # perform autoexpansion only if the length of the given term is longer or
        # equal to the autoexpand_limit configuration parameter of the index
        if (SR.autoexpand=='always' or (SR.autoexpand=='on_miss' and not wordid)) \
           and len(word) >= index.autoexpand_limit:
            # lookup all words with 'word' as prefix
            words = list(lexicon.getWordsForRightTruncation(word, SR.language))
            # obtain wordids for words
            wids = lexicon.getWordIds(words, SR.language)
            # add the original word and wordid
            wids.append(wordid)
            words.append(word)
            _words, _wids = words, wids
        else:
            _words, _wids = [word], [wordid]
    # Thesaurus handling: check if thesaurus is set to a list of configured
    # thesauruses. If yes, perform a lookup for every word and enrich the
    # resultset
    if SR.thesaurus:
        # imports hoisted out of the nested loops (they used to execute
        # once per word/thesaurus combination)
        import zope.component
        from zopyx.txng3.core.interfaces import IThesaurus
        for word in _words[:]:
            for id in SR.thesaurus:
                TH = zope.component.queryUtility(IThesaurus, id)
                if TH is None:
                    raise ValueError('No thesaurus "%s" configured' % id)
                related_terms = TH.getTermsFor(word)
                if related_terms:
                    _words.extend(related_terms)
                    wids = lexicon.getWordIds(related_terms, SR.language)
                    _wids.extend(wids)
    return ResultSet(index.getStorage(field).getDocumentsForWordIds(_wids), [(w, field) for w in _words])
def lookup_by_right_truncation(SR, pattern, field):
    """Prefix search: ResultSet for all words starting with *pattern*."""
    index = SR.getIndex()
    if index.use_stemmer:
        raise ValueError('Right truncation is not supported with stemming enabled')
    lexicon = index.getLexicon()
    matches = lexicon.getWordsForRightTruncation(pattern, SR.language)
    matching_wids = lexicon.getWordIds(matches, SR.language)
    docs = index.getStorage(field).getDocumentsForWordIds(matching_wids)
    return ResultSet(docs, [(word, field) for word in matches])
def lookup_by_left_truncation(SR, pattern, field):
    """Suffix search: ResultSet for all words ending with *pattern*."""
    index = SR.getIndex()
    if index.use_stemmer:
        raise ValueError('Left truncation is not supported with stemming enabled')
    lexicon = index.getLexicon()
    matches = lexicon.getWordsForLeftTruncation(pattern, SR.language)
    matching_wids = lexicon.getWordIds(matches, SR.language)
    docs = index.getStorage(field).getDocumentsForWordIds(matching_wids)
    return ResultSet(docs, [(word, field) for word in matches])
def lookup_by_pattern(SR, pattern, field):
    """Glob search: ResultSet for all words matching the glob *pattern*."""
    index = SR.getIndex()
    if index.use_stemmer:
        raise ValueError('Pattern search is not supported with stemming enabled')
    lexicon = index.getLexicon()
    matches = lexicon.getWordsForPattern(pattern, SR.language)
    matching_wids = lexicon.getWordIds(matches, SR.language)
    docs = index.getStorage(field).getDocumentsForWordIds(matching_wids)
    return ResultSet(docs, [(word, field) for word in matches])
def lookup_by_substring(SR, pattern, field):
    """Substring search: ResultSet for all words containing *pattern*."""
    index = SR.getIndex()
    if index.use_stemmer:
        raise ValueError('Substring search is not supported with stemming enabled')
    lexicon = index.getLexicon()
    matches = lexicon.getWordsForSubstring(pattern, SR.language)
    matching_wids = lexicon.getWordIds(matches, SR.language)
    docs = index.getStorage(field).getDocumentsForWordIds(matching_wids)
    return ResultSet(docs, [(word, field) for word in matches])
def lookup_by_similarity(SR, pattern, field):
    """Fuzzy search: ResultSet for words similar to *pattern*
    (similarity threshold taken from SR.similarity_ratio)."""
    index = SR.getIndex()
    if index.use_stemmer:
        raise ValueError('Similarity search is not supported with stemming enabled')
    lexicon = index.getLexicon()
    matches = [w for w, _ratio in lexicon.getSimiliarWords(pattern, SR.similarity_ratio, SR.language)]
    matching_wids = lexicon.getWordIds(matches, SR.language)
    docs = index.getStorage(field).getDocumentsForWordIds(matching_wids)
    return ResultSet(docs, [(word, field) for word in matches])
def lookup_by_range(SR, from_word, to_word, field):
    """Range search: ResultSet for all words between *from_word* and *to_word*."""
    index = SR.getIndex()
    if index.use_stemmer:
        raise ValueError('Range search is not supported with stemming enabled')
    lexicon = index.getLexicon()
    matches = lexicon.getWordsInRange(from_word, to_word, SR.language)
    matching_wids = lexicon.getWordIds(matches, SR.language)
    docs = index.getStorage(field).getDocumentsForWordIds(matching_wids)
    return ResultSet(docs, [(word, field) for word in matches])
def lookup_by_phrase(SR, docids, words, field):
    """Filter *docids* down to documents containing *words* contiguously
    (i.e. as a phrase)."""
    index = SR.getIndex()
    storage = index.getStorage(field)
    terms = words
    if index.use_stemmer:
        stemmer = getStemmer(SR.language)
        if stemmer:
            terms = stemmer.stem(terms)
    wids = index.getLexicon().getWordIds(terms, SR.language)
    hits = [docid for docid in docids if storage.hasContigousWordids(docid, wids)]
    return ResultSet(DocidList(hits), [(term, field) for term in terms])
from zope.interface import implements
from BTrees.IIBTree import IIBucket, intersection, union, difference
from docidlist import DocidList
from wordlist import WordList
from zopyx.txng3.core.interfaces import IResultSet
from ranking import cosine_ranking
from config import DEFAULT_LANGUAGE
class ResultSet:
    """ A datastructure to store results from subqueries """

    implements(IResultSet)

    def __init__(self, docids, words):
        self.docids = docids          # sequence of document ids
        self.words = words            # sequence of tuples (word, field)
        self.ranked_results = None    # filled in by ranking()

    def __repr__(self):
        return '%s(%s), %s' % (self.__class__.__name__, self.docids, [ (w,f) for w,f in self.words])

    def getWords(self):
        return self.words

    def getDocids(self):
        return self.docids

    def ranking(self, ranking_function, index, language=DEFAULT_LANGUAGE,
                nbest=50):
        """Rank the docids with *ranking_function*, keeping the *nbest* hits."""
        self.ranked_results = ranking_function(index, self, language, nbest)

    def cosine_ranking(self, index, language=DEFAULT_LANGUAGE, nbest=50):
        # BBB, fall back to cosine ranking
        self.ranking(cosine_ranking, index, language, nbest)

    def getRankedResults(self):
        return self.items()

    def values(self):
        """ just implement the values() method to make the stupid
        ZCatalog happy to be able to call the items() method to
        obtain a sequence of (docid, score) tuples.
        """
        pass

    def items(self):
        """Return an IIBucket mapping docid -> score, with scores
        normalized to the range 0..1024 relative to the best hit."""
        d = IIBucket()
        if self.ranked_results:
            # results arrive best-first; use the top score for normalization
            # (renamed from 'max', which shadowed the builtin)
            top_score = self.ranked_results[0][1]
            for k, v in self.ranked_results:
                if top_score == 0:
                    d[k] = 0
                else:
                    d[k] = int(v / top_score * 1024.0)
        return d
################################################################
# some methods of create new result sets from existing ones
################################################################
def intersectionResultSets(sets):
    """ perform intersection of ResultSets """
    if not sets:
        return ResultSet(DocidList(), WordList())
    first = sets[0]
    docids = first.getDocids()
    words = WordList(first.getWords())
    for rs in sets[1:]:
        docids = intersection(docids, rs.docids)
        words.extend(rs.words)
    return ResultSet(docids, words)
def unionResultSets(sets):
    """ perform union of ResultSets """
    docids = DocidList()
    words = WordList()
    for set in sets:
        docids = union(docids, set.docids)
        words.extend(set.words)
    return ResultSet(docids, words)
def inverseResultSet(all_docids, set):
    """ perform difference between all docids and a resultset
    (i.e. return all documents NOT contained in the given result set)
    """
    docids = difference(DocidList(all_docids), set.getDocids())
    return ResultSet(docids, set.getWords())
import warnings
from zope.component import getUtility
from zope.component.interfaces import ComponentLookupError
from zope.interface import implements
from zopyx.txng3.core.interfaces import IConverter
from zopyx.txng3.core.interfaces import IIndexContentCollector, IIndexableContent
from config import DEFAULT_LANGUAGE, DEFAULT_ENCODING
from compatible import callable
from logger import LOG
class IndexContentCollector:
    """Collects indexable unicode content per field; binary data is run
    through a registered IConverter utility first."""

    implements(IIndexContentCollector)

    def __init__(self):
        # field name -> tuple of {'content': unicode, 'language': str|None}
        self._d = {}

    def addContent(self, field, text, language=None):
        """Store *text* (must be unicode) for *field*."""
        if not isinstance(text, unicode):
            raise ValueError("Argument for 'text' must be of type unicode (got: %s)" % type(text))
        infos = self._d.get(field, ())
        self._d[field] = infos + ({'content': text, 'language': language},)

    def addBinary(self, field, data, mimetype, encoding=None, language=None,
                  logError=False, raiseException=False):
        """Convert *data* with the converter registered for *mimetype*
        and store the resulting text. Unknown mimetypes are skipped
        with a warning."""
        try:
            converter = getUtility(IConverter, mimetype)
        except ComponentLookupError:
            LOG.warn('No converter registered for %s' % mimetype)
            return
        text, encoding = converter.convert(data, encoding, mimetype,
                                           logError, raiseException)
        # The result should be string/unicode. If not, convert the returned
        # content to unicode using the returned encoding. The converter is
        # in charge to handle encoding issues correctly.
        assert isinstance(text, basestring)
        if isinstance(text, str):
            text = unicode(text, encoding, 'ignore')
        infos = self._d.get(field, ())
        self._d[field] = infos + ({'content': text, 'language': language},)

    def getFields(self):
        """Return the names of all fields with collected content."""
        return self._d.keys()

    def getFieldData(self, field):
        """Return the tuple of content dicts collected for *field*."""
        return self._d[field]

    def __nonzero__(self):
        # truth value == "has any content been collected?" (Python 2 protocol)
        return len(self._d) > 0
def extract_content(fields, obj, default_encoding=DEFAULT_ENCODING, default_language=DEFAULT_LANGUAGE):
    """ This helper method tries to extract indexable content from a content
    object in different ways. First we try to check for ITextIndexable
    interface or ITextIndexableRaw interfaces which are the preferred
    way to interace with TextIndexNG indexes. Otherwise we fall back
    to the standard Zope 2 behaviour and try to get the content by
    looking at the corresponding attributes or methods directly.

    Please note that this method will not contain content-type
    specific extraction code. This should be handled in every case by
    the content-type implementation itself or through an adapter.

    Returns an IndexContentCollector instance, or None when no content
    could be extracted.
    """
    adapter = IIndexableContent(obj, None)
    if adapter:
        # the official TXNG3 indexer API
        icc = adapter.indexableContent(fields)
    elif hasattr(obj, 'txng_get'):
        # old Zope behaviour for objects providing the txng_get() hook
        warnings.warn('Using the txng_get() hook for class %s is deprecated.'
                      ' Use IndexContentCollector implementation instead' % obj.__class__.__name__,
                      DeprecationWarning,
                      stacklevel=2)
        result = obj.txng_get(fields)
        if result is None:
            return None
        # unpack result triple
        source, mimetype, encoding = result
        icc = IndexContentCollector()
        icc.addBinary(fields[0], source, mimetype, encoding, default_language)
    else:
        # old Zope 2 behaviour: look up value either as attribute of the object
        # or as method providing a return value as indexable content
        # (the unused local 'd = {}' of the original was removed)
        icc = IndexContentCollector()
        for f in fields:
            v = getattr(obj, f, None)
            if not v: continue
            if callable(v):
                v = v()
            # accept only a string/unicode string
            if not isinstance(v, basestring):
                raise TypeError('Value returned for field "%s" must be string or unicode (got: %s, %s)' % (f, repr(v), type(v)))
            if isinstance(v, str):
                v = unicode(v, default_encoding, 'ignore')
            icc.addContent(f, v, default_language)
    return icc or None
from math import log, sqrt
from nbest import NBest
from config import DEFAULT_LANGUAGE
def cosine_ranking(index, resultset, language=DEFAULT_LANGUAGE, max=50):
    """Rank the documents of *resultset* with a TF-IDF style cosine
    measure and return the *max* best (docid, score) pairs, best first.

    NOTE(review): the parameter name 'max' shadows the builtin.
    """
    N = len(index)       # collection size, number of indexed documents
    nbest = NBest(max)   # storage for the 'max' best hits
    word_field_sequence = resultset.getWords()   # sequence of (word, field) tuples
    lexicon_getWordId = index.getLexicon().getWordId  # shortcut
    IDF = {}                # inverse document frequency per word
    wid_cache = {}          # maps word -> wid for performance reasons
    storage_cache = {}      # cache for field -> index.getStorage(field)
    frequencies_cache = {}  # cache for field -> storage.getTermFrequency()
    # first calculate the inverse document frequency for all found words
    for word, field in word_field_sequence:
        # might be replaced with getWordIds()
        wid = lexicon_getWordId(word, language)
        if not wid: continue
        wid_cache[word] = wid
        if not storage_cache.has_key(field):
            storage_cache[field] = index.getStorage(field)
            frequencies_cache[field] = storage_cache[field].getTermFrequency()
        docids = storage_cache[field].getDocumentsForWordId(wid)
        TF = len(docids)    # term frequency (number of docs containing word)
        # calculate the inverse document frequency
        if TF == 0:
            IDF[word] = 0
        else:
            IDF[word] = log(1.0 + N / TF)
    # now rank all documents
    for docid in resultset.getDocids():
        # accumulated rank
        rank = 0.0
        for word, field in word_field_sequence:
            wid = wid_cache.get(word)
            if not wid: continue
            # document term frequency
            try:
                DTF = frequencies_cache[field][docid][wid]
            except KeyError:
                DTF = 0
            # document term weight
            if DTF == 0:
                DTW = 0.0
            else:
                DTW = ((1.0 + log(DTF)) * IDF[word])
            # query term frequency and query max frequency are set to 1
            QTF = QMF = 1.0
            # query term weight
            QTW = ((0.5 + (0.5 * QTF / QMF))) * IDF[word]
            # accumulate rank
            rank += (DTW * QTW)
        # document weight
        DWT = sqrt(rank)
        # normalize rank
        if rank != 0.0:
            rank = rank / DWT
        # add to NBest instance - we are only interested in the
        # documents with the best score (rank)
        nbest.add(docid, rank)
    return nbest.getbest()
import os
import subprocess
import tempfile
from zope.interface import implements
from zopyx.txng3.core.interfaces import IConverter
from zopyx.txng3.core.exceptions import BaseConverterError
class TmpFile:
    """Persist *data* into a private temporary file.

    str(instance) yields the filename; the file is removed when the
    instance is garbage collected.
    """

    def __init__(self, data):
        # mkstemp() instead of the race-prone, deprecated mktemp(); the
        # file handle is now closed on every code path (the original
        # leaked it).
        fd, self.fname = tempfile.mkstemp()
        out = os.fdopen(fd, 'w+b')
        try:
            if isinstance(data, bytes):
                # Python 2 str is bytes, so this covers the old behaviour
                out.write(data)
                return
            if isinstance(data, str):
                # text input (Python 3): persist as UTF-8 bytes
                out.write(data.encode('utf-8'))
                return
            from ZPublisher.Iterators import IStreamIterator
            if IStreamIterator.isImplementedBy(data):
                # stream iterator: write block-wise, releasing ZODB memory
                for block in data:
                    out.write(block)
                    block._p_deactivate()
            else:
                raise ValueError("Don't know how to write data!")
        finally:
            out.close()

    def __str__(self):
        return self.fname

    __repr__ = __str__

    def __del__(self):
        os.unlink(self.fname)
def findOnWin32Path(fname):
    """Return True when *fname* -- bare or with one of the %PATHEXT%
    suffixes -- exists in a directory listed on %PATH% (';'-separated)."""
    dirs = os.getenv("PATH", '').split(';')
    suffixes = os.getenv("PATHEXT", '.EXE;.COM;.BAT;.CMD').split(';')
    candidates = [fname] + [fname + suffix for suffix in suffixes]
    for directory in dirs:
        for candidate in candidates:
            if os.path.exists(os.path.join(directory, candidate)):
                return True
    return False
class BaseConverter(object):
    """ Base class for all converters """

    content_type = None         # MIME type(s) the converter handles
    content_description = None  # human readable description

    implements(IConverter)

    def __init__(self):
        # subclasses must define both class attributes
        if not self.content_type:
            raise BaseConverterError('content_type undefinied')
        if not self.content_description:
            raise BaseConverterError('content_description undefinied')

    def execute(self, com):
        """Run the shell command *com* and return its stdout
        (win32pipe variant on Windows when available)."""
        try:
            import win32pipe
            return win32pipe.popen(com).read()
        except ImportError:
            return os.popen(com).read()

    def saveFile(self, data):
        # persist *data* into a temporary file (TmpFile wrapper)
        return TmpFile(data)

    def getDescription(self):
        return self.content_description

    def getType(self):
        return self.content_type

    def getDependency(self):
        # name of the external program the converter shells out to, if any
        return getattr(self, 'depends_on', None)

    def __call__(self, s):
        # converters are callable; delegates to the subclass' convert()
        return self.convert(s)

    def isAvailable(self):
        """Return 'yes'/'no'/'unknown'/'always' depending on whether the
        external dependency can be located on this platform."""
        depends_on = self.getDependency()
        if depends_on:
            if os.name == 'posix':
                # probe with 'which'; empty or "not found" output means missing
                out = subprocess.Popen(['which', depends_on],
                                       stdout=subprocess.PIPE).communicate()[0]
                if out.find('no %s' % depends_on) > - 1 or out.lower().find('not found') > -1 or len(out.strip()) == 0:
                    return 'no'
                return 'yes'
            elif os.name == 'nt':
                if findOnWin32Path(depends_on):
                    return 'yes'
                else:
                    return 'no'
            else:
                return 'unknown'
        else:
            # no external dependency: always usable
            return 'always'
import re
from zope.component.interfaces import IFactory
from zope.interface import implements, implementedBy
from zopyx.txng3.core.interfaces import ISplitter
from zopyx.txng3.ext.splitter import Splitter as _Splitter
class Splitter:
    """ A wrapper for TXNGSplitter """

    implements(ISplitter)

    def __init__(self, *args, **kw):
        # all keyword options are passed straight through to the C splitter
        self._splitter = _Splitter(**kw)

    def split(self, content):
        """Split *content* into a sequence of terms."""
        return self._splitter.split(content)
class SplitterFactory:
    """Factory (IFactory) creating configured Splitter instances."""

    implements(IFactory)

    def __call__(self, maxlen=64, singlechar=True, casefolding=True, separator='+'):
        splitter = Splitter(maxlen=maxlen, singlechar=singlechar, casefolding=casefolding, separator=separator)
        return splitter

    def getInterfaces(self):
        return implementedBy(Splitter)

# replace the class by a singleton instance (this is what gets registered)
SplitterFactory = SplitterFactory()
# patterns used by the splitter (will be compiled to regexes)
SPLIT_AT = '\s|\t'
PUNCTUATION = '\.|,|\?|\!|:|;|"'
ADDITIONAL_CHARS = '\-'
RE_FLAGS = re.I | re.M | re.UNICODE
class SimpleSplitter:
    """ A simple unicode-aware splitter """

    implements(ISplitter)

    def __init__(self,
                 casefolding=1,
                 split_at=SPLIT_AT,
                 punctuation=PUNCTUATION,
                 additional_chars=ADDITIONAL_CHARS,
                 *args, **kw):
        """ 'split_at' -- a regular expression that is used to split strings.
        The regular expression is passed unchanged to re.compile().
        """
        self.splitter = re.compile(split_at, RE_FLAGS)
        # NOTE(review): 'punctuation' is used below as a plain character set
        # for str.strip(), so the regex escape backslashes contained in the
        # default value are stripped as literal characters too -- confirm
        # this is intended.
        self.punctuation = punctuation
        self.casefolding = casefolding
        self.regex = re.compile(r'\w+[\w%s]*' % additional_chars, RE_FLAGS)

    def split(self, content):
        """ Split a text string (preferably unicode) into terms according to
        the splitter regular expression.
        """
        if self.casefolding:
            content = content.lower()
        # strip surrounding punctuation, drop underscores, then keep only
        # word characters (plus additional_chars) of every term
        terms = [t.strip(self.punctuation) for t in self.splitter.split(content)]
        terms = [t.replace('_', '') for t in terms]
        terms = [''.join(self.regex.findall(t)) for t in terms]
        terms = [t for t in terms if t]
        return terms
class SimpleSplitterFactory:
    """Factory (IFactory) creating configured SimpleSplitter instances."""

    implements(IFactory)

    def __call__(self, split_at=SPLIT_AT, punctuation=PUNCTUATION, *args, **kw):
        return SimpleSplitter(split_at=split_at, punctuation=punctuation, *args, **kw)

    def getInterfaces(self):
        return implementedBy(SimpleSplitter)

# replace the class by a singleton instance (this is what gets registered)
SimpleSplitterFactory = SimpleSplitterFactory()
from persistent import Persistent
from zope.interface import implements, implementedBy
from zope.component.interfaces import IFactory
from compatible import Persistent
from BTrees.IOBTree import IOBTree
from BTrees.IIBTree import IITreeSet, IIBTree, union, IISet, difference
import BTrees.Length
from zopyx.txng3.core.interfaces import IStorage, IStorageWithTermFrequency
from zopyx.txng3.core.exceptions import StorageException
from widcode import encode , decode
from docidlist import DocidList
class _PS(Persistent):
    """ ZODB-aware wrapper for strings

    Used to store the encoded wordid string per document as its own
    persistent record.
    """

    def __init__(self, s):
        # the wrapped (encoded) string
        self.s = s

    def get(self):
        """Return the wrapped string."""
        return self.s
class Storage(Persistent):
    """ Storage to keep the mapping wordId to sequence
        of document ids and vice-versa.
    """

    implements(IStorage)

    def __init__(self):
        self.clear()

    def clear(self):
        """ Initialize (or wipe) all internal data structures. """
        self._doc2wid = IOBTree()   # docid -> _PS(encoded [wordids])
        self._wid2doc = IOBTree()   # wordid -> [docids]
        self._docweight = IIBTree() # docid -> (# terms in document)
        self._length = BTrees.Length.Length()

    def __len__(self): return self._length()
    numberDocuments = __len__

    def insertDocument(self, docid, widlist):
        """ Insert (or reindex) the document 'docid' represented by the
            ordered sequence of word ids 'widlist'.
        """
        if not self._doc2wid.has_key(docid):
            self._length.change(1)

        enc_widlist = encode(widlist)
        old_enc_widlist = self._doc2wid.get(docid)
        if old_enc_widlist is not None:
            old_enc_widlist = old_enc_widlist.get() # unwrap _PS instance

        removed_wordids = []
        if old_enc_widlist != enc_widlist :
            self._doc2wid[docid] = _PS(enc_widlist)
            if old_enc_widlist is not None:
                # word ids present in the old version of the document but
                # no longer present in the new one
                old_widlist = IISet(decode(old_enc_widlist))
                removed_wordids = difference(old_widlist, IISet(widlist))

        tree = self._wid2doc
        tree_has = tree.has_key
        count = 0
        for wid in widlist:
            count += 1
            if not tree_has(wid):
                tree[wid] = DocidList([docid])
            else:
                if not docid in tree[wid]:
                    tree[wid].insert(docid)

        # drop the docid from the posting lists of all vanished words
        for wid in removed_wordids:
            if tree_has(wid):
                try:
                    tree[wid].remove(docid)
                except KeyError:
                    pass

        self._docweight[docid] = count

    def removeDocument(self, docid):
        """ Remove a document from the storage; unknown docids are
            silently ignored.
        """
        try:
            wordids = self._doc2wid[docid]
        except KeyError:
            return # silently ignore

        wordids = wordids.get() # unwrap _PS instance
        tree = self._wid2doc
        tree_has = tree.has_key
        for wordid in decode(wordids):
            if tree_has(wordid):
                try:
                    tree[wordid].remove(docid)
                except KeyError:
                    pass
                # remove empty posting lists completely
                if not tree[wordid]:
                    del tree[wordid]

        del self._doc2wid[docid]
        del self._docweight[docid]
        self._length.change(-1)

    def getDocIds(self):
        """ Return a sequence of all document ids in the storage. """
        return self._doc2wid.keys()

    def getDocumentsForWordId(self, wordid):
        """ Return the DocidList for 'wordid' (empty for unknown ids). """
        try:
            return self._wid2doc[wordid]
        except (TypeError, KeyError):
            return DocidList()

    def getDocumentsForWordIds(self, wordidlist):
        """ Return the union of the DocidLists of all given word ids. """
        r = DocidList()
        for wordid in wordidlist:
            try:
                docids = self._wid2doc[wordid]
            except (TypeError, KeyError):
                continue
            r = union(r, docids)
        return r

    def getWordIdsForDocId(self, docid):
        """ Return the ordered word id list of a document.
            Raises StorageException for unknown docids.
        """
        try:
            ps_wrapper = self._doc2wid[docid]
            return decode(ps_wrapper.get())
        except KeyError:
            raise StorageException('No such docid: %d' % docid)

    def numberWordsInDocument(self, docid):
        """ Return the number of terms of a document.
            Raises StorageException for unknown docids.
        """
        try:
            return self._docweight[docid]
        except KeyError:
            raise StorageException('No such docid: %d' % docid)

    def hasContigousWordids(self, docid, wordids):
        """ Check if 'wordids' occur as a contiguous sub-sequence of the
            document given by 'docid' (used for phrase search).
        """
        # *The trick* to perform a phrase search is to use the feature
        # that the string encoded wids can be searched through string.find().
        # However string.find() is not sufficient since it might find occurences
        # where the next byte does not represent the start of a new word (with
        # 7th bit set). So we must loop search until we find a hit (and we don't
        # return on the first occurence anymore)
        encoded_wids = encode(wordids)
        encoded_wids_len = len(encoded_wids)
        encoded_document = self._doc2wid[docid].get()
        encoded_document_len = len(encoded_document)

        found = False
        offset = 0
        while 1:
            pos = encoded_document[offset:].find(encoded_wids)
            if pos == -1: # end of string?
                break

            if pos != -1: # found something
                if offset+pos+encoded_wids_len < encoded_document_len:
                    # check if the next token represents a new word (with
                    # 7th bit set)
                    next_c = encoded_document[offset+pos+encoded_wids_len]
                    if ord(next_c) > 127:
                        # start of a new word -> we *really* found a word
                        found = True
                        break
                else:
                    # we found a word and we are the end of the complete string
                    found = True
                    break
            offset = offset + pos + 1
        # NOTE: a leftover unreachable 'return encoded_wids in
        # encoded_document' after this return was removed (dead code)
        return found

    def getPositions(self, docid, wordid):
        """ return a sequence of positions of occurrences of wordid within
            a document given by its docid.
        """
        # (removed an unused 'encoded_wid = encode((wordid,))' local)
        encoded_document = self._doc2wid[docid].get()

        positions = IITreeSet()
        for pos, wid in enumerate(decode(encoded_document)):
            if wid == wordid:
                positions.insert(pos)
        return positions
class StorageWithTermFrequency(Storage):
    """ Storage that additionally records, per document, how often each
        word id occurs (needed for relevance ranking).
    """

    implements(IStorageWithTermFrequency)

    def clear(self):
        Storage.clear(self)
        self._frequencies = IOBTree()  # docid -> (wordid -> #occurences)

    def insertDocument(self, docid, widlist):
        """ Index the document and record the per-wordid term counts. """
        Storage.insertDocument(self, docid, widlist)

        # count occurrences of every word id
        # (removed an unused 'num_wids = float(len(widlist))' local)
        occurences = {}  # wid -> #(occurences)
        for wid in widlist:
            occurences[wid] = occurences.get(wid, 0) + 1

        self._frequencies[docid] = IIBTree()
        tree = self._frequencies[docid]
        for wid,num in occurences.items():
            tree[wid] = num

    def removeDocument(self, docid):
        # note that removing a non existing document should not
        # raise an exception
        Storage.removeDocument(self, docid)
        try:
            del self._frequencies[docid]
        except KeyError:
            pass

    def getTermFrequency(self):
        """ Return the docid -> (wordid -> count) mapping. """
        return self._frequencies
class _StorageFactory:
    # zope.component factory creating storages of a fixed class

    implements(IFactory)

    def __init__(self, klass):
        # the storage class to instantiate on every call
        self._klass = klass

    def __call__(self):
        return self._klass()

    def getInterfaces(self):
        # interfaces implemented by the created storage objects
        return implementedBy(self._klass)

# ready-to-use factory singletons for both storage flavours
StorageFactory = _StorageFactory(Storage)
StorageWithTermFrequencyFactory = _StorageFactory(StorageWithTermFrequency)
# A byte-aligned encoding for lists of non-negative ints, using fewer bytes
# for smaller ints. This is intended for lists of word ids (wids). The
# ordinary string .find() method can be used to find the encoded form of a
# desired wid-string in an encoded wid-string. As in UTF-8, the initial byte
# of an encoding can't appear in the interior of an encoding, so find() can't
# be fooled into starting a match "in the middle" of an encoding. Unlike
# UTF-8, the initial byte does not tell you how many continuation bytes
# follow; and there's no ASCII superset property.
# Details:
#
# + Only the first byte of an encoding has the sign bit set.
#
# + The first byte has 7 bits of data.
#
# + Bytes beyond the first in an encoding have the sign bit clear, followed
# by 7 bits of data.
#
# + The first byte doesn't tell you how many continuation bytes are
# following. You can tell by searching for the next byte with the
# high bit set (or the end of the string).
#
# The int to be encoded can contain no more than 28 bits.
#
# If it contains no more than 7 bits, 0abcdefg, the encoding is
# 1abcdefg
#
# If it contains 8 thru 14 bits,
# 00abcdef ghijkLmn
# the encoding is
# 1abcdefg 0hijkLmn
#
# Static tables _encoding and _decoding capture all encodes and decodes for
# 14 or fewer bits.
#
# If it contains 15 thru 21 bits,
# 000abcde fghijkLm nopqrstu
# the encoding is
# 1abcdefg 0hijkLmn 0opqrstu
#
# If it contains 22 thru 28 bits,
# 0000abcd efghijkL mnopqrst uvwxyzAB
# the encoding is
# 1abcdefg 0hijkLmn 0opqrstu 0vwxyzAB
# Sanity checks for the two thresholds used below: two 7-bit payload
# bytes cover wids < 0x4000, four payload bytes cover wids < 0x10000000.
assert 0x80**2 == 0x4000
assert 0x80**4 == 0x10000000
import re
def encode(wids):
    """ Encode a list of word ids (wids) as a string. """
    wid2enc = _encoding
    n = len(wid2enc)
    # table lookup for wids < 0x4000, computed encoding for the rest
    return "".join([w < n and wid2enc[w] or _encode(w) for w in wids])

_encoding = [None] * 0x4000 # Filled later, and converted to a tuple

def _encode(w):
    """ Encode a single wid that needs more than 14 data bits. """
    assert 0x4000 <= w < 0x10000000
    b, c = divmod(w, 0x80)
    a, b = divmod(b, 0x80)
    s = chr(b) + chr(c)
    if a < 0x80:    # no more than 21 data bits
        return chr(a + 0x80) + s
    a, b = divmod(a, 0x80)
    assert a < 0x80, (w, a, b, s) # else more than 28 data bits
    return (chr(a + 0x80) + chr(b)) + s

# One encoded word: an initial byte with the sign bit set followed by
# any number of continuation bytes with the sign bit clear.
_prog = re.compile(r"[\x80-\xFF][\x00-\x7F]*")

def decode(code):
    """ Decode a string back into the list of word ids. """
    get = _decoding.get
    # Obscure: while _decoding does have the key '\x80', its value is 0,
    # so the "or" here calls _decode('\x80') anyway.
    return [get(p) or _decode(p) for p in _prog.findall(code)]

_decoding = {} # Filled later

def _decode(s):
    """ Decode a single encoded word of 1, 3 or 4 bytes (2-byte codes
        are handled by the _decoding table in decode()).
    """
    if s == '\x80':
        # See comment in decode(). This is here to allow a trick to work.
        return 0
    if len(s) == 3:
        a, b, c = map(ord, s)
        assert a & 0x80 == 0x80 and not b & 0x80 and not c & 0x80
        return ((a & 0x7F) << 14) | (b << 7) | c
    # repr(s) instead of the Python-2-only backtick syntax
    assert len(s) == 4, repr(s)
    a, b, c, d = map(ord, s)
    assert a & 0x80 == 0x80 and not b & 0x80 and not c & 0x80 and not d & 0x80
    return ((a & 0x7F) << 21) | (b << 14) | (c << 7) | d

def _fill():
    """ Populate the static 1- and 2-byte encode/decode tables. """
    global _encoding
    for i in range(0x80):
        s = chr(i + 0x80)
        _encoding[i] = s
        _decoding[s] = i
    for i in range(0x80, 0x4000):
        hi, lo = divmod(i, 0x80)
        s = chr(hi + 0x80) + chr(lo)
        _encoding[i] = s
        _decoding[s] = i
    _encoding = tuple(_encoding)

_fill()
def test(limit=2**20, progress=1000):
    """ Round-trip self test: encode/decode every wid below 'limit'.

    'limit' and 'progress' are new optional parameters (defaults keep
    the historical behaviour) so a quick smoke test does not have to
    crunch through all 2**20 ids.
    """
    for i in range(limit):
        # parenthesized print works in Python 2 and 3 alike
        if i % progress == 0: print(i)
        wids = [i]
        code = encode(wids)
        assert decode(code) == wids, (wids, code, decode(code))

if __name__ == "__main__":
    test()
import re
from BTrees.OOBTree import OOBTree
from BTrees.OIBTree import OIBTree
from BTrees.IOBTree import IOBTree
import BTrees.Length
from compatible import Persistent
from zope.component.interfaces import IFactory
from zope.interface import implements, implementedBy
from zopyx.txng3.core.interfaces import ILexicon
from zopyx.txng3.core.exceptions import LexiconError
from config import DEFAULT_LANGUAGE
try:
from zopyx.txng3.ext.levenshtein import ratio
have_lv = True
except ImportError:
have_lv = False
class Lexicon(Persistent):
    """Maps words to word ids """

    implements(ILexicon)

    def __init__(self, languages=()):
        self._words = OOBTree()   # language -> (word -> wid)
        self._wids = IOBTree()    # wid -> (word, language)
        self._nextid = BTrees.Length.Length()
        for l in languages:
            self.addLanguage(l)

    def __len__(self):
        # total number of words over all languages
        return sum([len(tree) for tree in self._words.values()])

    def addLanguage(self, language):
        """ prepare lexicon for a new language """
        self._words[language] = OIBTree()

    def getLanguages(self):
        """ return sequence of languages """
        return tuple(self._words.keys())

    def hasLanguage(self, language):
        """ language handled by lexicon? """
        return bool(self._words.has_key(language))

    def _getTree(self, language):
        """ return tree for a given language """
        try:
            return self._words[language]
        except KeyError:
            raise LexiconError('Unsupported language: %s' % language)

    def insertWord(self, word, language=DEFAULT_LANGUAGE):
        """ insert a word and return the corresponding wordid """
        return self.insertWords([word], language)[0]

    def insertWords(self, words, language=DEFAULT_LANGUAGE):
        """ insert a sequence of words and return a sequence of
            corresponding wordids.
        """
        tree = self._getTree(language)
        wids = []
        for word in words:
            if not isinstance(word, unicode):
                raise LexiconError('Only unicode string can be indexed (%s)' % repr(word))
            try:
                wids.append(tree[word])
            except KeyError:
                # unknown word -> allocate the next free wordid
                wid = self._nextid()
                self._nextid.change(1)
                tree[word] = wid
                self._wids[wid] = (word, language)
                wids.append(wid)
        return wids

    def getWordId(self, word, language=DEFAULT_LANGUAGE):
        """ Return the wordid for a given word in a given language or None if
            the word is not available.
        """
        tree = self._getTree(language)
        return tree.get(word, None)

    def getWordIds(self, words, language=DEFAULT_LANGUAGE):
        """ Return sequence of wordids for a sequence of words in a given language """
        tree = self._getTree(language)
        get = tree.get
        return [get(word, None) for word in words]

    def getWord(self, wid):
        """ Return the word for a given wordid or None if not available """
        # bugfix: the former 'self._wids.get(wid)[0]' raised TypeError
        # (None[0]) for unknown wids, so the KeyError handler never fired;
        # direct indexing makes the except clause actually work
        try:
            return self._wids[wid][0]
        except KeyError:
            return None

    def getWordsForLanguage(self, language):
        """ Return all words for 'language' """
        return list(self._getTree(language))

    def getWordAndLanguage(self, wid):
        """ Return a tuple (word, language tuple) for a given wordid or None
            if not available.
        """
        return self._wids.get(wid, None)

    def getWordsForRightTruncation(self, prefix, language=DEFAULT_LANGUAGE):
        """ Return a sequence of words with a common prefix """
        if not isinstance(prefix, unicode):
            raise LexiconError('Prefix must be unicode (%s)' % prefix)
        tree = self._getTree(language)
        # u'\uffff' is a sentinel marking the upper range boundary
        return tree.keys(prefix, prefix + u'\uffff')

    def getWordsInRange(self, w1, w2, language=DEFAULT_LANGUAGE):
        """ return all words within w1...w2 """
        if not isinstance(w1, unicode):
            raise LexiconError('1. argument must be unicode (%s)' % w1)
        if not isinstance(w2, unicode):
            raise LexiconError('2. argument must be unicode (%s)' % w2)
        tree = self._getTree(language)
        return tree.keys(w1, w2)

    def getWordsForSubstring(self, sub, language=DEFAULT_LANGUAGE):
        """ return all words that match *sub* """
        if not isinstance(sub, unicode):
            raise LexiconError('Substring must be unicode (%s)' % sub)
        tree = self._getTree(language)
        return [word for word in tree.keys() if sub in word]

    def getWordsForLeftTruncation(self, suffix, language=DEFAULT_LANGUAGE):
        """ return all words with a common suffix """
        if not isinstance(suffix, unicode):
            raise LexiconError('Suffix must be unicode (%s)' % suffix)
        tree = self._getTree(language)
        return [word for word in tree.keys() if word.endswith(suffix)]

    def _createRegex(self, pattern):
        """ Translate a pattern into a regular expression.
            NOTE: only '*' and '?' are translated; other regex
            metacharacters pass through unescaped.
        """
        return '%s$' % pattern.replace( '*', '.*').replace( '?', '.')

    def getSimiliarWords(self, term, threshold=0.75, language=DEFAULT_LANGUAGE, common_length=-1):
        """ return a list of similar words based on the levenshtein distance """
        if not have_lv:
            raise LexiconError('Method not allowed. Please install the Levenshtein extension properly')
        tree = self._getTree(language)
        if common_length > -1:
            # restrict the candidates to words sharing a common prefix
            prefix = term[:common_length]
            words = tree.keys(prefix, prefix + u'\uffff')
        else:
            words = tree.keys()
        # compute the (expensive) ratio only once per word -- the former
        # list comprehension computed it twice
        result = []
        for w in words:
            r = ratio(w, term)
            if r > threshold:
                result.append((w, r))
        return result

    def getWordsForPattern(self, pattern, language=DEFAULT_LANGUAGE):
        """ perform full pattern matching """
        # search for prefix in word
        mo = re.search('([\?\*])', pattern)
        if mo is None:
            return []

        pos = mo.start(1)
        if pos==0:
            raise LexiconError('pattern "%s" should not start with a globbing character' % pattern)

        prefix = pattern[:pos]
        tree = self._getTree(language)
        words = tree.keys(prefix, prefix + u'\uffff')
        regex = re.compile(self._createRegex(pattern), re.UNICODE)
        regex_match = regex.match
        return [word for word in words if regex_match(word)]
class LexiconFactory:
    # zope.component factory (registered as 'txng.lexicons.default')

    implements(IFactory)

    def __call__(self, languages=()):
        return Lexicon(languages)

    def getInterfaces(self):
        return implementedBy(Lexicon)

# the module deliberately exports a singleton instance, not the class
LexiconFactory = LexiconFactory()
import os, re
from zope.interface import implements
from zopyx.txng3.core.interfaces import IThesaurus
# directory containing the bundled thesaurus data files (<language>.txt)
th_dir = os.path.join(os.path.dirname(__file__), 'data', 'thesaurus')

# match the encoding header line, e.g. "# encoding = iso-8859-15"
enc_reg = re.compile('#\s*encoding\s*=\s*([\w\-]+)')
def readThesaurus(language, casefolding=True, filename=None):
    """ Read a thesaurus file and return (synonyms, terms) where
        'synonyms' maps a group id to the list of equivalent words and
        'terms' maps each word to the group ids it belongs to.
        The file must declare its encoding before the first data line.
        Fix: the file handle is now closed deterministically.
    """
    synonyms = {}
    terms = {}
    encoding = None

    if filename is None:
        filename = os.path.join(th_dir, '%s.txt' % language)
    if not os.path.exists(filename):
        raise ValueError('No thesaurus file for "%s" found'% language)

    fp = open(filename)
    try:
        for idx, l in enumerate(fp):
            if not l.strip(): continue
            mo = enc_reg.match(l)
            if mo:
                encoding = mo.group(1)
                continue
            if l.startswith('#'): continue
            term, words = l.split(' ', 1)
            if not encoding:
                # a data line appeared before the encoding header
                raise ValueError("Thesaurus file %s has no 'encoding' parameter specified" % filename)
            term = unicode(term.strip(), encoding)
            words = [unicode(w.strip(), encoding) for w in words.split(',')]
            if casefolding:
                term = term.lower()
                words = [w.lower() for w in words]
            synonyms[idx] = [term] + words
            # register the group id under every word of the group
            for t in synonyms[idx]:
                terms.setdefault(t, []).append(idx)
    finally:
        fp.close()
    return synonyms, terms
class Thesaurus:
    """ Lazily-loaded synonym lookup for a single language. """

    implements(IThesaurus)

    def __init__(self, language, casefolding=True, filename=None):
        # the data file is only read on first access (see _load())
        self._language = language
        self._filename = filename
        self._synonyms = {}   # group id -> [word1, word2, ...]
        self._terms = {}      # word -> [group ids]
        self._casefolding = casefolding
        self._loaded = False

    def _load(self):
        """ Read the thesaurus file into memory (done only once). """
        self._synonyms, self._terms = readThesaurus(self._language,
                                                    self._casefolding,
                                                    self._filename)
        self._loaded = True

    def getTermsFor(self, word):
        """ Return a list of similar terms for the given word or None
            if there are none.
        """
        if not self._loaded:
            self._load()
        if self._casefolding:
            word = word.lower()
        related = set()
        for group in self._terms.get(word, []):
            related.update(self._synonyms.get(group, []))
        if related:
            # the word itself is always part of its own groups
            related.remove(word)
        if related:
            return list(related)
        return None

    def getLanguage(self):
        """ return the language of the thesaurus """
        return self._language

    def getSize(self):
        """ Return the number of distinct terms. """
        if not self._loaded:
            self._load()
        return len(self._terms)

GermanThesaurus = Thesaurus('de')
import sys, os, time, atexit
import hotshot, hotshot.stats
from optparse import OptionParser
from zope.component import provideUtility
from zope.component.interfaces import IFactory
from zope.component.testing import setUp
from index import Index
from parsers.english import EnglishParser
from splitter import SplitterFactory
from stopwords import Stopwords
from zopyx.txng3.core.interfaces import IParser, IStopwords, IThesaurus
from zopyx.txng3.core.lexicon import LexiconFactory
from zopyx.txng3.core.storage import StorageWithTermFrequencyFactory
from zopyx.txng3.core.thesaurus import GermanThesaurus
# Setup environment: register every utility the index needs with the
# global zope.component registry before the Index is created
setUp()
provideUtility(SplitterFactory, IFactory, 'txng.splitters.default')
provideUtility(EnglishParser(), IParser, 'txng.parsers.en')
provideUtility(Stopwords(), IStopwords, 'txng.stopwords')
provideUtility(LexiconFactory, IFactory, 'txng.lexicons.default')
provideUtility(StorageWithTermFrequencyFactory, IFactory, 'txng.storages.default')
provideUtility(GermanThesaurus, IThesaurus, 'txng.thesaurus.de')

# optional readline support: persistent command history in ~/.pyhist;
# failures (no readline module, unreadable history) are ignored on purpose
try:
    import readline
    histfile = os.path.expanduser('~/.pyhist')
    readline.read_history_file(histfile)
    atexit.register(readline.write_history_file, histfile)
except: pass
class Text:
    """ Minimal document stub exposing its text as 'SearchableText'
        so the index can treat it like a real content object.
    """

    def __init__(self, s):
        self.SearchableText = s
# command line interface of the benchmark/demo shell
parser = OptionParser()
parser.add_option('-d','--directory', action='store',type='string', default='tests/data/texts',
                  dest='directory',help='directory to be search for input files')
parser.add_option('-p','--profile', action='store_true', default=False,
                  dest='profile',help='perform profiling of the indexing process')
parser.add_option('-t','--thesaurus', action='store', default=None,
                  dest='thesaurus',help='ID of thesaurus to be used')
options, files = parser.parse_args()

# the index under test plus global counters for the summary line
I = Index(fields=('SearchableText',), autoexpand_limit=4)
ts = time.time()
count = 0
bytes = 0       # total indexed size (NOTE: shadows the 'bytes' builtin)
ID2FILES = {}   # docid -> filename, used when printing search results
def do_index(options, files):
global count, bytes
if not files:
print >>sys.stderr, 'Reading files from %s' % options.directory
files = []
for dirname, dirs, filenames in os.walk(options.directory):
for f in filenames:
fullname = os.path.join(dirname, f)
if f.endswith('txt'):
files.append(fullname)
for docid, fname in enumerate(files):
text = open(fname).read()
I.index_object(Text(unicode(text, 'iso-8859-15')), docid)
count += 1
bytes += len(text)
ID2FILES[docid] = fname
if count % 100 ==0:
print count
# run the indexing phase, optionally under the hotshot profiler
if options.profile:
    prof = hotshot.Profile('indexer.prof')
    prof.runcall(do_index, options, files)
    stats = hotshot.stats.load('indexer.prof')
    stats.strip_dirs()
    stats.sort_stats('cumulative', 'calls')
    stats.print_stats(25)
else:
    do_index(options, files)

# print an indexing summary (documents, time, throughput)
duration = time.time() - ts
print '%d documents, duration: %5.3f seconds,total size: %d bytes, speed: %5.3f bytes/second' % (count, duration, bytes, float(bytes)/duration)

# interactive query loop (terminate with EOF / Ctrl-C at the prompt)
while 1:
    query = raw_input('query> ')
    query = unicode(query, 'iso-8859-15')
    try:
        # fixed search options for the demo; ranked result set capped
        # at 100 hits
        kw = {'autoexpand' : 'off',
              'ranking' : True,
              'ranking_maxhits' : 100,
              'field' : 'SearchableText',
              }
        if options.thesaurus:
            kw['thesaurus'] = options.thesaurus
        ts = time.time()
        if options.profile:
            # profile the query as well when -p was given
            prof = hotshot.Profile('query.prof')
            result = prof.runcall(I.search, query, **kw)
            stats = hotshot.stats.load('query.prof')
            stats.strip_dirs()
            stats.sort_stats('cumulative', 'calls')
            stats.print_stats(25)
        else:
            result = I.search(query, **kw)
        te = time.time()
        for docid,score in result.getRankedResults().items():
            print ID2FILES[docid], score
        print '%2.5lf milli-seconds' % (1000.0*(te-ts))
    except:
        # deliberately broad: keep the interactive shell alive no matter
        # what the query raised; just show the traceback
        import traceback
        traceback.print_exc()
import sys
from zope.component import createObject
from zope.component import getUtility
from zope.interface import implements
from BTrees.OOBTree import OOBTree
from evaluator import Evaluator
from compatible import Persistent
from content import extract_content
from config import defaults
from resultset import unionResultSets
from util import handle_exc
from searchrequest import SearchRequest
from stemmer import getStemmer
from zopyx.txng3.core.exceptions import StorageException
from zopyx.txng3.core.interfaces import (
IIndex, IParser, IStopwords, INormalizer, IStorageWithTermFrequency,
IRanking)
from zopyx.txng3.core.parsetree import node_splitter, stopword_remover
class Index(Persistent, object):
    """ Persistent full text index.  All configuration is passed as
        keyword arguments to the constructor; see config.defaults for
        the supported settings and their default values.
    """

    implements(IIndex)

    # class-level default; may be overridden per instance through **kw
    ranking_method = defaults['ranking_method']
    def __init__(self, **kw):
        """ Create an index.  Allowed keyword arguments are exactly the
            keys of config.defaults; anything else raises ValueError.
        """
        # perform argument check first
        illegal_args = [k for k in kw.keys() if not k in defaults.keys()]
        if illegal_args:
            raise ValueError('Unknown parameters: %s' % ', '.join(illegal_args))

        # setup preferences using default args (preferences are stored as
        # attributes of the index instance)
        for k,v in defaults.items():
            v = kw.get(k, v)
            setattr(self, k, v)

        self.clear()
    def clear(self):
        """ Initialize (or reset) the lexicon and the storage(s). """

        # lexicon & storage
        self._lexicon = createObject(self.lexicon, self.languages)

        # build either a mapping of storages when using dedicated storages
        # otherwise use a single storage
        self._feature_ranking = False # this index supports ranking?
        if self.dedicated_storage:
            self._storage = OOBTree()
            for f in self.fields:
                self._storage[f] = createObject(self.storage)
                # all fields use the same storage factory, so checking the
                # storage created last is sufficient
                self._feature_ranking = IStorageWithTermFrequency.providedBy(self._storage[f])
        else:
            self._storage = createObject(self.storage)
            self._feature_ranking = IStorageWithTermFrequency.providedBy(self._storage)
def getLexicon(self):
""" return the lexicon """
return self._lexicon
def getStorage(self, field):
""" return the storage """
if self.dedicated_storage:
try:
return self._storage[field]
except KeyError:
raise ValueError("No such storage for field '%s'" % field)
else:
return self._storage
def getSettings(self):
""" returns a mapping contains the indexes preferences """
from copy import copy
d = {}
for k in defaults.keys():
d[k] = copy(getattr(self, k))
return d
    def index_object(self, obj, docid):
        """ Index a given object under a given document id.  Returns
            True when something was indexed, False when no indexable
            content could be extracted.
        """

        # Call the content extractor which is responsible for
        # the extraction of all related content parts from obj
        # based on the object type and the index settings.
        # The extractor should return a dictionary that maps
        # all fields of the index to a dictionary with the following
        # key-value pairs:
        # 'language' -> language
        # 'content' -> content of particular field in obj as unicode
        # string
        try:
            indexable_content = extract_content(self.fields,
                                                obj,
                                                default_encoding=self.default_encoding,
                                                default_language=self.languages[0])
        except:
            # deliberately broad: indexing must not break on a single
            # faulty object; the error is logged by handle_exc()
            handle_exc('extract_content failed', obj, sys.exc_info())
            return False

        if indexable_content is None or not indexable_content:
            return False

        # now iterate over all fields and pass all field related content
        # through the indexer pipeline
        all_wordids = [] # we need this only if dedicated_storage == False

        for field in [f for f in self.fields if f in indexable_content.getFields()]:
            wordids = []
            for info in indexable_content.getFieldData(field):
                content = info['content']
                if not isinstance(content, unicode):
                    raise ValueError('Content must be unicode: %s' % repr(content))

                # If a document has an unknown language (other than the ones configured
                # for the index), an exception will be raised or the content is
                # indexed under the first configured language
                language = info['language']
                if not language in self.languages:
                    if self.index_unknown_languages:
                        language = self.languages[0]
                    else:
                        raise ValueError('Unsupported language: %s (allowed: %s)' % (language, ', '.join(self.languages)))

                # run content through the pipline (splitter, stopword remover, normalizer etc)
                words = self._process_words(content, language)

                # now obtain wordids from the lexicon for all words
                wordids.extend(self._lexicon.insertWords(words, language))

            # If we have dedicated storages for every field then we must insert the
            # wordids here otherwise we collect all wordids and insert them into
            # the default storage. This implies that one should use dedicated storages
            # when indexing multiple fields. If you index multiple fields without
            # dedicated storage you will not be able to search by-field.
            if self.dedicated_storage:
                try:
                    old_ids = self._storage[field].getWordIdsForDocId(docid)
                except StorageException:
                    old_ids = []
                # reindex only when the content actually changed
                if old_ids != wordids:
                    self._storage[field].insertDocument(docid, wordids)
            else:
                all_wordids.extend(wordids)

        # Insert everything into the default storage when not using dedicated
        # storages
        if not self.dedicated_storage:
            try:
                old_ids = self._storage.getWordIdsForDocId(docid)
            except StorageException:
                old_ids = []
            if old_ids != all_wordids:
                self._storage.insertDocument(docid, all_wordids)

        return True
    def _process_words(self, content, language):
        """ Implements the processing pipeline: normalize -> split ->
            remove stopwords -> stem.  'content' must be a unicode
            string; returns the list of resulting terms.
        """

        # first normalize content string
        if self.use_normalizer:
            normalizer = getUtility(INormalizer)
            content = normalizer.process(content, language)

        # now create a new splitter
        splitter = createObject(self.splitter,
                                casefolding=self.splitter_casefolding,
                                separator=self.splitter_additional_chars,
                                maxlen=self.splitter_max_length,
                                )

        # and split unicode content into list of unicode strings
        words = splitter.split(content)

        # now filter out all stopwords
        if self.use_stopwords:
            sw_utility = getUtility(IStopwords)
            words = sw_utility.process(words, language)

        # Stem words if required. If no stemmer for 'language' is available
        # then do not stem
        if self.use_stemmer:
            S = getStemmer(language)
            if S:
                words = S.stem(words)

        return words
def unindex_object(self, docid):
""" remove a document given its document id from the index """
if self.dedicated_storage:
for field in self.fields:
self._storage[field].removeDocument(docid)
else:
self._storage.removeDocument(docid)
def _prepare_query(self, query, language):
""" performs similar transformations as _process_words() but
only for a query. So we don't need the splitter etc.
"""
# to lowercase if necessary
if self.splitter_casefolding:
query = query.lower()
# normalize query string
if self.use_normalizer:
normalizer = getUtility(INormalizer)
query = normalizer.process(query, language)
return query
query_options = ('parser', 'language',
'field', 'search_all_fields',
'autoexpand',
'similarity_ratio', 'thesaurus',
'ranking', 'ranking_maxhits')
    def search(self, query, **kw):
        """ Perform a query against the index and return a ResultSet.
            Valid query options are:

            'parser' -- named utility implementing IParser
            'language' -- language to be used to lookup words from the lexicon
            'field' -- perform searches against a configured index field
            'search_all_fields' -- search every configured field (needs
            dedicated storage; mutually exclusive with 'field')
            'autoexpand' -- off|always|on_miss (see below)
            'similarity_ratio' -- Levenshtein threshold (0.0 .. 1.0)
            'thesaurus' -- name(s) of configured thesauri
            'ranking' / 'ranking_maxhits' -- relevance ranking controls
        """

        # queries must be unicode
        if not isinstance(query, unicode):
            raise ValueError('Query must be unicode string')

        # First check query options
        for k in kw.keys():
            if not k in self.query_options:
                raise ValueError('Unknown option: %s (supported query options: %s)' % (k, ', '.join(self.query_options)))

        # obtain parser ID (which is the name of named utility implementing IParser)
        parser_id = kw.get('parser', self.query_parser)

        # determine query language
        language = kw.get('language', self.languages[0])
        if not language in self.languages:
            raise ValueError('Unsupported language: %s (supported languages: %s)' % (language, ', '.join(self.languages)))

        # check if field is known to the index
        field = kw.get('field')
        search_all_fields = kw.get('search_all_fields')
        if field and search_all_fields:
            raise ValueError('Cannot specify field and search_all_fields')
        if search_all_fields:
            if not self.dedicated_storage:
                raise ValueError(
                    'search_all_fields cannot be used without dedicated '
                    'storage.')
            search_fields = self.fields
        else:
            # fall back to the first configured field
            if not field:
                field = self.fields[0]
            if field not in self.fields:
                raise ValueError('Unknown field: %s (known fields: %s)' % (
                    field, ', '.join(self.fields)))
            search_fields = [field]

        # perform optional cosine ranking after searching
        ranking = bool(kw.get('ranking', self.ranking))
        if ranking and not self._feature_ranking:
            raise ValueError("The storage used for this index does not support relevance ranking")

        # Limit *ranked* result set to at most 'ranking_maxhits' hits
        ranking_maxhits = kw.get('ranking_maxhits', 50)
        if not isinstance(ranking_maxhits, int):
            raise ValueError('"ranking_maxhits" must be an integer')
        if kw.has_key('ranking_maxhits') and not ranking:
            raise ValueError('Specify "ranking_maxhits" only with having set ranking=True')

        # autoexpansion of query terms
        # 'off'     -- expand never
        # 'always'  -- expand always
        # 'on_miss' -- expand only for not-found terms in the query string
        autoexpand = kw.get('autoexpand', self.autoexpand)
        if not autoexpand in ('off', 'always', 'on_miss'):
            raise ValueError('"autoexpand" must either be "off", "always" or "on_miss"')

        # Use a sequence of configured thesauri (identified by their configured name)
        # for additional lookup of terms
        thesaurus = kw.get('thesaurus', [])
        if isinstance(thesaurus, str):
            thesaurus = (thesaurus,)
        if not isinstance(thesaurus, (list, tuple)):
            raise ValueError('"thesaurus" must be list or tuple of configured thesaurus ids')

        # Similarity ratio (measured as Levenshtein distance)
        similarity_ratio = float(kw.get('similarity_ratio', 0.75))
        if similarity_ratio < 0.0 or similarity_ratio > 1.0:
            raise ValueError('similarity_ratio must been 0.0 and 1.0 (value %f)' % similarity_ratio)

        # obtain a parser (registered as named utility)
        parser = getUtility(IParser, parser_id)

        # run query string through normalizer, case normalizer etc.
        query = self._prepare_query(query, language)

        # create a tree of nodes
        parsed_query = parser.parse(query)
        if not parsed_query:
            raise ValueError('No query specified')

        # Post-filter for stopwords. We need to perform this
        # outside the query parser because the lex/yacc-based query
        # parser implementation can't be used in a reasonable way
        # to deal with such additional functionality.
        if self.use_stopwords:
            sw_utility = getUtility(IStopwords)
            stopwords = sw_utility.stopwordsForLanguage(language)
            if stopwords:
                # The stopword remover removes WordNodes representing
                # a stopword *in-place*
                stopword_remover(parsed_query, stopwords)

        # Split word nodes with the splitter
        splitter = createObject(self.splitter,
                                casefolding=self.splitter_casefolding,
                                separator=self.splitter_additional_chars,
                                maxlen=self.splitter_max_length,
                                )
        parsed_query = node_splitter(parsed_query, splitter)

        # build a SearchRequest per field and evaluate each one
        resultsets = []
        for field in search_fields:
            sr = SearchRequest(self,
                               query=query,
                               parsetree=parsed_query,
                               field=field,
                               autoexpand=autoexpand,
                               similarity_ratio=similarity_ratio,
                               thesaurus=thesaurus,
                               language=language)

            # call the evaluator and produce a ResultSet instance
            resultsets.append(Evaluator(sr).run())
        resultset = unionResultSets(resultsets)

        # optional ranking using the cosine measure or another configured
        # ranking method
        if ranking:
            ranking_method = getUtility(IRanking, name=self.ranking_method)
            resultset.ranking(ranking_method,
                              index=self,
                              language=language,
                              nbest=ranking_maxhits)

        return resultset
    ############################################################
    # index attributes defined as properties
    # (most settings are validated on assignment; the properties are
    # wired up through the attribute writes in __init__)
    ############################################################

    def _setUse_stemmer(self, value):
        # also accepts 0/1 for backward compatibility; stored as bool
        if not value in (True, False, 0, 1):
            raise ValueError('"use_stemmer" must be either True or False')
        self._use_stemmer= bool(value)
    def _getUse_stemmer(self):
        return self._use_stemmer
    use_stemmer = property(_getUse_stemmer, _setUse_stemmer)

    def _setSplitter(self, value):
        # name of the splitter factory utility
        self._splitter = value
    def _getSplitter(self):
        return self._splitter
    splitter = property(_getSplitter, _setSplitter)

    def _setLexicon(self, value):
        # using __lexicon instead of _lexicon to avoid a name clash
        # (_lexicon is the lexicon object created in clear())
        self.__lexicon = value
    def _getLexicon(self):
        return self.__lexicon
    lexicon = property(_getLexicon, _setLexicon)

    def _setDedicated_storage(self, value):
        if not value in (True, False):
            raise ValueError('"dedicated_storage" must be True or False')
        self._dedicated_storage = value
    def _getDedicated_storage(self):
        return self._dedicated_storage
    dedicated_storage = property(_getDedicated_storage, _setDedicated_storage)

    def _setSplitter_max_length(self, value):
        self._splitter_max_length = value
    def _getSplitter_max_length(self):
        return self._splitter_max_length
    splitter_max_length = property(_getSplitter_max_length, _setSplitter_max_length)

    def _setFields(self, value):
        # sequence of indexed field names
        self._fields = value
    def _getFields(self):
        return self._fields
    fields = property(_getFields, _setFields)

    def _setUse_normalizer(self, value):
        if not value in (True, False):
            raise ValueError('"use_normalizer" must be True or False')
        self._use_normalizer = value
    def _getUse_normalizer(self):
        return self._use_normalizer
    use_normalizer = property(_getUse_normalizer, _setUse_normalizer)

    def _setstorage(self, value):
        # name of the storage factory utility (name-mangled like lexicon
        # to avoid clashing with the _storage object created in clear())
        self.__storage = value
    def _getstorage(self):
        return self.__storage
    storage = property(_getstorage, _setstorage)

    def _setDefault_encoding(self, value):
        self._default_encoding = value
    def _getDefault_encoding(self):
        return self._default_encoding
    default_encoding = property(_getDefault_encoding, _setDefault_encoding)

    def _setLanguages(self, value):
        if not isinstance(value, (list, tuple)):
            raise ValueError('"languages" must be list or tuple of country codes')
        if not value:
            raise ValueError('No languages given')
        self._languages = value
    def _getLanguages(self):
        return self._languages
    languages = property(_getLanguages, _setLanguages)

    def _setSplitter_additional_chars(self, value):
        self._splitter_additional_chars = value
    def _getSplitter_additional_chars(self):
        # fall back to the legacy '_splitter_separators' attribute for
        # indexes created before this setting was renamed
        value = getattr(self, '_splitter_additional_chars', None)
        if value is None:
            return self._splitter_separators
        return value
    splitter_additional_chars = property(_getSplitter_additional_chars, _setSplitter_additional_chars)
def _setQuery_parser(self, value):
self._query_parser = value
def _getQuery_parser(self):
return self._query_parser
query_parser = property(_getQuery_parser, _setQuery_parser)
def _setSplitter_casefolding(self, value):
if not value in (True, False):
raise ValueError('"splitter_casefolding" must be True or False')
self._splitter_casefolding = value
def _getSplitter_casefolding(self):
return self._splitter_casefolding
splitter_casefolding = property(_getSplitter_casefolding, _setSplitter_casefolding)
def _setIndex_unknown_languages(self, value):
self._index_unknown_languages = value
def _getIndex_unknown_languages(self):
return self._index_unknown_languages
index_unknown_languages = property(_getIndex_unknown_languages, _setIndex_unknown_languages)
def _setAutoexpand(self, value):
self._autoexpand = value
def _getAutoexpand(self):
return getattr(self, '_autoexpand', 'off')
autoexpand = property(_getAutoexpand, _setAutoexpand)
def _setAutoexpand_limit(self, value):
self._autoexpand_limit = value
def _getAutoexpand_limit(self):
return self._autoexpand_limit
autoexpand_limit = property(_getAutoexpand_limit, _setAutoexpand_limit)
def _setRanking(self, value):
self._ranking = value
def _getRanking(self):
return self._ranking
ranking = property(_getRanking, _setRanking)
def _setUse_stopwords(self, value):
if not value in (True, False):
raise ValueError('"use_stopwords" must be True or False')
self._use_stopwords = value
def _getUse_stopwords(self):
return self._use_stopwords
use_stopwords = property(_getUse_stopwords, _setUse_stopwords)
############################################################
# Some helper methods
############################################################
    def _dump(self):
        """ perform low-level dump of the index """
        # Debugging aid (Python 2 print statements): lists every word per
        # language in the lexicon, then for every field the wordid ->
        # document-ids mapping of its storage.
        print 'Lexicon'
        for lang in self.getLexicon().getLanguages():
            print lang
            for k,v in self.getLexicon()._words[lang].items():
                print repr(k), v
            print
        print '-'*80
        print 'Storage'
        for field in self.fields:
            S = self.getStorage(field)
            for k, v in S._wid2doc.items():
                print k, list(v)
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, ', '.join(['%s=%s' % (k, repr(getattr(self, k, None))) for k in defaults.keys()]))
def __len__(self):
if self.dedicated_storage:
return sum([len(s) for s in self._storage.values()])
else:
return len(self._storage) | zopyx.txng3.core | /zopyx.txng3.core-3.6.2.zip/zopyx.txng3.core-3.6.2/zopyx/txng3/core/index.py | index.py |
from zope.interface import Interface
class ILexicon(Interface):
    """ Interface for Lexicon objects. A TextIndexNG3 lexicon acts as a
        storage for multilingual content. Words are stored per-language.
    """
    def getLanguages():
        """ return a list of languages handled by the lexicon """
    def addLanguage(language):
        """ prepare the lexicon to store words in a new language """
    def hasLanguage(language):
        """ check if the lexicon is configured for a language """
    def insertWords(words, language):
        """ Insert a sequence of words for a given language. Return a sequence of wordids. """
    def insertWord(word, language):
        """ Insert a word for a given language. Return a wordid. """
    def getWord(wordid):
        """ return the word for the given wordid """
    def getWordId(word, language='xx'):
        """ return the wordid for a given word """
    def getWordIds(words, language='xx'):
        """ return a list of wordids for a list of words """
    def getWordsForLanguage(language):
        """ return all words for a given language """
    def getWordAndLanguage(wordid):
        """ return the (word, language) tuple for the given wordid """
    def getWordsForRightTruncation(prefix, language='en'):
        """ return a sequence of words with a given prefix """
    def getWordsForLeftTruncation(suffix , language='en'):
        """ return a sequence of words with a given suffix """
    # NOTE: the spelling "Similiar" is part of the published interface name.
    def getWordsForPattern(pattern, language='en'):
        """ return a sequence of words that match 'pattern'. 'pattern' is a
            sequence of characters including the wildcards '?' and '*'.
        """
    def getWordsInRange(w1, w2, language='en'):
        """ return a sorted list of words where w1 <= w(x) <= w2 """
    def getSimiliarWords(term, threshold, language='en'):
        """ return the words that are similar to 'term' according to a
            similarity measure and the given threshold """
    def getWordsForSubstring(sub, language='en'):
        """ return all words that match the given substring """
# Mapping of (X)HTML named character entities to their unicode replacement
# characters.  The markup-relevant entities (amp, gt, lt, quot) are left
# commented out below and are therefore not replaced.
entitydefs = {
    'AElig': u'\u00c6', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
    'Aacute': u'\u00c1', # latin capital letter A with acute, U+00C1 ISOlat1
    'Acirc': u'\u00c2', # latin capital letter A with circumflex, U+00C2 ISOlat1
    'Agrave': u'\u00c0', # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
    'Alpha': u'\u0391', # greek capital letter alpha, U+0391
    'Aring': u'\u00c5', # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
    'Atilde': u'\u00c3', # latin capital letter A with tilde, U+00C3 ISOlat1
    'Auml': u'\u00c4', # latin capital letter A with diaeresis, U+00C4 ISOlat1
    'Beta': u'\u0392', # greek capital letter beta, U+0392
    'Ccedil': u'\u00c7', # latin capital letter C with cedilla, U+00C7 ISOlat1
    'Chi': u'\u03a7', # greek capital letter chi, U+03A7
    'Dagger': u'\u2021', # double dagger, U+2021 ISOpub
    'Delta': u'\u0394', # greek capital letter delta, U+0394 ISOgrk3
    'ETH': u'\u00d0', # latin capital letter ETH, U+00D0 ISOlat1
    'Eacute': u'\u00c9', # latin capital letter E with acute, U+00C9 ISOlat1
    'Ecirc': u'\u00ca', # latin capital letter E with circumflex, U+00CA ISOlat1
    'Egrave': u'\u00c8', # latin capital letter E with grave, U+00C8 ISOlat1
    'Epsilon': u'\u0395', # greek capital letter epsilon, U+0395
    'Eta': u'\u0397', # greek capital letter eta, U+0397
    'Euml': u'\u00cb', # latin capital letter E with diaeresis, U+00CB ISOlat1
    'Gamma': u'\u0393', # greek capital letter gamma, U+0393 ISOgrk3
    'Gcirc': u'\u011C', # G with circumlex, U+011C
    'Iacute': u'\u00cd', # latin capital letter I with acute, U+00CD ISOlat1
    'Icirc': u'\u00ce', # latin capital letter I with circumflex, U+00CE ISOlat1
    'Igrave': u'\u00cc', # latin capital letter I with grave, U+00CC ISOlat1
    'Iota': u'\u0399', # greek capital letter iota, U+0399
    'Iuml': u'\u00cf', # latin capital letter I with diaeresis, U+00CF ISOlat1
    'Kappa': u'\u039a', # greek capital letter kappa, U+039A
    'Lambda': u'\u039b', # greek capital letter lambda, U+039B ISOgrk3
    'Mu': u'\u039c', # greek capital letter mu, U+039C
    'Ntilde': u'\u00d1', # latin capital letter N with tilde, U+00D1 ISOlat1
    'Nu': u'\u039d', # greek capital letter nu, U+039D
    'OElig': u'\u0152', # latin capital ligature OE, U+0152 ISOlat2
    'Oacute': u'\u00d3', # latin capital letter O with acute, U+00D3 ISOlat1
    'Ocirc': u'\u00d4', # latin capital letter O with circumflex, U+00D4 ISOlat1
    'Ograve': u'\u00d2', # latin capital letter O with grave, U+00D2 ISOlat1
    'Omega': u'\u03a9', # greek capital letter omega, U+03A9 ISOgrk3
    'Omicron': u'\u039f', # greek capital letter omicron, U+039F
    'Oslash': u'\u00d8', # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
    'Otilde': u'\u00d5', # latin capital letter O with tilde, U+00D5 ISOlat1
    'Ouml': u'\u00d6', # latin capital letter O with diaeresis, U+00D6 ISOlat1
    'Phi': u'\u03a6', # greek capital letter phi, U+03A6 ISOgrk3
    'Pi': u'\u03a0', # greek capital letter pi, U+03A0 ISOgrk3
    'Prime': u'\u2033', # double prime = seconds = inches, U+2033 ISOtech
    'Psi': u'\u03a8', # greek capital letter psi, U+03A8 ISOgrk3
    'Rho': u'\u03a1', # greek capital letter rho, U+03A1
    'Scaron': u'\u0160', # latin capital letter S with caron, U+0160 ISOlat2
    'Sigma': u'\u03a3', # greek capital letter sigma, U+03A3 ISOgrk3
    'THORN': u'\u00de', # latin capital letter THORN, U+00DE ISOlat1
    'Tau': u'\u03a4', # greek capital letter tau, U+03A4
    'Theta': u'\u0398', # greek capital letter theta, U+0398 ISOgrk3
    'Uacute': u'\u00da', # latin capital letter U with acute, U+00DA ISOlat1
    'Ucirc': u'\u00db', # latin capital letter U with circumflex, U+00DB ISOlat1
    'Ugrave': u'\u00d9', # latin capital letter U with grave, U+00D9 ISOlat1
    'Upsilon': u'\u03a5', # greek capital letter upsilon, U+03A5 ISOgrk3
    'Uuml': u'\u00dc', # latin capital letter U with diaeresis, U+00DC ISOlat1
    'Vdot': u'\u1E7E', # latin capital letter V with dot below, U+1E7E
    'Xi': u'\u039e', # greek capital letter xi, U+039E ISOgrk3
    'Yacute': u'\u00dd', # latin capital letter Y with acute, U+00DD ISOlat1
    'Yuml': u'\u0178', # latin capital letter Y with diaeresis, U+0178 ISOlat2
    'Zeta': u'\u0396', # greek capital letter zeta, U+0396
    'aacute': u'\u00e1', # latin small letter a with acute, U+00E1 ISOlat1
    'acirc': u'\u00e2', # latin small letter a with circumflex, U+00E2 ISOlat1
    'acute': u'\u00b4', # acute accent = spacing acute, U+00B4 ISOdia
    'aelig': u'\u00e6', # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
    'agrave': u'\u00e0', # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
    'alefsym': u'\u2135', # alef symbol = first transfinite cardinal, U+2135 NEW
    'alpha': u'\u03b1', # greek small letter alpha, U+03B1 ISOgrk3
    #'amp': u'\u0026', # ampersand, U+0026 ISOnum
    'and': u'\u2227', # logical and = wedge, U+2227 ISOtech
    'ang': u'\u2220', # angle, U+2220 ISOamso
    'ap': u'\u2245', # approximate,
    'apos': u'\u0027', # apostrophe
    # was the malformed literal '\00D7' (octal escape, no u-prefix); normalized
    # to the intended U+00D7.  NOTE(review): standard &ast; is U+002A -- confirm.
    'ast': u'\u00d7', # star
    'aring': u'\u00e5', # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
    'asymp': u'\u2248', # almost equal to = asymptotic to, U+2248 ISOamsr
    'atilde': u'\u00e3', # latin small letter a with tilde, U+00E3 ISOlat1
    'auml': u'\u00e4', # latin small letter a with diaeresis, U+00E4 ISOlat1
    'bdquo': u'\u201e', # double low-9 quotation mark, U+201E NEW
    'beta': u'\u03b2', # greek small letter beta, U+03B2 ISOgrk3
    'brvbar': u'\u00a6', # broken bar = broken vertical bar, U+00A6 ISOnum
    'bull': u'\u2022', # bullet = black small circle, U+2022 ISOpub
    'cap': u'\u2229', # intersection = cap, U+2229 ISOtech
    'ccedil': u'\u00e7', # latin small letter c with cedilla, U+00E7 ISOlat1
    'cedil': u'\u00b8', # cedilla = spacing cedilla, U+00B8 ISOdia
    'cent': u'\u00a2', # cent sign, U+00A2 ISOnum
    'chi': u'\u03c7', # greek small letter chi, U+03C7 ISOgrk3
    'circ': u'\u02c6', # modifier letter circumflex accent, U+02C6 ISOpub
    'clubs': u'\u2663', # black club suit = shamrock, U+2663 ISOpub
    'cong': u'\u2245', # approximately equal to, U+2245 ISOtech
    'copy': u'\u00a9', # copyright sign, U+00A9 ISOnum
    'crarr': u'\u21b5', # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
    'cup': u'\u222a', # union = cup, U+222A ISOtech
    'curren': u'\u00a4', # currency sign, U+00A4 ISOnum
    'dArr': u'\u21d3', # downwards double arrow, U+21D3 ISOamsa
    'dagger': u'\u2020', # dagger, U+2020 ISOpub
    'darr': u'\u2193', # downwards arrow, U+2193 ISOnum
    'deg': u'\u00b0', # degree sign, U+00B0 ISOnum
    'delta': u'\u03b4', # greek small letter delta, U+03B4 ISOgrk3
    'diams': u'\u2666', # black diamond suit, U+2666 ISOpub
    'divide': u'\u00f7', # division sign, U+00F7 ISOnum
    'eacute': u'\u00e9', # latin small letter e with acute, U+00E9 ISOlat1
    'ecirc': u'\u00ea', # latin small letter e with circumflex, U+00EA ISOlat1
    'egrave': u'\u00e8', # latin small letter e with grave, U+00E8 ISOlat1
    'empty': u'\u2205', # empty set = null set = diameter, U+2205 ISOamso
    'emsp': u'\u2003', # em space, U+2003 ISOpub
    'ensp': u'\u2002', # en space, U+2002 ISOpub
    'epsilon': u'\u03b5', # greek small letter epsilon, U+03B5 ISOgrk3
    'equiv': u'\u2261', # identical to, U+2261 ISOtech
    'eta': u'\u03b7', # greek small letter eta, U+03B7 ISOgrk3
    'eth': u'\u00f0', # latin small letter eth, U+00F0 ISOlat1
    'euml': u'\u00eb', # latin small letter e with diaeresis, U+00EB ISOlat1
    'euro': u'\u20ac', # euro sign, U+20AC NEW
    'exist': u'\u2203', # there exists, U+2203 ISOtech
    'fnof': u'\u0192', # latin small f with hook = function = florin, U+0192 ISOtech
    'forall': u'\u2200', # for all, U+2200 ISOtech
    'frac12': u'\u00bd', # vulgar fraction one half = fraction one half, U+00BD ISOnum
    'frac14': u'\u00bc', # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
    'frac34': u'\u00be', # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
    'frasl': u'\u2044', # fraction slash, U+2044 NEW
    'gamma': u'\u03b3', # greek small letter gamma, U+03B3 ISOgrk3
    'ge': u'\u2265', # greater-than or equal to, U+2265 ISOtech
    #'gt': u'\u003e', # greater-than sign, U+003E ISOnum
    'hArr': u'\u21d4', # left right double arrow, U+21D4 ISOamsa
    'harr': u'\u2194', # left right arrow, U+2194 ISOamsa
    'hearts': u'\u2665', # black heart suit = valentine, U+2665 ISOpub
    'hellip': u'\u2026', # horizontal ellipsis = three dot leader, U+2026 ISOpub
    'iacute': u'\u00ed', # latin small letter i with acute, U+00ED ISOlat1
    'icirc': u'\u00ee', # latin small letter i with circumflex, U+00EE ISOlat1
    'iexcl': u'\u00a1', # inverted exclamation mark, U+00A1 ISOnum
    'igrave': u'\u00ec', # latin small letter i with grave, U+00EC ISOlat1
    'image': u'\u2111', # blackletter capital I = imaginary part, U+2111 ISOamso
    'infin': u'\u221e', # infinity, U+221E ISOtech
    'int': u'\u222b', # integral, U+222B ISOtech
    'iota': u'\u03b9', # greek small letter iota, U+03B9 ISOgrk3
    'iquest': u'\u00bf', # inverted question mark = turned question mark, U+00BF ISOnum
    'isin': u'\u2208', # element of, U+2208 ISOtech
    'iuml': u'\u00ef', # latin small letter i with diaeresis, U+00EF ISOlat1
    'kappa': u'\u03ba', # greek small letter kappa, U+03BA ISOgrk3
    'lArr': u'\u21d0', # leftwards double arrow, U+21D0 ISOtech
    'lambda': u'\u03bb', # greek small letter lambda, U+03BB ISOgrk3
    'lang': u'\u2329', # left-pointing angle bracket = bra, U+2329 ISOtech
    'laquo': u'\u00ab', # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
    'larr': u'\u2190', # leftwards arrow, U+2190 ISOnum
    'lceil': u'\u2308', # left ceiling = apl upstile, U+2308 ISOamsc
    'ldquo': u'\u201c', # left double quotation mark, U+201C ISOnum
    'le': u'\u2264', # less-than or equal to, U+2264 ISOtech
    'lfloor': u'\u230a', # left floor = apl downstile, U+230A ISOamsc
    'lowast': u'\u2217', # asterisk operator, U+2217 ISOtech
    'loz': u'\u25ca', # lozenge, U+25CA ISOpub
    'lrm': u'\u200e', # left-to-right mark, U+200E NEW RFC 2070
    'lsaquo': u'\u2039', # single left-pointing angle quotation mark, U+2039 ISO proposed
    'lsquo': u'\u2018', # left single quotation mark, U+2018 ISOnum
    #'lt': u'\u003c', # less-than sign, U+003C ISOnum
    'macr': u'\u00af', # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
    'mdash': u'\u2014', # em dash, U+2014 ISOpub
    'micro': u'\u00b5', # micro sign, U+00B5 ISOnum
    'middot': u'\u00b7', # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
    'minus': u'\u2212', # minus sign, U+2212 ISOtech
    'mu': u'\u03bc', # greek small letter mu, U+03BC ISOgrk3
    'nabla': u'\u2207', # nabla = backward difference, U+2207 ISOtech
    'nbsp': u'\u00a0', # no-break space = non-breaking space, U+00A0 ISOnum
    'ndash': u'\u2013', # en dash, U+2013 ISOpub
    'ne': u'\u2260', # not equal to, U+2260 ISOtech
    'ni': u'\u220b', # contains as member, U+220B ISOtech
    'not': u'\u00ac', # not sign, U+00AC ISOnum
    'notin': u'\u2209', # not an element of, U+2209 ISOtech
    'nsub': u'\u2284', # not a subset of, U+2284 ISOamsn
    'ntilde': u'\u00f1', # latin small letter n with tilde, U+00F1 ISOlat1
    'nu': u'\u03bd', # greek small letter nu, U+03BD ISOgrk3
    'oacute': u'\u00f3', # latin small letter o with acute, U+00F3 ISOlat1
    'ocirc': u'\u00f4', # latin small letter o with circumflex, U+00F4 ISOlat1
    'oelig': u'\u0153', # latin small ligature oe, U+0153 ISOlat2
    'ograve': u'\u00f2', # latin small letter o with grave, U+00F2 ISOlat1
    'oline': u'\u203e', # overline = spacing overscore, U+203E NEW
    'omega': u'\u03c9', # greek small letter omega, U+03C9 ISOgrk3
    'omicron': u'\u03bf', # greek small letter omicron, U+03BF NEW
    'oplus': u'\u2295', # circled plus = direct sum, U+2295 ISOamsb
    'or': u'\u2228', # logical or = vee, U+2228 ISOtech
    'ordf': u'\u00aa', # feminine ordinal indicator, U+00AA ISOnum
    'ordm': u'\u00ba', # masculine ordinal indicator, U+00BA ISOnum
    'oslash': u'\u00f8', # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
    'otilde': u'\u00f5', # latin small letter o with tilde, U+00F5 ISOlat1
    'otimes': u'\u2297', # circled times = vector product, U+2297 ISOamsb
    'ouml': u'\u00f6', # latin small letter o with diaeresis, U+00F6 ISOlat1
    'para': u'\u00b6', # pilcrow sign = paragraph sign, U+00B6 ISOnum
    'part': u'\u2202', # partial differential, U+2202 ISOtech
    'percnt': u'\u0025', # percent sign, U+0025
    'permil': u'\u2030', # per mille sign, U+2030 ISOtech
    'perp': u'\u22a5', # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
    'phi': u'\u03c6', # greek small letter phi, U+03C6 ISOgrk3
    'pi': u'\u03c0', # greek small letter pi, U+03C0 ISOgrk3
    'piv': u'\u03d6', # greek pi symbol, U+03D6 ISOgrk3
    'plusmn': u'\u00b1', # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
    'pound': u'\u00a3', # pound sign, U+00A3 ISOnum
    'prime': u'\u2032', # prime = minutes = feet, U+2032 ISOtech
    'prod': u'\u220f', # n-ary product = product sign, U+220F ISOamsb
    'prop': u'\u221d', # proportional to, U+221D ISOtech
    'psi': u'\u03c8', # greek small letter psi, U+03C8 ISOgrk3
    #'quot': u'\u0022', # quotation mark = APL quote, U+0022 ISOnum
    'rArr': u'\u21d2', # rightwards double arrow, U+21D2 ISOtech
    'radic': u'\u221a', # square root = radical sign, U+221A ISOtech
    'rang': u'\u232a', # right-pointing angle bracket = ket, U+232A ISOtech
    'raquo': u'\u00bb', # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
    'rarr': u'\u2192', # rightwards arrow, U+2192 ISOnum
    'rceil': u'\u2309', # right ceiling, U+2309 ISOamsc
    'rdquo': u'\u201d', # right double quotation mark, U+201D ISOnum
    'real': u'\u211c', # blackletter capital R = real part symbol, U+211C ISOamso
    'reg': u'\u00ae', # registered sign = registered trade mark sign, U+00AE ISOnum
    'rfloor': u'\u230b', # right floor, U+230B ISOamsc
    'rho': u'\u03c1', # greek small letter rho, U+03C1 ISOgrk3
    'rlm': u'\u200f', # right-to-left mark, U+200F NEW RFC 2070
    'rsaquo': u'\u203a', # single right-pointing angle quotation mark, U+203A ISO proposed
    'rsquo': u'\u2019', # right single quotation mark, U+2019 ISOnum
    'sbquo': u'\u201a', # single low-9 quotation mark, U+201A NEW
    'scaron': u'\u0161', # latin small letter s with caron, U+0161 ISOlat2
    'sdot': u'\u22c5', # dot operator, U+22C5 ISOamsb
    'sect': u'\u00a7', # section sign, U+00A7 ISOnum
    'shy': u'\u00ad', # soft hyphen = discretionary hyphen, U+00AD ISOnum
    'sigma': u'\u03c3', # greek small letter sigma, U+03C3 ISOgrk3
    'sigmaf': u'\u03c2', # greek small letter final sigma, U+03C2 ISOgrk3
    'sim': u'\u223c', # tilde operator = varies with = similar to, U+223C ISOtech
    'spades': u'\u2660', # black spade suit, U+2660 ISOpub
    'sub': u'\u2282', # subset of, U+2282 ISOtech
    'sube': u'\u2286', # subset of or equal to, U+2286 ISOtech
    'sum': u'\u2211', # n-ary sumation, U+2211 ISOamsb
    'sup': u'\u2283', # superset of, U+2283 ISOtech
    'sup1': u'\u00b9', # superscript one = superscript digit one, U+00B9 ISOnum
    'sup2': u'\u00b2', # superscript two = superscript digit two = squared, U+00B2 ISOnum
    'sup3': u'\u00b3', # superscript three = superscript digit three = cubed, U+00B3 ISOnum
    'supe': u'\u2287', # superset of or equal to, U+2287 ISOtech
    'szlig': u'\u00df', # latin small letter sharp s = ess-zed, U+00DF ISOlat1
    'tau': u'\u03c4', # greek small letter tau, U+03C4 ISOgrk3
    'there4': u'\u2234', # therefore, U+2234 ISOtech
    'theta': u'\u03b8', # greek small letter theta, U+03B8 ISOgrk3
    'thetasym': u'\u03d1', # greek small letter theta symbol, U+03D1 NEW
    'thinsp': u'\u2009', # thin space, U+2009 ISOpub
    'thorn': u'\u00fe', # latin small letter thorn with, U+00FE ISOlat1
    'tilde': u'\u02dc', # small tilde, U+02DC ISOdia
    'times': u'\u00d7', # multiplication sign, U+00D7 ISOnum
    'trade': u'\u2122', # trade mark sign, U+2122 ISOnum
    'uArr': u'\u21d1', # upwards double arrow, U+21D1 ISOamsa
    'uacute': u'\u00fa', # latin small letter u with acute, U+00FA ISOlat1
    'uarr': u'\u2191', # upwards arrow, U+2191 ISOnum
    'ucirc': u'\u00fb', # latin small letter u with circumflex, U+00FB ISOlat1
    'ugrave': u'\u00f9', # latin small letter u with grave, U+00F9 ISOlat1
    'uml': u'\u00a8', # diaeresis = spacing diaeresis, U+00A8 ISOdia
    'upsih': u'\u03d2', # greek upsilon with hook symbol, U+03D2 NEW
    'upsilon': u'\u03c5', # greek small letter upsilon, U+03C5 ISOgrk3
    'uuml': u'\u00fc', # latin small letter u with diaeresis, U+00FC ISOlat1
    'weierp': u'\u2118', # script capital P = power set = Weierstrass p, U+2118 ISOamso
    'xi': u'\u03be', # greek small letter xi, U+03BE ISOgrk3
    'yacute': u'\u00fd', # latin small letter y with acute, U+00FD ISOlat1
    'yen': u'\u00a5', # yen sign = yuan sign, U+00A5 ISOnum
    'yuml': u'\u00ff', # latin small letter y with diaeresis, U+00FF ISOlat1
    'zeta': u'\u03b6', # greek small letter zeta, U+03B6 ISOgrk3
    'zwj': u'\u200d', # zero width joiner, U+200D NEW RFC 2070
    'zwnj': u'\u200c', # zero width non-joiner, U+200C NEW RFC 2070
    }
import xml.sax
import zipfile, cStringIO
from xml.sax.handler import ContentHandler
from zopyx.txng3.core.baseconverter import BaseConverter
class ootextHandler(ContentHandler):
    """SAX handler that collects all character data of an OpenOffice
    ``content.xml`` document as UTF-8 text."""
    def startDocument(self):
        # Collect text fragments in a StringIO buffer.
        self._data = cStringIO.StringIO()
    def characters(self, ch):
        # A trailing blank keeps adjacent text nodes apart.
        # NOTE(review): SAX may deliver one text node in several characters()
        # calls; the appended blank could then split a word -- confirm.
        self._data.write(ch.encode("utf-8") + ' ')
    def getxmlcontent(self, doc):
        """Return the raw ``content.xml`` from the zipped document *doc*
        (a byte string) with its DOCTYPE declaration stripped."""
        # renamed local: "file" shadowed the Python 2 builtin
        buf = cStringIO.StringIO(doc)
        doctype = """<!DOCTYPE office:document-content PUBLIC "-//OpenOffice.org//DTD OfficeDocument 1.0//EN" "office.dtd">"""
        xmlstr = zipfile.ZipFile(buf).read('content.xml')
        xmlstr = xmlstr.replace(doctype,'')
        return xmlstr
    def getData(self):
        """Return the collected text as a UTF-8 byte string."""
        return self._data.getvalue()
class Converter(BaseConverter):
    """Text converter for StarOffice/OpenOffice.org and OASIS
    OpenDocument files: the documents are zip archives and the text is
    extracted from the contained ``content.xml``."""
    # NOTE: the four *-template entries previously carried a stray
    # " otg"/" otp"/" ots"/" ott" extension suffix which made them
    # invalid, unmatchable mimetypes; the suffixes were removed.
    content_type = ('application/vnd.sun.xml.calc',
                    'application/vnd.sun.xml.calc.template',
                    'application/vnd.sun.xml.draw',
                    'application/vnd.sun.xml.draw.template',
                    'application/vnd.sun.xml.impress',
                    'application/vnd.sun.xml.impress.template',
                    'application/vnd.sun.xml.math',
                    'application/vnd.sun.xml.writer',
                    'application/vnd.sun.xml.writer.global',
                    'application/vnd.sun.xml.writer.template',
                    'application/vnd.oasis.opendocument.chart',
                    'application/vnd.oasis.opendocument.database',
                    'application/vnd.oasis.opendocument.formula',
                    'application/vnd.oasis.opendocument.graphics',
                    'application/vnd.oasis.opendocument.graphics-template',
                    'application/vnd.oasis.opendocument.image',
                    'application/vnd.oasis.opendocument.presentation',
                    'application/vnd.oasis.opendocument.presentation-template',
                    'application/vnd.oasis.opendocument.spreadsheet',
                    'application/vnd.oasis.opendocument.spreadsheet-template',
                    'application/vnd.oasis.opendocument.text',
                    'application/vnd.oasis.opendocument.text-master',
                    'application/vnd.oasis.opendocument.text-template',
                    'application/vnd.oasis.opendocument.text-web')
    content_description = "OpenOffice, all formats"
    def convert(self, doc, encoding, mimetype,
                logError=False, raiseException=False):
        """Extract the text of *doc* (zipped OpenOffice document given
        as a byte string) and return ``(text, 'utf-8')``."""
        handler = ootextHandler()
        xmlstr = handler.getxmlcontent(doc)
        xml.sax.parseString(xmlstr, handler)
        return handler.getData(), 'utf-8'
# Shared module-level converter instance.
OOfficeConverter = Converter()
from HTMLParser import HTMLParser,HTMLParseError,piclose, charref, entityref
from string import lower,find
class HTML2SafeHTML(HTMLParser):
    """HTML filter that keeps only the tags listed in ``valid_tags``.

    The filtered markup accumulates in ``self.result``.  Event-handler
    attributes (``on...``) and ``javascript`` attribute values are
    dropped, non-standard entities get their ampersand escaped, and
    ``cleanup()`` appends closing tags that are still open.
    """
    # Tags whose open element may be closed implicitly.
    can_close = ['li','p','dd','dt','option']
    # Empty tags that never take a closing tag.
    never_close = ['br','wbr','hr','input','isindex','base','meta','img']
    def __init__(self,valid_tags):
        HTMLParser.__init__(self)
        self.valid_tags = valid_tags
        self.result = ""
        self.openTags = []
    def end_tag(self,tag):
        # Append a closing tag to the output.
        self.result = "%s</%s>" % (self.result, tag)
    def handle_data(self, data):
        if data:
            self.result = self.result + data
    def handle_charref(self, name):
        self.result = "%s&#%s;" % (self.result, name)
    from htmlentitydefs import entitydefs # our entity defs list to use
    def handle_entityref(self, name):
        # Quote non-standard entities: escape the ampersand of any
        # entity name not found in the standard table.
        # ("in" instead of the Python-2-only dict.has_key())
        if name in self.entitydefs:
            amp = '&'
        else:
            amp = '&amp;'
        self.result = "%s%s%s;" % (self.result, amp, name)
    def handle_starttag(self, tag, attrs):
        """ Delete all tags except for legal ones """
        if tag in self.valid_tags:
            self.result = self.result + '<' + tag
            for k, v in attrs:
                if v is None:
                    self.result += ' ' + k
                else:
                    # Drop script-related attributes: "on..." handlers
                    # and values starting with "javascript".
                    if lower(k[0:2]) != 'on' and lower(v[0:10]) != 'javascript':
                        self.result += ' %s="%s"' % (k, v)
            if tag not in self.never_close:
                self.openTags.append(tag)
            self.result = self.result + '>'
    def handle_endtag(self, tag):
        try:
            # Implicitly close elements (e.g. <li>) until the matching
            # open tag is found; unmatched end tags are ignored.
            while tag != self.openTags[-1] and self.openTags[-1] in self.can_close:
                self.openTags.pop()
            if tag==self.openTags[-1]:
                self.end_tag(self.openTags.pop())
        except IndexError:
            pass
    def cleanup(self):
        """ Append missing closing tags """
        while self.openTags:
            tag = self.openTags.pop()
            if tag not in self.can_close:
                self.end_tag(tag)
    def parse_starttag(self,i):
        try:
            return HTMLParser.parse_starttag(self,i)
        except HTMLParseError:
            try:
                # Skip to the closing ">"; search() may return None.
                return piclose.search(self.rawdata,i).end()
            except AttributeError:
                return -1
    def parse_endtag(self,i):
        try:
            return HTMLParser.parse_endtag(self,i)
        except HTMLParseError:
            try:
                return piclose.search(self.rawdata,i).end()
            except AttributeError:
                # was a bare "except:" -- narrowed to match parse_starttag()
                return -1
    def goahead(self,end):
        # fix incomplete entity and char refs
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        newdata=''
        while i < n:
            j = find(rawdata,'&',i)
            if j==-1:
                break
            newdata = newdata + rawdata[i:j]
            # Escape lone "&" characters that do not start a valid
            # entity or character reference.
            if charref.match(rawdata, j) or entityref.match(rawdata, j):
                newdata = newdata + '&'
            else:
                newdata = newdata + '&amp;'
            i = j+1
        self.rawdata = newdata + rawdata[i:]
        # do normal parsing
        try:
            return HTMLParser.goahead(self,end)
        except HTMLParseError:
            pass
import sgmllib
from string import lower, replace, split, join
class HTML2Text(sgmllib.SGMLParser):
    """SGML parser that renders HTML as plain, word-wrapped text.
    Headings and <p>/<br>/<pre>/<hr> insert line breaks, lists are
    rendered with "-"/numbered bullets, and nested lists/definitions
    increase the indentation.  Feed markup, then call generate(); the
    text ends up in ``self.result``.
    """
    from htmlentitydefs import entitydefs # replace entitydefs from sgmllib
    def __init__(self, ignore_tags=(), indent_width=4, page_width=80):
        # ignore_tags: tags whose markup is not translated at all
        sgmllib.SGMLParser.__init__(self)
        self.result = ""
        self.indent = 0
        # ol_number > 0 means "inside an <ol>"; holds the next item number.
        self.ol_number = 0
        self.page_width=page_width
        # NOTE(review): attribute name is misspelled ("inde_width") but is
        # used consistently in generate(); renaming would change the public
        # instance namespace, so it is only documented here.
        self.inde_width=indent_width
        # lines: finished (indent, word-list) tuples; line: current words
        self.lines=[]
        self.line=[]
        self.ignore_tags = ignore_tags
    def add_text(self,text):
        # convert text into words
        words = split(replace(text,'\n',' '))
        self.line.extend(words)
    def add_break(self):
        # Finish the current output line at the current indent level.
        self.lines.append((self.indent,self.line))
        self.line=[]
    def generate(self):
        """Word-wrap all collected lines into ``self.result``."""
        # join lines with indents
        indent_width = self.inde_width
        page_width = self.page_width
        out_paras=[]
        for indent,line in self.lines+[(self.indent,self.line)]:
            i=indent*indent_width
            indent_string = i*' '
            line_width = page_width-i
            out_para=''
            out_line=[]
            len_out_line=0
            for word in line:
                len_word = len(word)
                if len_out_line+len_word<line_width:
                    out_line.append(word)
                    len_out_line = len_out_line + len_word
                else:
                    # line full: flush it and start a new one with this word
                    out_para = out_para + indent_string + join(out_line, ' ') + '\n'
                    out_line=[word]
                    len_out_line=len_word
            out_para = out_para + indent_string + join(out_line, ' ')
            out_paras.append(out_para)
        self.result = join(out_paras,'\n\n')
    def mod_indent(self,i):
        # Adjust indentation by i steps, clamped at zero.
        self.indent = self.indent + i
        if self.indent < 0:
            self.indent = 0
    def handle_data(self, data):
        if data:
            self.add_text(data)
    def unknown_starttag(self, tag, attrs):
        """ Convert HTML to something meaningful in plain text """
        tag = lower(tag)
        if tag not in self.ignore_tags:
            if tag[0]=='h' or tag in ['br','pre','p','hr']:
                # insert a blank line
                self.add_break()
            elif tag =='img':
                # newline, text, newline
                src = ''
                for k, v in attrs:
                    if lower(k) == 'src':
                        src = v
                self.add_break()
                self.add_text('Image: ' + src)
            elif tag =='li':
                self.add_break()
                if self.ol_number:
                    # num - text
                    self.add_text(str(self.ol_number) + ' - ')
                    self.ol_number = self.ol_number + 1
                else:
                    # - text
                    self.add_text('- ')
            elif tag in ['dd','dt']:
                self.add_break()
                # increase indent
                self.mod_indent(+1)
            elif tag in ['ul','dl','ol']:
                # blank line
                # increase indent
                self.mod_indent(+1)
                if tag=='ol':
                    self.ol_number = 1
    def unknown_endtag(self, tag):
        """ Convert HTML to something meaningful in plain text """
        tag = lower(tag)
        if tag not in self.ignore_tags:
            if tag[0]=='h' or tag in ['pre']:
                # newline, text, newline
                self.add_break()
            elif tag =='li':
                self.add_break()
            elif tag in ['dd','dt']:
                self.add_break()
                # decrease indent
                self.mod_indent(-1)
            elif tag in ['ul','dl','ol']:
                # blank line
                self.add_break()
                # decrease indent
                self.mod_indent(-1)
                self.ol_number = 0
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import re
import string
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
interesting_cdata = re.compile(r'<(/|\Z)')
incomplete = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*|#[0-9]*)?')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*);')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+);')
starttagopen = re.compile('<[a-zA-Z]')
piopen = re.compile(r'<\?')
piclose = re.compile('>')
endtagopen = re.compile('</')
declopen = re.compile('<!')
special = re.compile('<![^<>]*>')
commentopen = re.compile('<!--')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('[a-zA-Z][-.a-zA-Z0-9:_]*')
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[-a-zA-Z0-9./:;+*%?!&$\(\)_#=~]*))?')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
endstarttag = re.compile(r"\s*/?>")
endendtag = re.compile('>')
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
declname = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*')
declstringlit = re.compile(r'(\'[^\']*\'|"[^"]*")\s*')
class HTMLParseError(Exception):
    """Exception raised for all parse errors."""
    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        # Unpack the optional (lineno, offset) pair; either may be None.
        self.lineno, self.offset = position
    def __str__(self):
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # Column is reported 1-based while the offset is 0-based.
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
class HTMLParser:
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self):
"""Initialize and reset this instance."""
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.lineno = 1
self.offset = 0
self.interesting = interesting_normal
def feed(self, data):
"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = string.count(rawdata, "\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = string.rindex(rawdata, "\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self):
self.interesting = interesting_cdata
def clear_cdata_mode(self):
self.interesting = interesting_normal
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
if rawdata[i] == '<':
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif endtagopen.match(rawdata, i): # </
k = self.parse_endtag(i)
if k >= 0:
self.clear_cdata_mode()
elif commentopen.match(rawdata, i): # <!--
k = self.parse_comment(i)
elif piopen.match(rawdata, i): # <?
k = self.parse_pi(i)
elif declopen.match(rawdata, i): # <!
k = self.parse_declaration(i)
else:
if i < n-1:
raise HTMLParseError(
"invalid '<' construct: %s" % `rawdata[i:i+2]`,
self.getpos())
k = -1
if k < 0:
if end:
raise HTMLParseError("EOF in middle of construct",
self.getpos())
break
i = self.updatepos(i, k)
elif rawdata[i] == '&':
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
i = self.updatepos(i, k)
continue
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
i = self.updatepos(i, k)
continue
if incomplete.match(rawdata, i):
if end:
raise HTMLParseError(
"EOF in middle of entity or char ref",
self.getpos())
return -1 # incomplete
raise HTMLParseError("'&' not part of entity or char ref",
self.getpos())
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse comment, return end or -1 if not terminated
def parse_comment(self, i):
rawdata = self.rawdata
assert rawdata[i:i+4] == '<!--', 'unexpected call to parse_comment()'
match = commentclose.search(rawdata, i+4)
if not match:
return -1
j = match.start()
self.handle_comment(rawdata[i+4: j])
j = match.end()
return j
# Internal -- parse declaration.
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# in practice, this should look like: ((name|stringlit) S*)+ '>'
n = len(rawdata)
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
self.handle_decl(rawdata[i+2:j])
return j + 1
if c in "\"'":
m = declstringlit.match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
m = declname.match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
else:
raise HTMLParseError(
"unexpected char in declaration: %s" % `rawdata[j]`,
self.getpos())
return -1 # incomplete
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = string.lower(rawdata[i+1:k])
while k < endpos:
m = attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
attrvalue = self.unescape(attrvalue)
attrs.append((string.lower(attrname), attrvalue))
k = m.end()
end = string.strip(rawdata[k:endpos])
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + string.count(self.__starttag_text, "\n")
offset = len(self.__starttag_text) \
- string.rfind(self.__starttag_text, "\n")
else:
offset = offset + len(self.__starttag_text)
raise HTMLParseError("junk characters in start tag: %s"
% `rawdata[k:endpos][:20]`,
(lineno, offset))
if end[-2:] == '/>':
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode()
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
m = locatestarttagend.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
s = rawdata[j:j+2]
if s == "/>":
return j + 2
if s == "/":
# buffer boundary
return -1
# else bogus input
self.updatepos(i, j + 1)
raise HTMLParseError("malformed empty start tag",
self.getpos())
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
self.updatepos(i, j)
raise HTMLParseError("malformed start tag", self.getpos())
raise AssertionError("we should not gt here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
j = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
raise HTMLParseError("bad end tag: %s" % `rawdata[i:j]`,
self.getpos())
tag = match.group(1)
self.handle_endtag(string.lower(tag))
return j
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
# Internal -- helper to remove special character quoting
def unescape(self, s):
if '&' not in s:
return s
s = string.replace(s, "<", "<")
s = string.replace(s, ">", ">")
s = string.replace(s, "'", "'")
s = string.replace(s, """, '"')
s = string.replace(s, "&", "&") # Must be last
return s | zopyx.txng3.core | /zopyx.txng3.core-3.6.2.zip/zopyx.txng3.core-3.6.2/zopyx/txng3/core/converters/stripogram/HTMLParser.py | HTMLParser.py |
Strip-o-Gram HTML Conversion Library
This is a library for converting HTML to Plain Text
and stripping specified tags from HTML.
Installation for Python
Simply copy the stripogram package to any folder on
your PYTHONPATH.
Alternatively, if you have distutils available to you,
you can install by executing the following in the
directory where you unpacked the tarball:
python setup.py install
Installation for Zope
The stripogram package can also be used as a Zope Product.
To do this, place the package in your Zope Products
directory and restart Zope.
NB: When using Strip-o-Gram as a Zope Product, you must change
any import statements that contain 'stripogram' to
'Products.stripogram'
Usage
from stripogram import html2text, html2safehtml
mylumpofdodgyhtml # a lump of dodgy html ;-)
# Only allow <b>, <a>, <i>, <br>, and <p> tags
mylumpofcoolcleancollectedhtml = html2safehtml(mylumpofdodgyhtml,valid_tags=('b', 'a', 'i', 'br', 'p'))
# Don't process <img> tags, just strip them out. Use an indent of 4 spaces
# and a page that's 80 characters wide.
mylumpoftext = html2text(mylumpofcoolcleancollectedhtml,ignore_tags=('img',),indent_width=4,page_width=80)
If you want more information on how it works, read the source :-)
Licensing
Copyright (c) 2001-2002 Chris Withers
This Software is released under the MIT License:
http://www.opensource.org/licenses/mit-license.html
See license.txt for more details.
Credits
Itamar Shtull-Trauring for the original HTML Filter.
Oleg Broytmann for the pre-original HTML Filter.
Andy McKay for the testing he has done.
Rik Hoekstra for a patch to html2text.
Andre Ribeiro Camargo for some html2text tests and a bugfix or two.
Mark McEahern for the distutils support.
Sylvia Candelaria de Ram for a bug fix.
Shane Hathaway for convincing me that Zope's security policy is sensible.
Changes
1.4
- made the stripogram package work as a Zope product
1.3
- added distutils support.
- allowed valid_tags to be in any case
- html2text can now ignore specified tags
- the indent and page width used in html2text can now be specified
- fixed problems in html2safehtml with tag attributes that didn't have a value
  - fixed a bug in the html2text handling of ordered lists
1.2
- documented installation
- included security declarations so that html2text
and html2safehtml can be used in Zope's
Script (Python)'s
- added further tests for html2text.
- added further tests for html2safehtml.
1.1
- re-implemented html2text which should still
be considered alpha.
- fixed handling of the img tag.
1.0
  - First release as a separate module.
| zopyx.txng3.core | /zopyx.txng3.core-3.6.2.zip/zopyx.txng3.core-3.6.2/zopyx/txng3/core/converters/stripogram/readme.txt | readme.txt |
import sys, re, os, tempfile
from threading import Lock
from zope.interface import implements
from zopyx.txng3.core.interfaces import IParser
from zopyx.txng3.core.parsetree import *
import lex, yacc
# For UNIX: create a directory /tmp/textindexng3-uid-<uid>
# For Windows: use the default tmp directory
# Choose a scratch directory where PLY can write its generated parser tables.
tempdir = tempfile.gettempdir()
if os.name == 'posix':
    # Per-user, per-process directory to avoid clashes between users/instances.
    # NOTE(review): relies on tempfile.tempdir being set -- presumably
    # initialized as a side effect of the gettempdir() call above; confirm.
    tempdir = os.path.join(tempfile.tempdir, 'textindexng3-uid-%d-pid-%d' % (os.getuid(), os.getpid()))
    if not os.path.exists(tempdir):
        # 0777: directory is created world-writable.
        os.makedirs(tempdir, 0777)
outputdir = tempfile.mkdtemp(dir=tempdir)
# Make the generated parse-table module importable.
if not outputdir in sys.path:
    sys.path.append(outputdir)
class QueryParserError(Exception): pass
class ParserBase:
    """ Base class for all parsers """
    # Subclasses supply the PLY token/precedence declarations.
    tokens = ()
    precedence = ()
    names = {}
    implements(IParser)
    def __init__(self, language='en'):
        """Build the PLY lexer/parser for this class.
        * language -- ISO language code served by this parser.
        """
        self.language = language
        # Serializes parse() calls: the underlying PLY parser instance
        # keeps per-parse state and is not reentrant.
        self.lock = Lock()
        try:
            # Derive unique module names for the generated parser tables.
            modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
        except:
            modname = "parser" + "_" + self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"
        lex.lex(module=self, debug=False)
        self.p = yacc.yacc(module=self,
                           debug=0,
                           outputdir=outputdir,
                           debugfile=self.debugfile,
                           tabmodule=self.tabmodule,)
    def getLanguage(self):
        """Return the language code this parser was created for."""
        return self.language
    def __call__(self, query):
        return self.parse(query)
    def parse(self, query):
        """Parse *query* and return the parse tree (None for empty queries).
        Raises QueryParserError for any failure.
        Fixed: the lock is now released via try/finally and only when it
        was actually acquired -- previously an error raised before
        acquire() triggered release() on an unlocked lock, and an error
        in the error path could leave the lock held forever.
        """
        acquired = False
        try:
            try:
                query = query.strip()
                # Plone workaround: queryCatalog.py inserts extra
                # quoting for parentheses
                query = query.replace('"("', '(')
                query = query.replace('")"', ')')
                if not query:
                    return None
                self.lock.acquire()
                acquired = True
                self._clear()
                return self.p.parse(query)
            except:
                raise QueryParserError('Unable to parse query: %s' % query)
        finally:
            if acquired:
                self.lock.release()
    def lexer(self, data):
        """ helper method to control lexer process. Return the tokenize
            data string.
        """
        lex.input(data)
        tokens = []
        while 1:
            tok = lex.token()
            if not tok: break
            tokens.append(tok)
        return tokens
class ElementCollector:
    """Accumulates parse-tree nodes (e.g. for a phrase search) together
    with an optional field name.
    """
    def __init__(self):
        self.clear()
    def clear(self):
        """Forget all collected items and the field name."""
        self._items = []
        self._fieldname = None
    def addElement(self, item):
        # Prepend: grammar reductions deliver terms in reverse order.
        self._items.insert(0, item)
    def setField(self, field):
        self._fieldname = field
    def getElements(self):
        """Return the collected items as a tuple, in prepend order."""
        return tuple(self._items)
    def getField(self):
        return self._fieldname
class PhraseElements(ElementCollector):
    """ElementCollector that accepts only plain word terms."""
    def addElement(self, item):
        # Phrases may consist of plain words only -- no wildcards,
        # ranges or other special query nodes.
        if isinstance(item, WordNode):
            ElementCollector.addElement(self, item)
        else:
            raise QueryParserError('Offending subquery in phrase found: %s.\nPhrases must contain *only* terms but not terms containing wildcards or other special query characters' % repr(item.getValue()))
# some regexes to distinguish between normal strings
# truncated strings and patterns
# plain term: no wildcard/quote/paren/percent characters
str_regex = re.compile(r'[^%*?()\s"]+$',re.LOCALE|re.UNICODE)
# range search: "lower..upper"
range_regex = re.compile(r'[^%*?()\s"]+\.\.[^%*?()\s"]+$',re.LOCALE|re.UNICODE)
# similarity search: "%term"
sim_regex = re.compile(r'[%][^%*?()\s"]+$',re.LOCALE|re.UNICODE)
# substring search: "*term*"
sub_regex = re.compile(r'[*][^%*?()\s"]+[*]$',re.LOCALE|re.UNICODE)
# right truncation: "term*"
rt_regex = re.compile(r'[^%*?()\s"]+[*]$',re.LOCALE|re.UNICODE)
# left truncation: "*term"
lt_regex = re.compile(r'[*][^%*?()\s"]+$',re.LOCALE|re.UNICODE)
class EnglishParser(ParserBase):
    # NOTE: PLY derives the lexer and the grammar from the t_* pattern
    # strings and from the docstrings of the t_*/p_* methods below.
    # Those docstrings are executable specifications -- never edit them
    # for readability.
    tokens = (
        'STRING' ,
        'NOT',
        'OR',
        'AND',
        'NEAR',
        'NEAR_PREFIX',
        'PHRASE_PREFIX',
        'AND_PREFIX',
        'OR_PREFIX',
        'QUOTE',
        'OPENP',
        'CLOSEP',
        'COLON',
    )
    # Single-character tokens.
    t_QUOTE = r'\"'
    t_OPENP = r'\('
    t_CLOSEP = r'\)'
    t_ignore = '\t'
    def __init__(self, *args, **kw):
        ParserBase.__init__(self, *args, **kw)
        self._clear()
    def _clear(self):
        # Fresh collectors for phrase terms and prefix-operator terms.
        self.phrase_elements = PhraseElements()
        self.elements = ElementCollector()
    def t_COLON(self, t):
        r'\s*:\s*'
        return t
    def t_NOT(self, t):
        'NOT\s+|not\s+|\-'
        return t
    def t_AND(self, t):
        '\s+AND\s+|\s+and\s+'
        return t
    def t_OR(self, t):
        '\s+OR\s+|\s+or\s+'
        return t
    def t_NEAR(self, t):
        '\s+NEAR\s+|\s+near\s+'
        return t
    # Prefix operator tokens of the form "field::OP".
    def t_NEAR_PREFIX(self, t):
        '[\w+_]+::(NEAR|near)'
        return t
    def t_AND_PREFIX(self, t):
        '[\w+_]+::(AND|and)'
        return t
    def t_OR_PREFIX(self, t):
        '[\w+_]+::(OR|or)'
        return t
    def t_PHRASE_PREFIX(self, t):
        '[\w+_]+::(PHRASE|phrase)'
        return t
    def t_newline(self, t):
        r'\n+'
        t.lineno += t.value.count("\n")
    def t_error(self, t):
        # Skip plain spaces; any other unmatched character is fatal.
        if t.value[0] in [' ']:
            t.skip(1)
        else:
            raise QueryParserError,"Illegal character '%s'" % t.value[0]
    def p_expr_expr_factor3(self, t):
        """expr : NOT expr"""
        t[0] = NotNode( t[2] )
    def p_expr_expr_factor2(self, t):
        """expr : NOT factor """
        t[0] = NotNode( t[2] )
    def p_expr_parens(self, t):
        """expr : OPENP expr CLOSEP """
        t[0] = t[2]
    # field::NEAR(...) -- the phrase collector gathered the terms.
    def p_near_prefix(self, t):
        """expr : NEAR_PREFIX OPENP qterm CLOSEP """
        field,op = t[1].split('::')
        if field=='default': field = None
        t[0] = NearNode(self.phrase_elements.getElements(), field)
        self.phrase_elements.clear()
    def p_phrase_prefix(self, t):
        """expr : PHRASE_PREFIX OPENP qterm CLOSEP """
        field,op = t[1].split('::')
        if field=='default': field = None
        t[0] = PhraseNode(self.phrase_elements.getElements(), field)
        self.phrase_elements.clear()
    def p_and_prefix(self, t):
        """expr : AND_PREFIX OPENP terms CLOSEP """
        field,op = t[1].split('::')
        if field=='default': field = None
        t[0] = AndNode(self.elements.getElements(), field)
        self.elements.clear()
    def p_or_prefix(self, t):
        """expr : OR_PREFIX OPENP terms CLOSEP """
        field,op = t[1].split('::')
        if field=='default': field = None
        t[0] = OrNode(self.elements.getElements(), field)
        self.elements.clear()
    # Infix boolean operators.
    def p_expr_and(self, t):
        """expr : expr AND expr """
        t[0] = AndNode( (t[1], t[3]) )
    def p_expr_or(self, t):
        """expr : expr OR expr """
        t[0] = OrNode( (t[1], t[3]) )
    def p_expr_near(self, t):
        """expr : expr NEAR expr """
        t[0] = NearNode( (t[1], t[3]) )
    # Juxtaposition of two expressions defaults to AND.
    def p_expr_noop(self, t):
        """expr : expr expr"""
        t[0] = AndNode( (t[1], t[2]))
    def p_expr_expr_factor(self, t):
        """expr : factor """
        t[0] = t[1]
    def p_factor_string(self, t):
        """factor : string"""
        t[0] = t[1]
    # Quoted phrase, optionally restricted to a field ('field:"..."').
    def p_factor_quote(self, t):
        """factor : quotestart qterm quoteend """
        t[0] = PhraseNode(self.phrase_elements.getElements(), self.phrase_elements.getField())
        self.phrase_elements.clear()
    def p_qterm_1(self, t):
        """ qterm : string qterm"""
        self.phrase_elements.addElement(t[1])
        t[0] = [t[1], t[2]]
    def p_qterm_2(self, t):
        """ qterm : string"""
        self.phrase_elements.addElement(t[1])
        t[0] = t[1]
    def p_terms(self, t):
        """ terms : string terms"""
        self.elements.addElement(t[1])
        t[0] = [t[1], t[2]]
    def p_terms_1(self, t):
        """ terms : string"""
        self.elements.addElement(t[1])
        t[0] = t[1]
    def p_quotestart_1(self, t):
        """ quotestart : QUOTE """
        self.phrase_elements.clear()
    def p_quotestart_with_field(self, t):
        """ quotestart : phrasefield COLON QUOTE """
        self.phrase_elements.clear()
    def p_phrasefield(self, t):
        """ phrasefield : STRING """
        self.phrase_elements.setField(t[1])
    def p_quoteend_1(self, t):
        """ quoteend : QUOTE """
    # Classify a bare term into the proper parse-tree node using the
    # module-level regexes (range, word, similarity, substring,
    # right/left truncation, glob).
    def p_string(self, t):
        """string : STRING
                  | AND
                  | OR
                  | NEAR
                  | NOT"""
        v = t[1].strip()
        if range_regex.match(v): t[0] = RangeNode(tuple(v.split('..')) )
        elif str_regex.match(v):
            if '::' in v:
                field, value= v.split('::')
                t[0] = WordNode(*(value, field))
            else:
                t[0] = WordNode(v)
        elif sim_regex.match(v): t[0] = SimNode(v[1:] )
        elif sub_regex.match(v): t[0] = SubstringNode(v[1:-1] )
        elif rt_regex.match(v): t[0] = TruncNode(v[:-1] )
        elif lt_regex.match(v): t[0] = LTruncNode(v[1:] )
        else:
            if not (v.lower().strip() in ('and', 'or', 'not', 'near')):
                t[0] = GlobNode(t[1])
    def t_STRING(self, t):
        r'[^()\s"]+'
        return t
    def p_error(self, t):
        raise QueryParserError("Syntax error at '%s'" % t.value)
EnglishQueryParser = EnglishParser('en')
# Ad-hoc smoke test: lex and parse a set of sample queries (plus an
# optional query given on the command line) and print the results.
if __name__== '__main__':
    import sys
    for query in (
        'title::phrase(the zope book) and author::and(michel pelletier)',
        'title::phrase(the zope book) and author::and(michel pelletier)',
        'a b',
        'c++ Algol68',
        'field:value',
        'NEAR(a b c)',
        'NEAR(a b c*)',
        'phrase(the quick brown fox)',
        'phrase(foo bar sucks)',
        'phrase(foo bar sucks) or OR(foo bar)',
        '"a and b" ',
        '(a b)',
        'somefield::AND(a b)',
        ):
        print '-'*80
        print '>>',query
        print EnglishQueryParser.lexer(query)
        print EnglishQueryParser.parse(query)
    if len(sys.argv) > 1:
        print '-'*80
        print '>>',sys.argv[1]
        print EnglishQueryParser.lexer(sys.argv[1])
        print EnglishQueryParser.parse(sys.argv[1])
__version__ = "2.3"
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates
 # a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the generated table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
import re, types, sys, cStringIO, os.path
try:
import hashlib
have_hashlib = True
except ImportError:
have_hashlib = False
import md5
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Signals a problem during parser construction or use."""
    pass
# Available instance types. This is used when parsers are defined by a class.
# It's a little funky because backwards compatibility with Python 2.0
# (where types.ObjectType is undefined) must be preserved.
try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
    _INSTANCETYPE = types.InstanceType
    class object: pass # Note: needed if no new-style classes present
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol(object):
    """A grammar symbol on the parsing stack; prints as its .type.
    Attributes (type, value, lineno/endlineno, lexpos/endlexpos) are
    assigned externally by the parsing engine.
    """
    def __str__(self):
        return self.type
    def __repr__(self):
        return self.__str__()
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols matched by a grammar rule.

    Indexing (p[n]) reads/writes the .value attribute of the underlying
    YaccSymbol; negative indices reach into the parser's symbol stack.
    lineno()/linespan()/lexpos()/lexspan() expose positional info
    (0 when unknown).
    """
    def __init__(self,s,stack=None):
        self.slice = s            # symbols of the current rule
        self.pbstack = []         # tokens pushed back via pushback()
        self.stack = stack        # parser symbol stack (negative indices)
    def __getitem__(self,n):
        if n >= 0: return self.slice[n].value
        else: return self.stack[n].value
    def __setitem__(self,n,v):
        self.slice[n].value = v
    def __getslice__(self,i,j):
        # Python 2 slice protocol (p[i:j]) -- returns the raw values.
        return [s.value for s in self.slice[i:j]]
    def __len__(self):
        return len(self.slice)
    def lineno(self,n):
        """Starting line number of item n (0 if not recorded)."""
        return getattr(self.slice[n],"lineno",0)
    def linespan(self,n):
        """Return (startline, endline) for item n."""
        startline = getattr(self.slice[n],"lineno",0)
        endline = getattr(self.slice[n],"endlineno",startline)
        return startline,endline
    def lexpos(self,n):
        """Starting lexing position of item n (0 if not recorded)."""
        return getattr(self.slice[n],"lexpos",0)
    def lexspan(self,n):
        """Return (startpos, endpos) lexing range for item n."""
        startpos = getattr(self.slice[n],"lexpos",0)
        endpos = getattr(self.slice[n],"endlexpos",startpos)
        return startpos,endpos
    def pushback(self,n):
        """Push the last n matched tokens back onto the input.
        Raises ValueError for non-positive n or n exceeding the number
        of available symbols.
        Fixed: uses the exception *call* form instead of the Python-2-only
        'raise E, msg' statement form (behavior unchanged).
        """
        if n <= 0:
            raise ValueError("Expected a positive value")
        if n > (len(self.slice)-1):
            raise ValueError("Can't push %d tokens. Only %d are available." % (n,len(self.slice)-1))
        for i in range(0,n):
            self.pbstack.append(self.slice[-i-1])
# The LR Parsing engine. This is defined as a class so that multiple parsers
# can exist in the same process. A user never instantiates this directly.
# Instead, the global yacc() function should be used to create a suitable Parser
# object.
class Parser:
    """LR parsing engine driven by the tables constructed by yacc()."""
    def __init__(self,magic=None):
        # This is a hack to keep users from trying to instantiate a Parser
        # object directly.
        if magic != "xyzzy":
            raise YaccError, "Can't instantiate Parser. Use yacc() instead."
        # Reset internal state
        self.productions = None # List of productions
        self.errorfunc = None # Error handling function
        self.action = { } # LR Action table
        self.goto = { } # LR goto table
        self.require = { } # Attribute require table
        self.method = "Unknown LR" # Table construction method used
    def errok(self):
        # Called (via p_error) to signal that error recovery succeeded.
        self.errorok = 1
    def restart(self):
        # Discard the parsing stacks and return to the start state.
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    def parse(self,input=None,lexer=None,debug=0,tracking=0):
        """Run the LR parse loop; return the start symbol's value (or None).
        * input    -- optional source string fed to the lexer
        * lexer    -- token source (defaults to the lex module's lexer)
        * debug    -- >0 prints shift/reduce tracing to stderr/stdout
        * tracking -- propagate line/position info onto nonterminals
        """
        lookahead = None # Current lookahead symbol
        lookaheadstack = [ ] # Stack of lookahead symbols
        actions = self.action # Local reference to action table
        goto = self.goto # Local reference to goto table
        prod = self.productions # Local reference to production list
        pslice = YaccProduction(None) # Production object passed to grammar rules
        errorcount = 0 # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            import lex
            lexer = lex.lexer
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input:
            lexer.input(input)
        # Tokenize function
        get_token = lexer.token
        statestack = [ ] # Stack of parsing states
        self.statestack = statestack
        symstack = [ ] # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack # Put in the production
        errtoken = None # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input. If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if debug > 1:
                print 'state', state
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token() # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            if debug:
                errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
            # Check the action table: t > 0 shift, t < 0 reduce,
            # t == 0 accept, None means syntax error.
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if debug > 1:
                print 'action', t
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    if ltype == '$end':
                        # Error, end of input
                        sys.stderr.write("yacc: Parse error. EOF\n")
                        return
                    statestack.append(t)
                    state = t
                    if debug > 1:
                        sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname # Production name
                    sym.value = None
                    if debug > 1:
                        sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
                    if plen:
                        # Pop the rule's right-hand side off both stacks.
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        del symstack[-plen:]
                        del statestack[-plen:]
                    else:
                        # Empty production.
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        targ = [ sym ]
                    pslice.slice = targ
                    # Call the grammar rule with our special slice object
                    p.func(pslice)
                    # If there was a pushback, put that on the stack
                    if pslice.pbstack:
                        lookaheadstack.append(lookahead)
                        for _t in pslice.pbstack:
                            lookaheadstack.append(_t)
                        lookahead = None
                        pslice.pbstack = []
                    symstack.append(sym)
                    state = goto[statestack[-1]][pname]
                    statestack.append(state)
                    continue
                if t == 0:
                    # Accept: the value of the start symbol is the result.
                    n = symstack[-1]
                    return getattr(n,"value",None)
            # NOTE(review): '== None' kept as-is; 'is None' is the idiom.
            if t == None:
                if debug:
                    sys.stderr.write(errorlead + "\n")
                # We have some kind of parsing error here. To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error. This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own. The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1: the statestack only has 1 entry on it. If we're in this state, the
                # entire parse has been rolled back and we're completely hosed. The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    # Wrap the offending token in an 'error' symbol.
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                continue
            # Call an error function here
            raise RuntimeError, "yacc: internal parser error!!!\n"
# -----------------------------------------------------------------------------
# === Parser Construction ===
#
# The following functions and variables are used to implement the yacc() function
# itself. This is pretty hairy stuff involving lots of error checking,
# construction of LR items, kernels, and so forth. Although a lot of
# this work is done using global variables, the resulting Parser object
# is completely self contained--meaning that it is safe to repeatedly
# call yacc() with different grammars in the same application.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# validate_file()
#
# This function checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_file(filename):
    """Check a parser module file for duplicated p_rulename() functions.

    Scans *filename* line by line for ``def p_name(`` definitions and
    reports any rule function defined more than once (usually the result
    of copy/paste errors).  Returns 1 if no duplicates were found or the
    file could not be checked, 0 if at least one duplicate was reported
    to stderr.
    """
    base,ext = os.path.splitext(filename)
    if ext != '.py': return 1          # No idea. Assume it's okay.

    try:
        # 'with' guarantees the handle is closed even if readlines() fails
        # (the original leaked the handle on any non-IOError failure).
        with open(filename) as f:
            lines = f.readlines()
    except IOError:
        return 1                       # Couldn't read it; don't complain

    # Match def p_funcname(
    fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
    counthash = { }
    linen = 1
    noerror = 1
    for l in lines:
        m = fre.match(l)
        if m:
            name = m.group(1)
            prev = counthash.get(name)
            if not prev:
                counthash[name] = linen
            else:
                sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
                noerror = 0
        linen += 1
    return noerror
# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix.
def validate_dict(d):
    """Warn about entries in d that look like grammar rules but lack the p_ prefix.

    d is typically a module or class dictionary.  Warnings go to stderr;
    nothing is returned.
    """
    for name, obj in d.items():
        prefix = name[0:2]
        if prefix == 'p_' and type(obj) in (types.FunctionType, types.MethodType):
            continue
        if prefix == 't_':
            continue
        if prefix == 'p_':
            sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % name)
        if isinstance(obj, types.FunctionType) and obj.func_code.co_argcount == 1:
            # A one-argument function whose docstring starts "name : ..."
            # is probably a grammar rule missing its p_ prefix.
            try:
                words = obj.__doc__.split(" ")
                if words[1] == ':':
                    sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (obj.func_code.co_filename, obj.func_code.co_firstlineno, name))
            except StandardError:
                pass
# -----------------------------------------------------------------------------
# === GRAMMAR FUNCTIONS ===
#
# The following global variables and functions are used to store, manipulate,
# and verify the grammar rules specified by the user.
# -----------------------------------------------------------------------------
# Initialize all of the global variables used during grammar construction
def initialize_vars():
    """Reset every module-level global used during grammar construction.

    yacc() calls this first so that repeated invocations with different
    grammars in the same process do not see state left over from a
    previous run.
    """
    global Productions, Prodnames, Prodmap, Terminals
    global Nonterminals, First, Follow, Precedence, LRitems
    global Errorfunc, Signature, Requires
    Productions = [None] # A list of all of the productions. The first
                         # entry is always reserved for the purpose of
                         # building an augmented grammar
    Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
                    # productions of that nonterminal.
    Prodmap = { } # A dictionary that is only used to detect duplicate
                  # productions.
    Terminals = { } # A dictionary mapping the names of terminal symbols to a
                    # list of the rules where they are used.
    Nonterminals = { } # A dictionary mapping names of nonterminals to a list
                       # of rule numbers where they are used.
    First = { } # A dictionary of precomputed FIRST(x) symbols
    Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
    Precedence = { } # Precedence rules for each terminal. Contains tuples of the
                     # form ('right',level) or ('nonassoc', level) or ('left',level)
    LRitems = [ ] # A list of all LR items for the grammar. These are the
                  # productions with the "dot" like E -> E . PLUS E
    Errorfunc = None # User defined error handler
    # Digital signature of the grammar rules, precedence
    # and other information. Used to determine when the
    # parsing table needs to be regenerated.
    if have_hashlib:
        Signature = hashlib.md5()
    else:
        Signature = md5.new()   # fallback for pre-hashlib Pythons
    Requires = { } # Requires list
    # File objects used when creating the parser.out debugging file
    global _vf, _vfc
    _vf = cStringIO.StringIO()
    _vfc = cStringIO.StringIO()
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# It has a few required attributes:
#
# name - Name of the production (nonterminal)
# prod - A list of symbols making up its production
# number - Production number.
#
# In addition, a few additional attributes are used to help with debugging or
# optimization of table generation.
#
# file - File where production action is defined.
# lineno - Line number where action is defined
# func - Action function
# prec - Precedence level
# lr_next - Next LR item. Example, if we are ' E -> E . PLUS E'
# then lr_next refers to 'E -> E PLUS . E'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# -----------------------------------------------------------------------------
class Production:
    """Raw information about a single production (grammar rule).

    Attributes such as name, prod, number, file, line, func and prec are
    supplied as keyword arguments by the grammar builder (see the banner
    comment above for the full list); the fields initialized in __init__
    support LR item construction and LALR lookahead computation.
    """
    def __init__(self,**kw):
        # Accept arbitrary attributes (name, prod, number, ...)
        for k,v in kw.items():
            setattr(self,k,v)
        self.lr_index = -1
        self.lr0_added = 0 # Flag indicating whether or not added to LR0 closure
        self.lr1_added = 0 # Flag indicating whether or not added to LR1
        self.usyms = [ ]
        self.lookaheads = { }
        self.lk_added = { }
        self.setnumbers = [ ]
    def __str__(self):
        # Human readable form, e.g. "expr -> expr PLUS term"
        if self.prod:
            s = "%s -> %s" % (self.name," ".join(self.prod))
        else:
            s = "%s -> <empty>" % self.name
        return s
    def __repr__(self):
        return str(self)
    # Compute lr_items from the production
    def lr_item(self,n):
        """Return a copy of this production with the dot at position n,
        or None if n lies beyond the end of the right-hand side."""
        if n > len(self.prod): return None
        p = Production()
        p.name = self.name
        p.prod = list(self.prod)
        p.number = self.number
        p.lr_index = n
        p.lookaheads = { }
        p.setnumbers = self.setnumbers
        p.prod.insert(n,".")
        p.prod = tuple(p.prod)
        p.len = len(p.prod)
        p.usyms = self.usyms
        # Precompute list of productions immediately following the dot
        try:
            p.lrafter = Prodnames[p.prod[n+1]]
        except (IndexError,KeyError),e:
            p.lrafter = []
        try:
            p.lrbefore = p.prod[n-1]
        except IndexError:
            p.lrbefore = None
        return p
class MiniProduction:
    # Bare container class: attributes are attached dynamically elsewhere
    # (presumably when reading back pre-generated parser tables -- TODO confirm).
    pass
# Regex used to validate rule and symbol names.  Note that the pattern also
# accepts a leading digit and the '-' character.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule.
# The production rule is assumed to be found in the function's docstring.
# This rule has the general syntax:
#
# name1 ::= production1
# | production2
# | production3
# ...
# | productionn
# name2 ::= production1
# | production2
# ...
# -----------------------------------------------------------------------------
def add_production(f,file,line,prodname,syms):
    """Assemble one production 'prodname -> syms' into the grammar tables.

    f is the action function the rule came from; file and line locate the
    rule for diagnostics.  Validates the rule and symbol names, expands
    quoted single-character literals into tokens, processes %prec
    modifiers, and records the rule in Productions, Prodmap, Prodnames and
    the Terminals/Nonterminals usage maps.  Returns 0 on success, -1 on
    any error (reported to stderr).
    """
    if Terminals.has_key(prodname):
        sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
        return -1
    if prodname == 'error':
        sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
        return -1
    if not _is_identifier.match(prodname):
        sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
        return -1
    # Validate each right-hand-side symbol; quoted strings become literal tokens
    for x in range(len(syms)):
        s = syms[x]
        if s[0] in "'\"":
            try:
                c = eval(s)
                if (len(c) > 1):
                    sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
                    return -1
                if not Terminals.has_key(c):
                    Terminals[c] = []
                syms[x] = c
                continue
            except SyntaxError:
                pass
        if not _is_identifier.match(s) and s != '%prec':
            sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
            return -1
    # See if the rule is already in the rulemap
    map = "%s -> %s" % (prodname,syms)
    if Prodmap.has_key(map):
        m = Prodmap[map]
        sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
        sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
        return -1
    p = Production()
    p.name = prodname
    p.prod = syms
    p.file = file
    p.line = line
    p.func = f
    p.number = len(Productions)
    Productions.append(p)
    Prodmap[map] = p
    if not Nonterminals.has_key(prodname):
        Nonterminals[prodname] = [ ]
    # Add all terminals to Terminals
    i = 0
    while i < len(p.prod):
        t = p.prod[i]
        if t == '%prec':
            try:
                precname = p.prod[i+1]
            except IndexError:
                sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
                return -1
            prec = Precedence.get(precname,None)
            if not prec:
                sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
                return -1
            else:
                p.prec = prec
            # Strip '%prec NAME' (two entries) out of the production
            del p.prod[i]
            del p.prod[i]
            continue
        if Terminals.has_key(t):
            Terminals[t].append(p.number)
            # Is a terminal. We'll assign a precedence to p based on this
            if not hasattr(p,"prec"):
                p.prec = Precedence.get(t,('right',0))
        else:
            if not Nonterminals.has_key(t):
                Nonterminals[t] = [ ]
            Nonterminals[t].append(p.number)
        i += 1
    if not hasattr(p,"prec"):
        p.prec = ('right',0)
    # Set final length of productions
    p.len = len(p.prod)
    p.prod = tuple(p.prod)
    # Calculate unique syms in the production
    p.usyms = [ ]
    for s in p.prod:
        if s not in p.usyms:
            p.usyms.append(s)
    # Add to the global productions list
    try:
        Prodnames[p.name].append(p)
    except KeyError:
        Prodnames[p.name] = [ p ]
    return 0
# Given a raw rule function, this function rips out its doc string
# and adds rules to the grammar
def add_function(f):
    """Extract grammar rules from the docstring of rule function f.

    Docstring lines look like 'name : sym1 sym2 ...' (':' or '::=' both
    accepted), with '|' lines continuing the previous rule.  Each rule is
    handed to add_production().  Returns 0 if everything was added
    cleanly, a negative error count otherwise.
    """
    line = f.func_code.co_firstlineno
    file = f.func_code.co_filename
    error = 0
    # Bound methods carry an implicit 'self' argument
    if isinstance(f,types.MethodType):
        reqdargs = 2
    else:
        reqdargs = 1
    if f.func_code.co_argcount > reqdargs:
        sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
        return -1
    if f.func_code.co_argcount < reqdargs:
        sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
        return -1
    if f.__doc__:
        # Split the doc string into lines
        pstrings = f.__doc__.splitlines()
        lastp = None
        dline = line    # tracks the source line of each docstring line
        for ps in pstrings:
            dline += 1
            p = ps.split()
            if not p: continue
            try:
                if p[0] == '|':
                    # This is a continuation of a previous rule
                    if not lastp:
                        sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
                        return -1
                    prodname = lastp
                    if len(p) > 1:
                        syms = p[1:]
                    else:
                        syms = [ ]
                else:
                    prodname = p[0]
                    lastp = prodname
                    assign = p[1]
                    if len(p) > 2:
                        syms = p[2:]
                    else:
                        syms = [ ]
                    if assign != ':' and assign != '::=':
                        sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
                        return -1
                e = add_production(f,file,dline,prodname,syms)
                error += e
            except StandardError:
                sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
                error -= 1
    else:
        sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
    return error
# Cycle checking code (Michael Dyck)
def compute_reachable():
    '''
    Warn about every nonterminal that cannot be reached from the start
    symbol.  (Unused terminals were already reported separately.)
    '''
    reached = dict.fromkeys(list(Terminals) + list(Nonterminals), 0)
    mark_reachable_from(Productions[0].prod[0], reached)
    for sym in Nonterminals:
        if not reached[sym]:
            sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % sym)
def mark_reachable_from(s, Reachable):
    '''
    Recursively flag symbol s, and every symbol derivable from it, in the
    Reachable map.
    '''
    if Reachable[s]:
        return  # already visited
    Reachable[s] = 1
    for prod in Prodnames.get(s, []):
        for sym in prod.prod:
            mark_reachable_from(sym, Reachable)
# -----------------------------------------------------------------------------
# compute_terminates()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def compute_terminates():
    '''
    Report (to stderr) any symbols from which no string consisting only
    of terminals can be derived, i.e. infinitely recursive rules.
    Returns 1 if such an error was found, 0 otherwise.
    '''
    Terminates = {}
    # Terminals terminate trivially:
    for t in Terminals.keys():
        Terminates[t] = 1
    Terminates['$end'] = 1
    # Nonterminals:
    # Initialize to false:
    for n in Nonterminals.keys():
        Terminates[n] = 0
    # Then propagate termination until no change:
    while 1:
        some_change = 0
        for (n,pl) in Prodnames.items():
            # Nonterminal n terminates iff any of its productions terminates.
            for p in pl:
                # Production p terminates iff all of its rhs symbols terminate.
                for s in p.prod:
                    if not Terminates[s]:
                        # The symbol s does not terminate,
                        # so production p does not terminate.
                        p_terminates = 0
                        break
                else:
                    # didn't break from the loop,
                    # so every symbol s terminates
                    # so production p terminates.
                    p_terminates = 1
                if p_terminates:
                    # symbol n terminates!
                    if not Terminates[n]:
                        Terminates[n] = 1
                        some_change = 1
                    # Don't need to consider any more productions for this n.
                    break
        if not some_change:
            break
    some_error = 0
    for (s,terminates) in Terminates.items():
        if not terminates:
            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
                # s is used-but-not-defined, and we've already warned of that,
                # so it would be overkill to say that it's also non-terminating.
                pass
            else:
                sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
                some_error = 1
    return some_error
# -----------------------------------------------------------------------------
# verify_productions()
#
# This function examines all of the supplied rules to see if they seem valid.
# -----------------------------------------------------------------------------
def verify_productions(cycle_check=1):
    """Examine all supplied rules for problems.

    Reports undefined symbols, unused tokens, and unused rules; dumps the
    grammar to the debug buffer when yaccdebug is set.  When cycle_check
    is true, also checks reachability and termination.  Returns a nonzero
    error count if fatal problems were found.
    """
    error = 0
    for p in Productions:
        if not p: continue   # skip the reserved slot 0 before augmentation
        for s in p.prod:
            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
                sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
                error = 1
                continue
    unused_tok = 0
    # Now verify all of the tokens
    if yaccdebug:
        _vf.write("Unused terminals:\n\n")
    for s,v in Terminals.items():
        if s != 'error' and not v:
            sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
            if yaccdebug: _vf.write(" %s\n"% s)
            unused_tok += 1
    # Print out all of the productions
    if yaccdebug:
        _vf.write("\nGrammar\n\n")
        for i in range(1,len(Productions)):
            _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
    unused_prod = 0
    # Verify the use of all productions
    for s,v in Nonterminals.items():
        if not v:
            p = Prodnames[s][0]
            sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
            unused_prod += 1
    if unused_tok == 1:
        sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
    if unused_tok > 1:
        sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
    if unused_prod == 1:
        sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
    if unused_prod > 1:
        sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
    if yaccdebug:
        _vf.write("\nTerminals, with rules where they appear\n\n")
        ks = Terminals.keys()
        ks.sort()
        for k in ks:
            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
        _vf.write("\nNonterminals, with rules where they appear\n\n")
        ks = Nonterminals.keys()
        ks.sort()
        for k in ks:
            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
    if (cycle_check):
        compute_reachable()
        error += compute_terminates()
        # error += check_cycles()
    return error
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems():
    """Walk every production and build its linked list of LR items.

    Each item is numbered (lr_num) and appended to the global LRitems
    list; p.lr_next chains a production's items together, e.g.
    E -> . E PLUS E  ->  E -> E . PLUS E  ->  ...  ->  E -> E PLUS E .
    (A dead pre-loop assignment of lr_item(0) in the original was removed;
    the loop recomputes it on its first iteration.)
    """
    for p in Productions:
        lastlri = p
        i = 0
        while 1:
            lri = p.lr_item(i)
            lastlri.lr_next = lri
            if not lri: break
            lri.lr_num = len(LRitems)
            LRitems.append(lri)
            lastlri = lri
            i += 1
# In order for the rest of the parser generator to work, we need to
# guarantee that no more lritems are generated. Therefore, we nuke
# the p.lr_item method. (Only used in debugging)
# Production.lr_item = None
# -----------------------------------------------------------------------------
# add_precedence()
#
# Given a list of precedence rules, add to the precedence table.
# -----------------------------------------------------------------------------
def add_precedence(plist):
    """Add a user-supplied precedence specification to the Precedence table.

    plist is a list of tuples ('left'|'right'|'nonassoc', term1, term2,
    ...); position in the list determines the precedence level.  Returns
    0 on success, -1 for an invalid associativity keyword, or a positive
    error count for other problems (messages go to stderr).
    """
    plevel = 0
    error = 0
    for p in plist:
        plevel += 1
        try:
            prec = p[0]
            terms = p[1:]
            if prec != 'left' and prec != 'right' and prec != 'nonassoc':
                sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
                return -1
            for t in terms:
                if Precedence.has_key(t):
                    sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
                    error += 1
                    continue
                Precedence[t] = (prec,plevel)
        except StandardError:
            # Malformed entry (e.g. not a sequence).  Narrowed from a bare
            # 'except:' so SystemExit/KeyboardInterrupt are not swallowed.
            sys.stderr.write("yacc: Invalid precedence table.\n")
            error += 1
    return error
# -----------------------------------------------------------------------------
# augment_grammar()
#
# Compute the augmented grammar. This is just a rule S' -> start where start
# is the starting symbol.
# -----------------------------------------------------------------------------
def augment_grammar(start=None):
    """Install the augmented rule S' -> start as production 0.

    If start is not given, the first user-defined production's name is
    used as the start symbol.
    """
    if not start:
        start = Productions[1].name
    p0 = Production(name="S'", prod=[start], number=0, len=1,
                    prec=('right', 0), func=None)
    p0.usyms = [start]
    Productions[0] = p0
    Nonterminals[start].append(0)
# -------------------------------------------------------------------------
# first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def first(beta):
    """Compute FIRST1(beta) for a sequence of grammar symbols beta.

    Returns a list of terminals; '<empty>' is included only when every
    symbol in beta can derive the empty string (which is trivially true
    for an empty beta).  May be incomplete while compute_first1() is
    still iterating.
    """
    result = []
    all_derive_empty = 1
    for x in beta:
        x_derives_empty = 0
        # Fold the non-<empty> members of First[x] into the result
        for f in First[x]:
            if f == '<empty>':
                x_derives_empty = 1
            elif f not in result:
                result.append(f)
        if not x_derives_empty:
            # x blocks the empty derivation; later symbols are irrelevant
            all_derive_empty = 0
            break
    if all_derive_empty:
        result.append('<empty>')
    return result
# FOLLOW(x)
# Given a non-terminal. This function computes the set of all symbols
# that might follow it. Dragon book, p. 189.
def compute_follow(start=None):
    """Compute FOLLOW(B) for every nonterminal into the global Follow map.

    Standard fixed-point algorithm (Dragon book, p. 189); First must have
    been computed already via compute_first1().
    """
    # Add '$end' to the follow list of the start symbol
    for k in Nonterminals.keys():
        Follow[k] = [ ]
    if not start:
        start = Productions[1].name
    Follow[start] = [ '$end' ]
    while 1:
        didadd = 0
        for p in Productions[1:]:
            # Here is the production set
            for i in range(len(p.prod)):
                B = p.prod[i]
                if Nonterminals.has_key(B):
                    # Okay. We got a non-terminal in a production
                    fst = first(p.prod[i+1:])
                    hasempty = 0
                    for f in fst:
                        if f != '<empty>' and f not in Follow[B]:
                            Follow[B].append(f)
                            didadd = 1
                        if f == '<empty>':
                            hasempty = 1
                    if hasempty or i == (len(p.prod)-1):
                        # Add elements of follow(a) to follow(b)
                        for f in Follow[p.name]:
                            if f not in Follow[B]:
                                Follow[B].append(f)
                                didadd = 1
        if not didadd: break
    if 0 and yaccdebug:
        _vf.write('\nFollow:\n')
        for k in Nonterminals.keys():
            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
# -------------------------------------------------------------------------
# compute_first1()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first1():
    """Compute FIRST1(X) for every grammar symbol into the global First map."""
    # FIRST of a terminal is the terminal itself
    for t in Terminals.keys():
        First[t] = [t]
    First['$end'] = ['$end']
    First['#'] = ['#'] # what's this for?
    # Nonterminals start empty and are grown to a fixed point
    for n in Nonterminals.keys():
        First[n] = []
    changed = 1
    while changed:
        changed = 0
        for n in Nonterminals.keys():
            for p in Prodnames[n]:
                for f in first(p.prod):
                    if f not in First[n]:
                        First[n].append(f)
                        changed = 1
    if 0 and yaccdebug:
        _vf.write('\nFirst:\n')
        for k in Nonterminals.keys():
            _vf.write("%-20s : %s\n" %
                (k, " ".join([str(s) for s in First[k]])))
# -----------------------------------------------------------------------------
# === SLR Generation ===
#
# The following functions are used to construct SLR (Simple LR) parsing tables
# as described on p.221-229 of the dragon book.
# -----------------------------------------------------------------------------
# Global variables for the LR parsing engine
def lr_init_vars():
    """Reset the global tables used by the LR parse-table construction pass."""
    global _lr_action, _lr_goto, _lr_method
    global _lr_goto_cache, _lr0_cidhash
    _lr_action = { } # Action table
    _lr_goto = { } # Goto table
    _lr_method = "Unknown" # LR method used ('SLR' or 'LALR')
    _lr_goto_cache = { }
    _lr0_cidhash = { }
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
# Visited productions are stamped with _add_count instead of having a flag
# cleared between calls.
_add_count = 0 # Per-call stamp used by lr0_closure to mark visited items
def lr0_closure(I):
    """Return the closure of the LR(0) item set I as a new list.

    Visited productions are stamped with a fresh _add_count value so the
    'seen' flags never need clearing between calls.  Each item reachable
    through an item's lrafter list contributes its dotted successor
    (lr_next).  (An unused local 'prodlist' from the original was removed.)
    """
    global _add_count
    _add_count += 1

    # Start from a copy of I and grow it to a fixed point
    J = I[:]
    didadd = 1
    while didadd:
        didadd = 0
        for j in J:
            for x in j.lrafter:
                if x.lr0_added == _add_count: continue
                # Add B --> .G to J
                J.append(x.lr_next)
                x.lr0_added = _add_count
                didadd = 1
    return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(I,x):
    """Compute goto(I,x): the LR(0) item set reached from I on symbol x.

    Results are memoized by (id(I),x), and a trie keyed on the successor
    items guarantees that equal goto sets are always returned as the
    *same* list object, so later code can compare item sets with id()
    instead of element-wise comparison.
    """
    # First we look for a previously cached entry
    g = _lr_goto_cache.get((id(I),x),None)
    if g: return g
    # Now we generate the goto set in a way that guarantees uniqueness
    # of the result
    s = _lr_goto_cache.get(x,None)
    if not s:
        s = { }
        _lr_goto_cache[x] = s
    gs = [ ]
    for p in I:
        n = p.lr_next
        if n and n.lrbefore == x:
            # Descend one trie level per matching successor item
            s1 = s.get(id(n),None)
            if not s1:
                s1 = { }
                s[id(n)] = s1
            gs.append(n)
            s = s1
    # The '$end' slot of the final trie node caches the finished goto set
    g = s.get('$end',None)
    if not g:
        if gs:
            g = lr0_closure(gs)
            s['$end'] = g
        else:
            s['$end'] = gs
    _lr_goto_cache[(id(I),x)] = g
    return g
_lr0_cidhash = { } # Maps id(item set) -> state number (filled in by lr0_items)
# Compute the canonical collection of LR(0) sets of items
def lr0_items():
    """Compute the canonical collection of LR(0) item sets.

    Returns the list C of item sets (states); as a side effect each set's
    id() is recorded in _lr0_cidhash mapped to its state number.
    """
    C = [ lr0_closure([Productions[0].lr_next]) ]
    i = 0
    for I in C:
        _lr0_cidhash[id(I)] = i
        i += 1
    # Loop over the items in C and each grammar symbols
    i = 0
    while i < len(C):       # C grows while we iterate; index manually
        I = C[i]
        i += 1
        # Collect all of the symbols that could possibly be in the goto(I,X) sets
        asyms = { }
        for ii in I:
            for s in ii.usyms:
                asyms[s] = None
        for x in asyms.keys():
            g = lr0_goto(I,x)
            if not g: continue
            if _lr0_cidhash.has_key(id(g)): continue
            _lr0_cidhash[id(g)] = len(C)
            C.append(g)
    return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# Note: This implementation is a complete replacement of the LALR(1)
# implementation in PLY-1.x releases. That version was based on
# a less efficient algorithm and it had bugs in its implementation.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals():
    """Return a dict whose keys are all nonterminals that can derive <empty>."""
    nullable = {}
    prev_count = -1      # force at least one pass
    while prev_count != len(nullable):
        prev_count = len(nullable)
        for p in Productions[1:]:
            if p.len == 0:
                nullable[p.name] = 1
                continue
            # p is nullable iff every rhs symbol is already known nullable
            all_nullable = 1
            for t in p.prod:
                if not nullable.has_key(t):
                    all_nullable = 0
                    break
            if all_nullable:
                nullable[p.name] = 1
    return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(C):
    """Return all nonterminal transitions in the LR(0) item sets C.

    A transition is a tuple (state, N) where some item in C[state] has
    its dot immediately before the nonterminal N; duplicates are
    suppressed.  (A stray 'state = state + 1' in the original was dead
    code -- 'state' is rebound by the for loop -- and has been removed.)
    """
    trans = []
    for state in range(len(C)):
        for p in C[state]:
            if p.lr_index < p.len - 1:
                t = (state, p.prod[p.lr_index + 1])
                if Nonterminals.has_key(t[1]):
                    if t not in trans:
                        trans.append(t)
    return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(C,trans,nullable):
    """Compute DR(p,A): terminals directly readable after the transition.

    trans is a (state, N) tuple; returns the list of terminals appearing
    immediately after the dot in goto(C[state], N), plus '$end' for the
    transition out of the start state on the start symbol.
    """
    state, N = trans
    terms = []
    for item in lr0_goto(C[state], N):
        if item.lr_index < item.len - 1:
            sym = item.prod[item.lr_index + 1]
            if Terminals.has_key(sym) and sym not in terms:
                terms.append(sym)
    # This extra bit is to handle the start state
    if state == 0 and N == Productions[0].prod[0]:
        terms.append('$end')
    return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(C, trans, empty):
    """Compute READS: pairs (j, a) where (state,N) READS (j, a).

    j is the state reached by goto(C[state], N), and a ranges over the
    nullable symbols found just after the dot in that state's items.
    """
    state, N = trans
    g = lr0_goto(C[state], N)
    j = _lr0_cidhash.get(id(g), -1)
    rel = []
    for item in g:
        if item.lr_index < item.len - 1:
            sym = item.prod[item.lr_index + 1]
            if empty.has_key(sym):
                rel.append((j, sym))
    return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(C,trans,nullable):
    """Determine the LOOKBACK and INCLUDES relations for all transitions.

    C is the list of LR(0) item sets, trans the nonterminal transitions,
    nullable the dict of nullable nonterminals.  Returns the pair
    (lookdict, includedict); see the banner comment above for the formal
    definitions of the two relations.
    """
    lookdict = {} # Dictionary of lookback relations
    includedict = {} # Dictionary of include relations
    # Make a dictionary of non-terminal transitions
    dtrans = {}
    for t in trans:
        dtrans[t] = 1
    # Loop over all transitions and compute lookbacks and includes
    for state,N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N: continue
            # Okay, we have a name match. We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]
                # Check to see if this symbol and state are a non-terminal transition
                if dtrans.has_key((j,t)):
                    # Yes. Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if Terminals.has_key(p.prod[li]): break # No forget it
                        if not nullable.has_key(p.prod[li]): break
                        li = li + 1
                    else:
                        # Appears to be a relation between (j,t) and (state,N)
                        includes.append((j,t))
                g = lr0_goto(C[j],t) # Go to next set
                j = _lr0_cidhash.get(id(g),-1) # Go to next state
            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name: continue
                if r.len != p.len: continue
                i = 0
                # This loop is comparing a production ". A B C" with "A B C ."
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]: break
                    i = i + 1
                else:
                    lookb.append((j,r))
        for i in includes:
            if not includedict.has_key(i): includedict[i] = []
            includedict[i].append((state,N))
        lookdict[(state,N)] = lookb
    return lookdict,includedict
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
    """Solve F(x) = FP(x) U union{ F(y) : x R y } for all x in X.

    X is the input set, R a relation function, FP the base set-valued
    function; returns the dictionary F.
    """
    N = dict.fromkeys(X, 0)   # 0 = unvisited; else DFS depth / maxint marker
    F = {}
    stack = []
    for x in X:
        if N[x] == 0:
            traverse(x, N, stack, F, X, R, FP)
    return F
def traverse(x,N,stack,F,X,R,FP):
    """Depth-first step of digraph().

    N[x] is 0 (unvisited), the DFS depth while x is on the stack, or
    sys.maxint once x's strongly-connected component is complete; when a
    component root is detected, every member popped off the stack shares
    the root's F value.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x) # F(X) <- F'(x)
    rel = R(x) # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component; unwind it
        N[stack[-1]] = sys.maxint
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = sys.maxint
            F[stack[-1]] = F[x]
            element = stack.pop()
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(C, ntrans, nullable):
    """Compute the Read() sets for all nonterminal transitions.

    C is the list of LR(0) item sets, ntrans the nonterminal transitions,
    nullable the nullable-nonterminal dict.  Solved via digraph() with
    READS as the relation and DR as the base function.
    """
    return digraph(ntrans,
                   lambda x: reads_relation(C, x, nullable),
                   lambda x: dr_relation(C, x, nullable))
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(ntrans,readsets,inclsets):
    """Compute LALR FOLLOW sets for all nonterminal transitions.

    Follow(p,A) = Read(p,A) U union{ Follow(p',B) : (p,A) INCLUDES (p',B) },
    solved via digraph() with the includes map as the relation and the
    precomputed read sets as the base function.
    """
    return digraph(ntrans,
                   lambda x: inclsets.get(x, []),
                   lambda x: readsets[x])
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(lookbacks,followset):
    """Attach LALR lookahead terminals to productions via the lookback map.

    For each transition, the transition's FOLLOW terminals are merged into
    p.lookaheads[state] for every (state, p) pair it looks back to.
    """
    for trans, lb in lookbacks.items():
        f = followset.get(trans, [])
        for state, p in lb:
            if not p.lookaheads.has_key(state):
                p.lookaheads[state] = []
            laheads = p.lookaheads[state]
            for a in f:
                if a not in laheads:
                    laheads.append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing: it runs the full DeRemer/Pennello-style pipeline
# (nullable set, nonterminal transitions, Read sets, lookback/includes
# relations, FOLLOW sets) and attaches the results to the productions in C.
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(C):
    # Determine all of the nullable nonterminals
    nullable_set = compute_nullable_nonterminals()
    # Find all non-terminal transitions
    nt_transitions = find_nonterminal_transitions(C)
    # Compute read sets
    read_sets = compute_read_sets(C, nt_transitions, nullable_set)
    # Compute lookback/includes relations
    lookback_rel, includes_rel = compute_lookback_includes(C, nt_transitions, nullable_set)
    # Compute LALR FOLLOW sets
    follow_sets = compute_follow_sets(nt_transitions, read_sets, includes_rel)
    # Add all of the lookaheads
    add_lookaheads(lookback_rel, follow_sets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
#
# method is 'SLR' or 'LALR'.  Fills the module-level _lr_action and _lr_goto
# tables (one dict per state) and, when yaccdebug is set, writes a state
# dump to _vf and conflict notes to _vfc.  Shift/reduce conflicts are
# resolved with the Precedence table (default: shift); reduce/reduce
# conflicts favor the rule defined earlier in the grammar file.
# -----------------------------------------------------------------------------
def lr_parse_table(method):
    global _lr_method
    goto = _lr_goto           # Goto array
    action = _lr_action       # Action array
    actionp = { }             # Action production array (temporary)

    _lr_method = method

    n_srconflict = 0
    n_rrconflict = 0

    if yaccdebug:
        sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
        _vf.write("\n\nParsing method: %s\n\n" % method)

    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = lr0_items()

    if method == 'LALR':
        add_lalr_lookaheads(C)

    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ]              # List of actions
        st_action = { }
        st_actionp = { }
        st_goto = { }
        if yaccdebug:
            _vf.write("\nstate %d\n\n" % st)
            for p in I:
                _vf.write(" (%d) %s\n" % (p.number, str(p)))
            _vf.write("\n")

        for p in I:
            try:
                if p.len == p.lr_index + 1:
                    # Dot is at the end of the production
                    if p.name == "S'":
                        # Start symbol. Accept!
                        st_action["$end"] = 0
                        st_actionp["$end"] = p
                    else:
                        # We are at the end of a production. Reduce!
                        if method == 'LALR':
                            laheads = p.lookaheads[st]
                        else:
                            # SLR: reduce on everything in FOLLOW(name)
                            laheads = Follow[p.name]
                        for a in laheads:
                            actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa. Have a shift/reduce or reduce/reduce conflict
                                if r > 0:
                                    # Need to decide on shift or reduce here
                                    # By default we favor shifting. Need to add
                                    # some precedence rules here.
                                    sprec,slevel = Productions[st_actionp[a].number].prec
                                    rprec,rlevel = Precedence.get(a,('right',0))
                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                        # We really need to reduce here.
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                        if not slevel and not rlevel:
                                            # No precedence declared on either side
                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
                                            _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
                                            n_srconflict += 1
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        # Nonassociative at equal precedence: error entry
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the shift
                                        if not rlevel:
                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
                                            _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
                                            n_srconflict +=1
                                elif r < 0:
                                    # Reduce/reduce conflict. In this case, we favor the rule
                                    # that was defined first in the grammar file
                                    oldp = Productions[-r]
                                    pp = Productions[p.number]
                                    if oldp.line > pp.line:
                                        st_action[a] = -p.number
                                        st_actionp[a] = p
                                    # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
                                    n_rrconflict += 1
                                    _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, st_actionp[a].number, st_actionp[a]))
                                    _vf.write(" ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,st_actionp[a].number, st_actionp[a]))
                                else:
                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
                            else:
                                # No conflict: plain reduce entry
                                st_action[a] = -p.number
                                st_actionp[a] = p
                else:
                    # Dot is in the middle: potential shift
                    i = p.lr_index
                    a = p.prod[i+1] # Get symbol right after the "."
                    if Terminals.has_key(a):
                        g = lr0_goto(I,a)
                        j = _lr0_cidhash.get(id(g),-1)
                        if j >= 0:
                            # We are in a shift state
                            actlist.append((a,p,"shift and go to state %d" % j))
                            r = st_action.get(a,None)
                            if r is not None:
                                # Whoa have a shift/reduce or shift/shift conflict
                                if r > 0:
                                    if r != j:
                                        sys.stderr.write("Shift/shift conflict in state %d\n" % st)
                                elif r < 0:
                                    # Do a precedence check.
                                    # - if precedence of reduce rule is higher, we reduce.
                                    # - if precedence of reduce is same and left assoc, we reduce.
                                    # - otherwise we shift
                                    rprec,rlevel = Productions[st_actionp[a].number].prec
                                    sprec,slevel = Precedence.get(a,('right',0))
                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec != 'left')):
                                        # We decide to shift here... highest precedence to shift
                                        st_action[a] = j
                                        st_actionp[a] = p
                                        if not rlevel:
                                            n_srconflict += 1
                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
                                            _vf.write(" ! shift/reduce conflict for %s resolved as shift.\n" % a)
                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                        st_action[a] = None
                                    else:
                                        # Hmmm. Guess we'll keep the reduce
                                        if not slevel and not rlevel:
                                            n_srconflict +=1
                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
                                            _vf.write(" ! shift/reduce conflict for %s resolved as reduce.\n" % a)
                                else:
                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
                            else:
                                # No conflict: plain shift entry
                                st_action[a] = j
                                st_actionp[a] = p
            except StandardError,e:
                print sys.exc_info()
                raise YaccError, "Hosed in lr_parse_table"

        # Print the actions associated with each terminal
        if yaccdebug:
            _actprint = { }
            for a,p,m in actlist:
                if st_action.has_key(a):
                    if p is st_actionp[a]:
                        _vf.write(" %-15s %s\n" % (a,m))
                        _actprint[(a,m)] = 1
            _vf.write("\n")
            # Actions that lost a conflict are shown with a "!" marker
            for a,p,m in actlist:
                if st_action.has_key(a):
                    if p is not st_actionp[a]:
                        if not _actprint.has_key((a,m)):
                            _vf.write(" ! %-15s [ %s ]\n" % (a,m))
                            _actprint[(a,m)] = 1

        # Construct the goto table for this state
        if yaccdebug:
            _vf.write("\n")
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if Nonterminals.has_key(s):
                    nkeys[s] = None
        for n in nkeys.keys():
            g = lr0_goto(I,n)
            j = _lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                if yaccdebug:
                    _vf.write(" %-30s shift and go to state %d\n" % (n,j))

        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto

        st += 1

    # Summarize conflict counts on stderr
    if yaccdebug:
        if n_srconflict == 1:
            sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
        if n_srconflict > 1:
            sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
        if n_rrconflict == 1:
            sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
        if n_rrconflict > 1:
            sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
# -----------------------------------------------------------------------------
# ==== LR Utility functions ====
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _lr_write_tables()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def lr_write_tables(modulename=tab_module,outputdir=''):
filename = os.path.join(outputdir,modulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_lr_method = %s
_lr_signature = %s
""" % (filename, repr(_lr_method), repr(Signature.digest())))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in _lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _lr_action.has_key(_x): _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in _lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in _lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _lr_goto.has_key(_x): _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in _lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in Productions:
if p:
if (p.func):
f.write(" (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
else:
f.write(" (%r,%d,None,None,None),\n" % (p.name, p.len))
else:
f.write(" None,\n")
f.write("]\n")
f.close()
except IOError,e:
print >>sys.stderr, "Unable to create '%s'" % filename
print >>sys.stderr, e
return
def lr_read_tables(module=tab_module,optimize=0):
    # Try to load previously generated parsing tables from the given table
    # module.  Returns 1 on success; 0 if the module is missing, incomplete,
    # or (when not optimizing) its signature no longer matches the grammar.
    global _lr_action, _lr_goto, _lr_productions, _lr_method
    try:
        exec "import %s as parsetab" % module

        # In optimized mode the signature check is skipped entirely
        if (optimize) or (Signature.digest() == parsetab._lr_signature):
            _lr_action = parsetab._lr_action
            _lr_goto = parsetab._lr_goto
            _lr_productions = parsetab._lr_productions
            _lr_method = parsetab._lr_method
            return 1
        else:
            return 0

    except (ImportError,AttributeError):
        return 0
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build the parser module.
#
# Collects the grammar definition (tokens, precedence, p_* rule functions,
# p_error) from the supplied module/instance or from the caller's globals,
# builds or reloads the LR tables, and returns a Parser object.  Also
# rebinds the module-level parse() and parser globals.
# -----------------------------------------------------------------------------
def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
    global yaccdebug
    yaccdebug = debug

    initialize_vars()
    files = { }
    error = 0

    # Add parsing method to signature
    Signature.update(method)

    # If a "module" parameter was supplied, extract its dictionary.
    # Note: a module may in fact be an instance as well.
    if module:
        # User supplied a module object.
        if isinstance(module, types.ModuleType):
            ldict = module.__dict__
        elif isinstance(module, _INSTANCETYPE):
            _items = [(k,getattr(module,k)) for k in dir(module)]
            ldict = { }
            for i in _items:
                ldict[i[0]] = i[1]
        else:
            raise ValueError,"Expected a module"

    else:
        # No module given. We might be able to get information from the caller.
        # Throw an exception and unwind the traceback to get the globals
        try:
            raise RuntimeError
        except RuntimeError:
            e,b,t = sys.exc_info()
            f = t.tb_frame
            f = f.f_back # Walk out to our calling function
            ldict = f.f_globals # Grab its globals dictionary

    # Add starting symbol to signature
    if not start:
        start = ldict.get("start",None)
    if start:
        Signature.update(start)

    # If running in optimized mode. We're going to
    if (optimize and lr_read_tables(tabmodule,1)):
        # Read parse table: rebuild Productions from the stored tuples only
        del Productions[:]
        for p in _lr_productions:
            if not p:
                Productions.append(None)
            else:
                m = MiniProduction()
                m.name = p[0]
                m.len = p[1]
                m.file = p[3]
                m.line = p[4]
                if p[2]:
                    m.func = ldict[p[2]]
                Productions.append(m)

    else:
        # Get the tokens map
        if (module and isinstance(module,_INSTANCETYPE)):
            tokens = getattr(module,"tokens",None)
        else:
            tokens = ldict.get("tokens",None)

        if not tokens:
            raise YaccError,"module does not define a list 'tokens'"
        if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
            raise YaccError,"tokens must be a list or tuple."

        # Check to see if a requires dictionary is defined.
        requires = ldict.get("require",None)
        if requires:
            if not (isinstance(requires,types.DictType)):
                raise YaccError,"require must be a dictionary."

            for r,v in requires.items():
                try:
                    if not (isinstance(v,types.ListType)):
                        raise TypeError
                    v1 = [x.split(".") for x in v]
                    Requires[r] = v1
                except StandardError:
                    print >>sys.stderr, "Invalid specification for rule '%s' in require. Expected a list of strings" % r

        # Build the dictionary of terminals. We a record a 0 in the
        # dictionary to track whether or not a terminal is actually
        # used in the grammar
        if 'error' in tokens:
            print >>sys.stderr, "yacc: Illegal token 'error'. Is a reserved word."
            raise YaccError,"Illegal token name"

        for n in tokens:
            if Terminals.has_key(n):
                print >>sys.stderr, "yacc: Warning. Token '%s' multiply defined." % n
            Terminals[n] = [ ]

        Terminals['error'] = [ ]

        # Get the precedence map (if any)
        prec = ldict.get("precedence",None)
        if prec:
            if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
                raise YaccError,"precedence must be a list or tuple."
            add_precedence(prec)
            Signature.update(repr(prec))

        for n in tokens:
            if not Precedence.has_key(n):
                Precedence[n] = ('right',0) # Default, right associative, 0 precedence

        # Look for error handler
        ef = ldict.get('p_error',None)
        if ef:
            if isinstance(ef,types.FunctionType):
                ismethod = 0
            elif isinstance(ef, types.MethodType):
                ismethod = 1
            else:
                raise YaccError,"'p_error' defined, but is not a function or method."
            eline = ef.func_code.co_firstlineno
            efile = ef.func_code.co_filename
            files[efile] = None

            # Methods take an implicit self in addition to the token argument
            if (ef.func_code.co_argcount != 1+ismethod):
                raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
            global Errorfunc
            Errorfunc = ef
        else:
            print >>sys.stderr, "yacc: Warning. no p_error() function is defined."

        # Get the list of built-in functions with p_ prefix
        symbols = [ldict[f] for f in ldict.keys()
                   if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
                       and ldict[f].__name__ != 'p_error')]

        # Check for non-empty symbols
        if len(symbols) == 0:
            raise YaccError,"no rules of the form p_rulename are defined."

        # Sort the symbols by line number so rule order matches source order
        symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))

        # Add all of the symbols to the grammar
        for f in symbols:
            if (add_function(f)) < 0:
                error += 1
            else:
                files[f.func_code.co_filename] = None

        # Make a signature of the docstrings
        for f in symbols:
            if f.__doc__:
                Signature.update(f.__doc__)

        lr_init_vars()

        if error:
            raise YaccError,"Unable to construct parser."

        if not lr_read_tables(tabmodule):
            # Stale or missing tables: validate the grammar and rebuild

            # Validate files
            for filename in files.keys():
                if not validate_file(filename):
                    error = 1

            # Validate dictionary
            validate_dict(ldict)

            if start and not Prodnames.has_key(start):
                raise YaccError,"Bad starting symbol '%s'" % start

            augment_grammar(start)
            error = verify_productions(cycle_check=check_recursion)
            # NOTE(review): this tests type(f) where f is a dict *key* (a
            # string), so otherfunc is always empty; it is also never used.
            # Harmless, but likely intended to be type(ldict[f]).
            otherfunc = [ldict[f] for f in ldict.keys()
                         if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]

            if error:
                raise YaccError,"Unable to construct parser."

            build_lritems()
            compute_first1()
            compute_follow(start)

            if method in ['SLR','LALR']:
                lr_parse_table(method)
            else:
                raise YaccError, "Unknown parsing method '%s'" % method

            if write_tables:
                lr_write_tables(tabmodule,outputdir)

            if yaccdebug:
                try:
                    f = open(os.path.join(outputdir,debugfile),"w")
                    f.write(_vfc.getvalue())
                    f.write("\n\n")
                    f.write(_vf.getvalue())
                    f.close()
                except IOError,e:
                    print >>sys.stderr, "yacc: can't create '%s'" % debugfile,e

    # Made it here. Create a parser object and set up its internal state.
    # Set global parse() method to bound method of parser object.
    p = Parser("xyzzy")
    p.productions = Productions
    p.errorfunc = Errorfunc
    p.action = _lr_action
    p.goto = _lr_goto
    p.method = _lr_method
    p.require = Requires

    global parse
    parse = p.parse

    global parser
    parser = p

    # Clean up all of the globals we created
    if (not optimize):
        yacc_cleanup()

    return p
# yacc_cleanup function. Delete all of the global variables
# used during table construction
def yacc_cleanup():
    # Free the (potentially large) intermediate structures built while
    # constructing the tables; the Parser object returned by yacc() keeps
    # its own references to everything it needs.
    global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
    del _lr_action, _lr_goto, _lr_method, _lr_goto_cache

    global Productions, Prodnames, Prodmap, Terminals
    global Nonterminals, First, Follow, Precedence, LRitems
    global Errorfunc, Signature, Requires
    del Productions, Prodnames, Prodmap, Terminals
    del Nonterminals, First, Follow, Precedence, LRitems
    del Errorfunc, Signature, Requires

    # Debug output buffers
    global _vf, _vfc
    del _vf, _vfc
# Stub that raises an error if parsing is attempted without first calling yacc()
def parse(*args,**kwargs):
    # Rebound to the Parser.parse bound method once yacc() has been called.
    raise YaccError, "yacc: No parser built with yacc()"
__version__ = "2.3"

import re, sys, types

# Regular expression used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

# Available instance types. This is used when lexers are defined by a class.
# It's a little funky because I want to preserve backwards compatibility
# with Python 2.0 where types.ObjectType is undefined.
try:
    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
except AttributeError:
    # Python 2.0: no new-style classes, so fall back to classic instances only
    _INSTANCETYPE = types.InstanceType
    class object: pass # Note: needed if no new-style classes present
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when the lexer encounters input it cannot match and no
    t_error() rule is available.  ``text`` holds the unmatched remainder
    of the input string."""
    def __init__(self, message, s):
        # Keep the remaining input on the instance; store the message via
        # the standard Exception args protocol.
        self.text = s
        self.args = (message,)
# Token class
class LexToken(object):
    """A single token produced by the lexer.

    Attributes are assigned by the lexer rather than __init__: type,
    value, lineno, lexpos, and lexer (the owning Lexer instance).
    """
    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return "LexToken(%s,%r,%d,%d)" % (self.type, self.value, self.lineno, self.lexpos)

    def skip(self, n):
        # Delegate to the owning lexer to advance past n input characters.
        self.lexer.skip(n)
# -----------------------------------------------------------------------------
# Lexer class
#
# This class encapsulates all of the methods and data associated with a lexer.
#
#    input()          -  Store a new string in the lexer
#    token()          -  Get the next token
# -----------------------------------------------------------------------------
class Lexer:
    def __init__(self):
        self.lexre = None # Master regular expression. This is a list of
                          # tuples (re,findex) where re is a compiled
                          # regular expression and findex is a list
                          # mapping regex group numbers to rules
        self.lexretext = None # Current regular expression strings
        self.lexstatere = {} # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
        self.lexstate = "INITIAL" # Current lexer state
        self.lexstatestack = [] # Stack of lexer states
        self.lexstateinfo = None # State information
        self.lexstateignore = {} # Dictionary of ignored characters for each state
        self.lexstateerrorf = {} # Dictionary of error functions for each state
        self.lexreflags = 0 # Optional re compile flags
        self.lexdata = None # Actual input data (as a string)
        self.lexpos = 0 # Current position in input text
        self.lexlen = 0 # Length of the input text
        self.lexerrorf = None # Error rule (if any)
        self.lextokens = None # List of valid tokens
        self.lexignore = "" # Ignored characters
        self.lexliterals = "" # Literal characters that can be passed through
        self.lexmodule = None # Module
        self.lineno = 1 # Current line number
        self.lexdebug = 0 # Debugging mode
        self.lexoptimize = 0 # Optimized mode

    def clone(self,object=None):
        # Return a copy of this lexer that shares the compiled rule tables.
        # If object is given, rule functions are rebound to that object.
        c = Lexer()
        c.lexstatere = self.lexstatere
        c.lexstateinfo = self.lexstateinfo
        c.lexstateretext = self.lexstateretext
        c.lexstate = self.lexstate
        c.lexstatestack = self.lexstatestack
        c.lexstateignore = self.lexstateignore
        c.lexstateerrorf = self.lexstateerrorf
        c.lexreflags = self.lexreflags
        c.lexdata = self.lexdata
        c.lexpos = self.lexpos
        c.lexlen = self.lexlen
        c.lextokens = self.lextokens
        c.lexdebug = self.lexdebug
        c.lineno = self.lineno
        c.lexoptimize = self.lexoptimize
        c.lexliterals = self.lexliterals
        c.lexmodule = self.lexmodule

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            # String rule or ignored rule: nothing to rebind
                            newfindex.append(f)
                            continue
                        # Rebind by name so the new object's method is used
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object

        # Set up other attributes
        c.begin(c.lexstate)
        return c

    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self,tabfile):
        # Rule functions are stored by name (see _funcs_to_names) so the
        # generated <tabfile>.py can be reloaded later with readtab().
        tf = open(tabfile+".py","w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_lextokens = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))

        tabre = { }
        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
            tabre[key] = titem

        tf.write("_lexstatere = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))

        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()

    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        # fdict maps stored function names back to callables
        # (see _names_to_funcs)
        exec "import %s as lextab" % tabfile
        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
            raise ValueError, "Expected a string"
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        if not self.lexstatere.has_key(state):
            raise ValueError, "Undefined state"
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        self.lexpos += n

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible. Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue

                # Set last match in lexer so that rules can access it if they want
                self.lexmatch = m

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos
                tok.lexer = self

                lexpos = m.end()
                i = m.lastindex
                func,tok.type = lexindexfunc[i]
                self.lexpos = lexpos

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type: return tok
                    break

                # if func not callable, it means it's an ignored token
                if not callable(func):
                    break

                # If token is processed by a function, call it
                newtok = func(tok)

                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos # This is here in case user has updated lexpos.
                    break

                # Verify type of the token. If not in the token map, raise an error
                if not self.lexoptimize:
                    if not self.lextokens.has_key(newtok.type):
                        raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.func_code.co_filename, func.func_code.co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])

                return newtok
            else:
                # for-else: no master regex matched at this position.
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.lexer = self
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok

                self.lexpos = lexpos
                raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError, "No input string given with input()"
        # End of input
        return None
# -----------------------------------------------------------------------------
# _validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match on each line in the filename.  Returns 1 if the file looks OK (or
# cannot be checked at all), 0 if any duplicate rule is found.
# -----------------------------------------------------------------------------
def _validate_file(filename):
    import os.path
    base,ext = os.path.splitext(filename)
    if ext != '.py':
        return 1 # No idea what the file is. Return OK

    try:
        f = open(filename)
        lines = f.readlines()
        f.close()
    except IOError:
        return 1 # Oh well

    # One pattern for "def t_NAME(" rules, one for "t_NAME =" string rules
    func_pat = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
    str_pat = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
    first_seen = { }
    noerror = 1
    linen = 1
    for line in lines:
        m = func_pat.match(line)
        if not m:
            m = str_pat.match(line)
        if m:
            name = m.group(1)
            prev = first_seen.get(name)
            if not prev:
                first_seen[name] = linen
            else:
                print >>sys.stderr, "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
                noerror = 0
        linen += 1
    return noerror
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist):
result = []
for f in funclist:
if f and f[0]:
result.append((f[0].__name__,f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
#
# Returns ([(compiled_re, group-index -> rule map)], [pattern strings]).
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
    if not relist: return []
    # Combine all rule patterns as alternatives of one regex
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                # Rule defined by a function: dispatch to it on a match
                lexindexfunc[i] = (handle,toknames[handle.__name__])
            elif handle is not None:
                # If rule was specified as a string, we build an anonymous
                # callback function to carry out the action
                if f.find("ignore_") > 0:
                    # ignore-rule: matched text is discarded entirely
                    lexindexfunc[i] = (None,None)
                else:
                    lexindexfunc[i] = (None, toknames[f])

        return [(lexre,lexindexfunc)],[regex]
    except Exception,e:
        # re.compile failed (e.g. too many named groups): split the rule
        # list in half and recurse, yielding several smaller master regexes
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
nonstate = 1
parts = s.split("_")
for i in range(1,len(parts)):
if not names.has_key(parts[i]) and parts[i] != 'ANY': break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names.keys())
tokenname = "_".join(parts[i:])
return (states,tokenname)
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0):
    """Build and return a Lexer from t_* rule definitions.

    Rules are taken from *module*/*object* if given, otherwise from the
    caller's global namespace.  Validates tokens, literals, states and rules,
    builds the master regular expressions, and installs module-level token()
    and input() shortcuts.  Raises SyntaxError if the rule set is invalid.
    """
    global lexer
    ldict = None
    stateinfo = { 'INITIAL' : 'inclusive'}
    error = 0
    files = { }
    lexobj = Lexer()
    lexobj.lexdebug = debug
    lexobj.lexoptimize = optimize
    global token,input
    if nowarn: warn = 0
    else: warn = 1
    if object: module = object
    if module:
        # User supplied a module object.
        if isinstance(module, types.ModuleType):
            ldict = module.__dict__
        elif isinstance(module, _INSTANCETYPE):
            _items = [(k,getattr(module,k)) for k in dir(module)]
            ldict = { }
            for (i,v) in _items:
                ldict[i] = v
        else:
            raise ValueError,"Expected a module or instance"
        lexobj.lexmodule = module
    else:
        # No module given.  We might be able to get information from the caller.
        # Raising/catching RuntimeError is a trick to obtain a traceback whose
        # frame points back at our caller.
        try:
            raise RuntimeError
        except RuntimeError:
            e,b,t = sys.exc_info()
            f = t.tb_frame
            f = f.f_back # Walk out to our calling function
            ldict = f.f_globals # Grab its globals dictionary
    if optimize and lextab:
        # Try to load a previously generated lextab module instead of rebuilding.
        try:
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass
    # Get the tokens, states, and literals variables (if any)
    if (module and isinstance(module,_INSTANCETYPE)):
        tokens = getattr(module,"tokens",None)
        states = getattr(module,"states",None)
        literals = getattr(module,"literals","")
    else:
        tokens = ldict.get("tokens",None)
        states = ldict.get("states",None)
        literals = ldict.get("literals","")
    if not tokens:
        raise SyntaxError,"lex: module does not define 'tokens'"
    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
        raise SyntaxError,"lex: tokens must be a list or tuple."
    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    if not optimize:
        for n in tokens:
            if not _is_identifier.match(n):
                print >>sys.stderr, "lex: Bad token name '%s'" % n
                error = 1
            if warn and lexobj.lextokens.has_key(n):
                print >>sys.stderr, "lex: Warning. Token '%s' multiply defined." % n
            lexobj.lextokens[n] = None
    else:
        for n in tokens: lexobj.lextokens[n] = None
    if debug:
        print "lex: tokens = '%s'" % lexobj.lextokens.keys()
    # Validate literals: each must be a single-character string.
    try:
        for c in literals:
            if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
                print >>sys.stderr, "lex: Invalid literal %s. Must be a single character" % repr(c)
                error = 1
                continue
    except TypeError:
        print >>sys.stderr, "lex: Invalid literals specification. literals must be a sequence of characters."
        error = 1
    lexobj.lexliterals = literals
    # Build statemap
    if states:
        if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
            print >>sys.stderr, "lex: states must be defined as a tuple or list."
            error = 1
        else:
            for s in states:
                if not isinstance(s,types.TupleType) or len(s) != 2:
                    print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
                    error = 1
                    continue
                name, statetype = s
                if not isinstance(name,types.StringType):
                    print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
                    error = 1
                    continue
                if not (statetype == 'inclusive' or statetype == 'exclusive'):
                    print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
                    error = 1
                    continue
                if stateinfo.has_key(name):
                    print >>sys.stderr, "lex: state '%s' already defined." % name
                    error = 1
                    continue
                stateinfo[name] = statetype
    # Get a list of symbols with the t_ or s_ prefix
    tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
    # Now build up a list of functions and a list of strings
    funcsym = { } # Symbols defined as functions
    strsym = { } # Symbols defined as strings
    toknames = { } # Mapping of symbols to token names
    for s in stateinfo.keys():
        funcsym[s] = []
        strsym[s] = []
    ignore = { } # Ignore strings by state
    errorf = { } # Error functions by state
    if len(tsymbols) == 0:
        raise SyntaxError,"lex: no rules of the form t_rulename are defined."
    for f in tsymbols:
        t = ldict[f]
        states, tokname = _statetoken(f,stateinfo)
        toknames[f] = tokname
        if callable(t):
            for s in states: funcsym[s].append((f,t))
        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
            for s in states: strsym[s].append((f,t))
        else:
            print >>sys.stderr, "lex: %s not defined as a function or string" % f
            error = 1
    # Sort the functions by line number
    for f in funcsym.values():
        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
    # Sort the strings by regular expression length
    for s in strsym.values():
        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
    regexs = { }
    # Build the master regular expressions
    for state in stateinfo.keys():
        regex_list = []
        # Add rules defined by functions first
        for fname, f in funcsym[state]:
            line = f.func_code.co_firstlineno
            file = f.func_code.co_filename
            files[file] = None
            tokname = toknames[fname]
            ismethod = isinstance(f, types.MethodType)
            if not optimize:
                # Validate the rule function's argument count (methods also
                # take 'self', hence the extra required argument).
                nargs = f.func_code.co_argcount
                if ismethod:
                    reqargs = 2
                else:
                    reqargs = 1
                if nargs > reqargs:
                    print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
                    error = 1
                    continue
                if nargs < reqargs:
                    print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
                    error = 1
                    continue
                if tokname == 'ignore':
                    print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
                    error = 1
                    continue
            if tokname == 'error':
                errorf[state] = f
                continue
            if f.__doc__:
                # The rule's regex is taken from the function docstring.
                if not optimize:
                    try:
                        c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
                        if c.match(""):
                            print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
                            error = 1
                            continue
                    except re.error,e:
                        print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
                        if '#' in f.__doc__:
                            print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
                        error = 1
                        continue
                    if debug:
                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
                # Okay. The regular expression seemed okay.  Let's append it to the master regular
                # expression we're building
                regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
            else:
                print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
        # Now add all of the simple rules
        for name,r in strsym[state]:
            tokname = toknames[name]
            if tokname == 'ignore':
                if "\\" in r:
                    print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
                ignore[state] = r
                continue
            if not optimize:
                if tokname == 'error':
                    raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
                    error = 1
                    continue
                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
                    print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
                    error = 1
                    continue
                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
                    if (c.match("")):
                        print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
                        error = 1
                        continue
                except re.error,e:
                    print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
                    if '#' in r:
                        print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
                    error = 1
                    continue
                if debug:
                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
            regex_list.append("(?P<%s>%s)" % (name,r))
        if not regex_list:
            print >>sys.stderr, "lex: No rules defined for state '%s'" % state
            error = 1
        regexs[state] = regex_list
    if not optimize:
        for f in files.keys():
            if not _validate_file(f):
                error = 1
    if error:
        raise SyntaxError,"lex: Unable to build lexer."
    # From this point forward, we're reasonably confident that we can build the lexer.
    # No more errors will be generated, but there might be some warning messages.
    # Build the master regular expressions
    for state in regexs.keys():
        lexre, re_text = _form_master_re(regexs[state],reflags,ldict,toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        if debug:
            for i in range(len(re_text)):
                print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
    # For inclusive states, we need to add the INITIAL state
    for state,type in stateinfo.items():
        if state != "INITIAL" and type == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    # Set up ignore variables
    lexobj.lexstateignore = ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
    # Set up error functions
    lexobj.lexstateerrorf = errorf
    lexobj.lexerrorf = errorf.get("INITIAL",None)
    if warn and not lexobj.lexerrorf:
        print >>sys.stderr, "lex: Warning. no t_error rule is defined."
    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
            if warn and not errorf.has_key(s):
                print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
            if warn and not ignore.has_key(s) and lexobj.lexignore:
                print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
        elif stype == 'inclusive':
            # Inclusive states inherit error and ignore rules from INITIAL.
            if not errorf.has_key(s):
                errorf[s] = errorf.get("INITIAL",None)
            if not ignore.has_key(s):
                ignore[s] = ignore.get("INITIAL","")
    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj
    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab)
    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None,data=None):
    """Run the lexer as a main program: tokenize *data* (or the file named in
    sys.argv[1], or standard input) and print each token as
    (type, value, lineno, lexpos)."""
    if not data:
        try:
            filename = sys.argv[1]
            f = open(filename)
            data = f.read()
            f.close()
        except IndexError:
            # No filename argument -- fall back to reading stdin.
            print "Reading from standard input (type EOF to end):"
            data = sys.stdin.read()
    # Use the supplied lexer's input/token if given, else the module-level
    # shortcuts installed by lex().
    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token
    while 1:
        tok = _token()
        if not tok: break
        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that installs the regular expression *r* as the docstring of
    the decorated rule function (lex() reads rule regexes from docstrings)."""
    def _attach(func):
        func.__doc__ = r
        return func
    return _attach

# Alternative spelling of the TOKEN decorator
Token = TOKEN
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!")::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.4.5"
__versionTime__ = "16 December 2006 07:20"
__author__ = "Paul McGuire <[email protected]>"
import string
import copy,sys
import warnings
import re
import sre_constants
import xml.sax.saxutils
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
def _ustr(obj):
    """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
       str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
       then < returns the unicode object | encodes it with the default encoding | ... >.
    """
    # NOTE: Python 2 only -- relies on the 'unicode' builtin and on str()
    # raising UnicodeEncodeError for non-ASCII unicode objects.
    try:
        # If this works, then _ustr(obj) has the same behaviour as str(obj), so
        # it won't break any existing code.
        return str(obj)
    except UnicodeEncodeError, e:
        # The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
        # state that "The return value must be a string object". However, does a
        # unicode object (being a subclass of basestring) count as a "string
        # object"?
        # If so, then return a unicode object:
        return unicode(obj)
        # Else encode it... but how? There are many choices... :)
        # Replace unprintables with escape codes?
        #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
        # Replace unprintables with question marks?
        #return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
        # ...
def _str2dict(strg):
return dict( [(c,0) for c in strg] )
#~ return set( [c for c in strg] )
class _Constants(object):
    """Empty class used as a simple namespace holder for constant values."""
    pass
# Commonly-used character-class strings for building expressions.
# (Python 2: string.lowercase/string.uppercase are locale-dependent.)
alphas = string.lowercase + string.uppercase
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
class ParseBaseException(Exception):
    """base exception class for all parsing runtime exceptions"""
    __slots__ = ( "loc","msg","pstr","parserElement" )
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, pstr, loc, msg, elem=None ):
        self.loc = loc            # character offset of the failure
        self.msg = msg            # description of the failure
        self.pstr = pstr          # the string being parsed
        self.parserElement = elem # the ParserElement that failed, if known
    def __getattr__( self, aname ):
        """supported attributes by name are:
            - lineno - returns the line number of the exception text
            - col - returns the column number of the exception text
            - line - returns the line containing the exception text
        """
        # lineno/col/line are computed lazily from (loc, pstr) via the
        # module-level helper functions.
        if( aname == "lineno" ):
            return lineno( self.loc, self.pstr )
        elif( aname in ("col", "column") ):
            return col( self.loc, self.pstr )
        elif( aname == "line" ):
            return line( self.loc, self.pstr )
        else:
            raise AttributeError, aname
    def __str__( self ):
        return "%s (at char %d), (line:%d, col:%d)" % ( self.msg, self.loc, self.lineno, self.column )
    def __repr__( self ):
        return _ustr(self)
    def markInputline( self, markerString = ">!<" ):
        """Extracts the exception line from the input string, and marks
           the location of the exception with a special symbol.
        """
        line_str = self.line
        line_column = self.column - 1
        if markerString:
            line_str = "".join( [line_str[:line_column], markerString, line_str[line_column:]])
        return line_str.strip()
class ParseException(ParseBaseException):
    """Exception thrown when a parse expression doesn't match.

    Supported attributes by name are:
     - lineno - returns the line number of the exception text
     - col - returns the column number of the exception text
     - line - returns the line containing the exception text
    """
    # Fix: the original had a second, bare triple-quoted string after the
    # docstring -- a dead statement, not documentation.  Its content is merged
    # into the real docstring above.
    pass
class ParseFatalException(ParseBaseException):
    """User-throwable exception raised when inconsistent parse content
       is found; stops all parsing immediately (not caught as an ordinary
       match failure)."""
    pass
class ReparseException(ParseBaseException):
    """Exception intended to restart parsing with replacement text.

    - newstring  -- replacement text to parse (stored as newParseText)
    - restartLoc -- location at which to resume parsing (stored as reparseLoc)
    """
    # Fix: the constructor was misspelled '__init_' (missing trailing
    # underscore), so it was never invoked and the attributes below were
    # never set; callers fell through to ParseBaseException.__init__ with
    # the wrong argument count.
    def __init__( self, newstring, restartLoc ):
        self.newParseText = newstring
        self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
    """exception thrown by validate() if the grammar could be improperly recursive"""
    def __init__( self, parseElementList ):
        # parseElementList: the chain of parse elements forming the suspected cycle
        self.parseElementTrace = parseElementList
    def __str__( self ):
        return "RecursiveGrammarException: %s" % self.parseElementTrace
class ParseResults(object):
    """Structured parse results, to provide multiple means of access to the parsed data:
       - as a list (len(results))
       - by list index (results[0], results[1], etc.)
       - by attribute (results.<resultsName>)
       """
    # __toklist holds the matched tokens in order; __tokdict maps results
    # names to lists of (value, position) pairs.
    __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames" )
    def __new__(cls, toklist, name=None, asList=True, modal=True ):
        # Constructing a ParseResults from a ParseResults returns it unchanged.
        if isinstance(toklist, cls):
            return toklist
        retobj = object.__new__(cls)
        retobj.__doinit = True
        return retobj
    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__( self, toklist, name=None, asList=True, modal=True ):
        if self.__doinit:
            self.__doinit = False
            self.__name = None
            self.__parent = None
            self.__accumNames = {}
            if isinstance(toklist, list):
                self.__toklist = toklist[:]
            else:
                self.__toklist = [toklist]
            self.__tokdict = dict()
        # this line is related to debugging the asXML bug
        #~ asList = False
        if name:
            if not modal:
                # non-modal names accumulate all values instead of keeping the last
                self.__accumNames[name] = 0
            if isinstance(name,int):
                name = _ustr(name) # will always return a str, but use _ustr for consistency
            self.__name = name
            if not toklist in (None,'',[]):
                if isinstance(toklist,basestring):
                    toklist = [ toklist ]
                if asList:
                    if isinstance(toklist,ParseResults):
                        self[name] = (toklist.copy(),-1)
                    else:
                        self[name] = (ParseResults(toklist[0]),-1)
                    self[name].__name = name
                else:
                    try:
                        self[name] = toklist[0]
                    except (KeyError,TypeError):
                        self[name] = toklist
    def __getitem__( self, i ):
        if isinstance( i, (int,slice) ):
            return self.__toklist[i]
        else:
            if i not in self.__accumNames:
                return self.__tokdict[i][-1][0]
            else:
                return ParseResults([ v[0] for v in self.__tokdict[i] ])
    def __setitem__( self, k, v ):
        if isinstance(v,tuple):
            # v is an explicit (value, position) pair
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
            sub = v[0]
        elif isinstance(k,int):
            self.__toklist[k] = v
            sub = v
        else:
            self.__tokdict[k] = self.__tokdict.get(k,list()) + [(v,0)]
            sub = v
        if isinstance(sub,ParseResults):
            sub.__parent = self
    def __delitem__( self, i ):
        if isinstance(i,(int,slice)):
            del self.__toklist[i]
        else:
            # Fix: was 'del self._tokdict[i]' -- '_tokdict' is not an attribute
            # of this class (the real slot is the name-mangled '__tokdict'),
            # so deleting a named result always raised AttributeError.
            del self.__tokdict[i]
    def __contains__( self, k ):
        # Fix/idiom: use the 'in' operator instead of Py2-only dict.has_key().
        return k in self.__tokdict
    def __len__( self ): return len( self.__toklist )
    def __nonzero__( self ): return len( self.__toklist ) > 0
    def __iter__( self ): return iter( self.__toklist )
    def keys( self ):
        """Returns all named result keys."""
        return self.__tokdict.keys()
    def items( self ):
        """Returns all named result keys and values as a list of tuples."""
        return [(k,self[k]) for k in self.__tokdict.keys()]
    def values( self ):
        """Returns all named result values."""
        return [ v[-1][0] for v in self.__tokdict.values() ]
    def __getattr__( self, name ):
        # Attribute access falls back to named results; unknown names yield "".
        if name not in self.__slots__:
            if name in self.__tokdict:
                if name not in self.__accumNames:
                    return self.__tokdict[name][-1][0]
                else:
                    return ParseResults([ v[0] for v in self.__tokdict[name] ])
            else:
                return ""
        return None
    def __add__( self, other ):
        ret = self.copy()
        ret += other
        return ret
    def __iadd__( self, other ):
        if other.__tokdict:
            # shift the other result's named-token positions past our tokens
            offset = len(self.__toklist)
            addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
            otheritems = other.__tokdict.items()
            otherdictitems = [(k,(v[0],addoffset(v[1])) ) for (k,vlist) in otheritems for v in vlist]
            for k,v in otherdictitems:
                self[k] = v
                if isinstance(v[0],ParseResults):
                    v[0].__parent = self
        self.__toklist += other.__toklist
        self.__accumNames.update( other.__accumNames )
        del other
        return self
    def __repr__( self ):
        return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
    def __str__( self ):
        out = "["
        sep = ""
        for i in self.__toklist:
            if isinstance(i, ParseResults):
                out += sep + _ustr(i)
            else:
                out += sep + repr(i)
            sep = ", "
        out += "]"
        return out
    def _asStringList( self, sep='' ):
        # Flatten all tokens (recursively) into a list of strings.
        out = []
        for item in self.__toklist:
            if out and sep:
                out.append(sep)
            if isinstance( item, ParseResults ):
                out += item._asStringList()
            else:
                out.append( _ustr(item) )
        return out
    def asList( self ):
        """Returns the parse results as a nested list of matching tokens, all converted to strings."""
        out = []
        for res in self.__toklist:
            if isinstance(res,ParseResults):
                out.append( res.asList() )
            else:
                out.append( res )
        return out
    def asDict( self ):
        """Returns the named parse results as dictionary."""
        return dict( self.items() )
    def copy( self ):
        """Returns a new copy of a ParseResults object."""
        ret = ParseResults( self.__toklist )
        ret.__tokdict = self.__tokdict.copy()
        ret.__parent = self.__parent
        ret.__accumNames.update( self.__accumNames )
        ret.__name = self.__name
        return ret
    def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
        """Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
        nl = "\n"
        out = []
        # map token position -> results name, for tagging individual tokens
        namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items() for v in vlist ] )
        nextLevelIndent = indent + " "
        # collapse out indents if formatting is not desired
        if not formatted:
            indent = ""
            nextLevelIndent = ""
            nl = ""
        selfTag = None
        if doctag is not None:
            selfTag = doctag
        else:
            if self.__name:
                selfTag = self.__name
        if not selfTag:
            if namedItemsOnly:
                return ""
            else:
                selfTag = "ITEM"
        out += [ nl, indent, "<", selfTag, ">" ]
        worklist = self.__toklist
        for i,res in enumerate(worklist):
            if isinstance(res,ParseResults):
                if i in namedItems:
                    out += [ res.asXML(namedItems[i], namedItemsOnly and doctag is None, nextLevelIndent,formatted)]
                else:
                    out += [ res.asXML(None, namedItemsOnly and doctag is None, nextLevelIndent,formatted)]
            else:
                # individual token, see if there is a name for it
                resTag = None
                if i in namedItems:
                    resTag = namedItems[i]
                if not resTag:
                    if namedItemsOnly:
                        continue
                    else:
                        resTag = "ITEM"
                xmlBodyText = xml.sax.saxutils.escape(_ustr(res))
                out += [ nl, nextLevelIndent, "<", resTag, ">", xmlBodyText, "</", resTag, ">" ]
        out += [ nl, indent, "</", selfTag, ">" ]
        return "".join(out)
    def __lookup(self,sub):
        # Reverse lookup: find the results name under which 'sub' is stored.
        for k,vlist in self.__tokdict.items():
            for v,loc in vlist:
                if sub is v:
                    return k
        return None
    def getName(self):
        """Returns the results name for this token expression."""
        if self.__name:
            return self.__name
        elif self.__parent:
            par = self.__parent
            if par:
                return par.__lookup(self)
            else:
                return None
        elif (len(self) == 1 and
               len(self.__tokdict) == 1 and
               self.__tokdict.values()[0][0][1] in (0,-1)):
            return self.__tokdict.keys()[0]
        else:
            return None
    def dump(self,indent='',depth=0):
        """Diagnostic method for listing out the contents of a ParseResults.
           Accepts an optional indent argument so that this string can be embedded
           in a nested display of other data."""
        out = []
        out.append( indent+str(self.asList()) )
        keys = self.items()
        keys.sort()
        for k,v in keys:
            if out:
                out.append('\n')
            out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
            if isinstance(v,ParseResults):
                if v.keys():
                    #~ out.append('\n')
                    out.append( v.dump(indent,depth+1) )
                    #~ out.append('\n')
                else:
                    out.append(str(v))
            else:
                out.append(str(v))
        #~ out.append('\n')
        return "".join(out)
    # add support for pickle protocol
    def __getstate__(self):
        return ( self.__toklist,
                 ( self.__tokdict.copy(),
                   self.__parent,
                   self.__accumNames,
                   self.__name ) )
    def __setstate__(self,state):
        self.__toklist = state[0]
        self.__tokdict, \
        self.__parent, \
        inAccumNames, \
        self.__name = state[1]
        self.__accumNames = {}
        self.__accumNames.update(inAccumNames)
def col(loc,strg):
    """Returns current column within a string, counting newlines as line separators.
   The first column is number 1.
   """
    if loc < len(strg) and strg[loc] == '\n':
        return 1
    return loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
    """Returns current line number within a string, counting newlines as line separators.
   The first line is number 1.
   """
    return 1 + strg.count("\n", 0, loc)
def line( loc, strg ):
    """Returns the line of text containing loc within a string, counting newlines as line separators.
       """
    start = strg.rfind("\n", 0, loc) + 1
    end = strg.find("\n", loc)
    if end > 0:
        return strg[start:end]
    return strg[start:]
def _defaultStartDebugAction( instring, loc, expr ):
    # Default debug hook: announce that 'expr' is about to be matched at 'loc'.
    print "Match",expr,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
    # Default debug hook: report a successful match and the tokens produced.
    print "Matched",expr,"->",toks.asList()
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
    # Default debug hook: report the exception raised while matching 'expr'.
    print "Exception raised:", exc
def nullDebugAction(*args):
    """'Do-nothing' debug action, to suppress debugging output during parsing."""
    return None
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
    def setDefaultWhitespaceChars( chars ):
        """Overrides the default whitespace chars consulted when new
           ParserElements are constructed (see __init__).
        """
        ParserElement.DEFAULT_WHITE_CHARS = chars
    # pre-decorator-syntax staticmethod declaration
    setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
    def __init__( self, savelist=False ):
        # Initialize matching-behavior flags and bookkeeping common to all
        # parser elements; savelist controls ParseResults asList saving.
        self.parseAction = list()                 # callbacks run on a successful match
        self.failAction = None                    # optional callback run when a match fails
        #~ self.name = "<unknown>"  # don't define self.name, let subclasses try/except upcall
        self.strRepr = None                       # cached string representation
        self.resultsName = None
        self.saveAsList = savelist
        self.skipWhitespace = True
        self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        self.copyDefaultWhiteChars = True
        self.mayReturnEmpty = False
        self.keepTabs = False
        self.ignoreExprs = list()                 # expressions skipped before matching
        self.debug = False
        self.streamlined = False
        self.mayIndexError = True                 # checked in _parseNoCache to guard IndexError
        self.errmsg = ""
        self.modalResults = True
        self.debugActions = ( None, None, None )  # (match-start, success, exception) hooks
        self.re = None
    def copy( self ):
        """Make a copy of this ParserElement.  Useful for defining different parse actions
        for the same parsing pattern, using copies of the original parse element."""
        cpy = copy.copy( self )
        # shallow-copy the mutable lists so the copy's actions/ignores can diverge
        cpy.parseAction = self.parseAction[:]
        cpy.ignoreExprs = self.ignoreExprs[:]
        if self.copyDefaultWhiteChars:
            cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
        return cpy
    def setName( self, name ):
        """Define name for this expression, for use in debugging."""
        self.name = name
        self.errmsg = "Expected " + self.name
        # returning self permits call chaining
        return self
    def setResultsName( self, name, listAllMatches=False ):
        """Define name for referencing matching tokens as a nested attribute
           of the returned parse results.
           NOTE: this returns a *copy* of the original ParserElement object;
           this is so that the client can define a basic element, such as an
           integer, and reference it in multiple places with different names.
        """
        newself = self.copy()
        newself.resultsName = name
        # listAllMatches=True makes the name accumulate all values (non-modal)
        newself.modalResults = not listAllMatches
        return newself
    def normalizeParseActionArgs( f ):
        """Internal method used to decorate parse actions that take fewer than 3 arguments,
           so that all parse actions can be called as f(s,l,t)."""
        STAR_ARGS = 4                  # co_flags bit set when declared with *args
        try:
            restore = None
            if isinstance(f,type):
                # a class was passed: inspect its __init__, but hand back the class
                restore = f
                f = f.__init__
            if f.func_code.co_flags & STAR_ARGS:
                return f
            numargs = f.func_code.co_argcount
            if hasattr(f,"im_self"):
                numargs -= 1           # discount the bound 'self' argument
            if restore:
                f = restore
        except AttributeError:
            try:
                # not a function, must be a callable object, get info from the
                # im_func binding of its bound __call__ method
                if f.__call__.im_func.func_code.co_flags & STAR_ARGS:
                    return f
                numargs = f.__call__.im_func.func_code.co_argcount
                if hasattr(f.__call__,"im_self"):
                    numargs -= 1
            except AttributeError:
                # not a bound method, get info directly from __call__ method
                if f.__call__.func_code.co_flags & STAR_ARGS:
                    return f
                numargs = f.__call__.func_code.co_argcount
                if hasattr(f.__call__,"im_self"):
                    numargs -= 1
        #~ print "adding function %s with %d args" % (f.func_name,numargs)
        if numargs == 3:
            return f
        else:
            # wrap shorter signatures in an adapter exposing the full (s,l,t)
            if numargs == 2:
                def tmp(s,l,t):
                    return f(l,t)
            elif numargs == 1:
                def tmp(s,l,t):
                    return f(t)
            else: #~ numargs == 0:
                def tmp(s,l,t):
                    return f()
            return tmp
    normalizeParseActionArgs = staticmethod(normalizeParseActionArgs)
    def setParseAction( self, *fns ):
        """Define action to perform when successfully matching parse element definition.
           Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks),
           fn(loc,toks), fn(toks), or just fn(), where:
            - s = the original string being parsed
            - loc = the location of the matching substring
            - toks = a list of the matched tokens, packaged as a ParseResults object
           If the functions in fns modify the tokens, they can return them as the return
           value from fn, and the modified list of tokens will replace the original.
           Otherwise, fn does not need to return any value."""
        # replaces (does not append to) any previously set actions
        self.parseAction = map(self.normalizeParseActionArgs, list(fns))
        return self
    def addParseAction( self, *fns ):
        """Add parse action to expression's list of parse actions. See setParseAction_."""
        # appends, rather than replacing existing actions (cf. setParseAction)
        self.parseAction += map(self.normalizeParseActionArgs, list(fns))
        return self
    def setFailAction( self, fn ):
        """Define action to perform if parsing fails at this expression.
           Fail acton fn is a callable function that takes the arguments
           fn(s,loc,expr,err) where:
            - s = string being parsed
            - loc = location where expression match was attempted and failed
            - expr = the parse expression that failed
            - err = the exception thrown
           The function returns no value.  It may throw ParseFatalException
           if it is desired to stop parsing immediately."""
        self.failAction = fn
        return self
    def skipIgnorables( self, instring, loc ):
        # Repeatedly consume matches of the registered ignore expressions
        # until none of them match at the current location; return the new loc.
        exprsFound = True
        while exprsFound:
            exprsFound = False
            for e in self.ignoreExprs:
                try:
                    while 1:
                        loc,dummy = e._parse( instring, loc )
                        exprsFound = True
                except ParseException:
                    pass
        return loc
    def preParse( self, instring, loc ):
        # Advance loc past ignorable expressions and leading whitespace.
        if self.ignoreExprs:
            loc = self.skipIgnorables( instring, loc )
        if self.skipWhitespace:
            wt = self.whiteChars
            instrlen = len(instring)
            while loc < instrlen and instring[loc] in wt:
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        # Default implementation: match nothing, succeed with no tokens.
        # Subclasses override this with their real matching logic.
        return loc, []
    def postParse( self, instring, loc, tokenlist ):
        # Hook for subclasses to post-process matched tokens; default no-op.
        return tokenlist
#~ @profile
    def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
        # Core match driver: run preParse skipping, parseImpl, postParse, then
        # any parse actions, firing debug/fail callbacks as configured.
        # Returns (new loc, ParseResults).
        debugging = ( self.debug ) #and doActions )
        if debugging or self.failAction:
            # slower path: wrap parseImpl so debug/fail hooks see exceptions
            #~ print "Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )
            if (self.debugActions[0] ):
                self.debugActions[0]( instring, loc, self )
            if callPreParse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = loc
            try:
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
                #~ except ReparseException, retryEx:
                #~     pass
            except ParseException, err:
                #~ print "Exception raised:", err
                if self.debugActions[2]:
                    self.debugActions[2]( instring, tokensStart, self, err )
                if self.failAction:
                    self.failAction( instring, tokensStart, self, err )
                raise
        else:
            if callPreParse:
                preloc = self.preParse( instring, loc )
            else:
                preloc = loc
            tokensStart = loc
            if self.mayIndexError or loc >= len(instring):
                try:
                    loc,tokens = self.parseImpl( instring, preloc, doActions )
                except IndexError:
                    raise ParseException( instring, len(instring), self.errmsg, self )
            else:
                loc,tokens = self.parseImpl( instring, preloc, doActions )
        tokens = self.postParse( instring, loc, tokens )
        retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
        if self.parseAction and doActions:
            if debugging:
                try:
                    for fn in self.parseAction:
                        tokens = fn( instring, tokensStart, retTokens )
                        if tokens is not None:
                            # action returned replacement tokens: rewrap them
                            retTokens = ParseResults( tokens,
                                                      self.resultsName,
                                                      asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                      modal=self.modalResults )
                except ParseException, err:
                    #~ print "Exception raised in user parse action:", err
                    if (self.debugActions[2] ):
                        self.debugActions[2]( instring, tokensStart, self, err )
                    raise
            else:
                for fn in self.parseAction:
                    tokens = fn( instring, tokensStart, retTokens )
                    if tokens is not None:
                        retTokens = ParseResults( tokens,
                                                  self.resultsName,
                                                  asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
                                                  modal=self.modalResults )
        if debugging:
            #~ print "Matched",self,"->",retTokens.asList()
            if (self.debugActions[1] ):
                self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
        return loc, retTokens
    def tryParse( self, instring, loc ):
        # Attempt a match without running parse actions; return only the end loc.
        return self._parse( instring, loc, doActions=False )[0]
    # this method gets repeatedly called during backtracking with the same arguments -
    # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
    def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
        """Memoizing ("packrat") variant of _parseNoCache; installed as _parse by
           enablePackrat().  Caches both successful results and parse exceptions,
           keyed by (element, string, location, callPreParse)."""
        # parse actions may have side effects, so results with actions are never cached
        if doActions and self.parseAction:
            return self._parseNoCache( instring, loc, doActions, callPreParse )
        lookup = (self,instring,loc,callPreParse)
        if lookup in ParserElement._exprArgCache:
            value = ParserElement._exprArgCache[ lookup ]
            if isinstance(value,Exception):
                # re-raise the cached failure, updating its location for this attempt
                if isinstance(value,ParseBaseException):
                    value.loc = loc
                raise value
            return value
        else:
            try:
                ParserElement._exprArgCache[ lookup ] = \
                    value = self._parseNoCache( instring, loc, doActions, callPreParse )
                return value
            except ParseBaseException, pe:
                # failures are cached too, so repeated attempts fail fast
                ParserElement._exprArgCache[ lookup ] = pe
                raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
    def resetCache():
        """Clear the shared packrat memo cache (called at the start of each top-level parse)."""
        ParserElement._exprArgCache.clear()
    resetCache = staticmethod(resetCache)
    # class-wide flag; packrat parsing can only be switched on, not off again
    _packratEnabled = False
    def enablePackrat():
        """Enables "packrat" parsing, which adds memoizing to the parsing logic.
           Repeated parse attempts at the same string location (which happens
           often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code.  Memoizing is done of
           both valid results and parsing exceptions.

           This speedup may break existing programs that use parse actions that
           have side-effects.  For this reason, packrat parsing is disabled when
           you first import pyparsing.  To activate the packrat feature, your
           program must call the class method ParserElement.enablePackrat().  If
           your program uses psyco to "compile as you go", you must call
           enablePackrat before calling psyco.full().  If you do not do this,
           Python will crash.  For best results, call enablePackrat() immediately
           after importing pyparsing.
        """
        if not ParserElement._packratEnabled:
            ParserElement._packratEnabled = True
            # swap in the caching implementation for all ParserElements
            ParserElement._parse = ParserElement._parseCache
    enablePackrat = staticmethod(enablePackrat)
def parseString( self, instring ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if self.keepTabs:
loc, tokens = self._parse( instring, 0 )
else:
loc, tokens = self._parse( instring.expandtabs(), 0 )
return tokens
def scanString( self, instring, maxMatches=sys.maxint ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
maxMatches argument, to clip scanning after 'n' matches are found."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
matches += 1
yield tokens, preloc, nextLoc
loc = nextLoc
def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(out)
def searchString( self, instring, maxMatches=sys.maxint ):
"""Another extension to scanString, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
maxMatches argument, to clip searching after 'n' matches are found.
"""
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of += operator"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return other + self
def __or__(self, other ):
"""Implementation of | operator - returns MatchFirst"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of |= operator"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns Or"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^= operator"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns Each"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of right-& operator"""
if isinstance( other, basestring ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot add element of type %s to ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return other & self
    def __invert__( self ):
        """Implementation of ~ operator - returns NotAny (negative lookahead)"""
        return NotAny( self )
    def suppress( self ):
        """Suppresses the output of this ParserElement; useful to keep punctuation from
           cluttering up returned output.  Returns a new Suppress wrapper around self.
        """
        return Suppress( self )
    def leaveWhitespace( self ):
        """Disables the skipping of whitespace before matching the characters in the
           ParserElement's defined pattern.  This is normally only used internally by
           the pyparsing module, but may be needed in some whitespace-sensitive grammars.
           Returns self, to allow chaining.
        """
        self.skipWhitespace = False
        return self
    def setWhitespaceChars( self, chars ):
        """Overrides the default whitespace chars to be skipped before matching.
           Also re-enables whitespace skipping and marks the chars as explicitly set,
           so they are not overwritten by class-level defaults on copy.
        """
        self.skipWhitespace = True
        self.whiteChars = chars
        self.copyDefaultWhiteChars = False
        return self
    def parseWithTabs( self ):
        """Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
           Must be called before parseString when the input grammar contains elements that
           match <TAB> characters.  Returns self, to allow chaining."""
        self.keepTabs = True
        return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other )
else:
self.ignoreExprs.append( Suppress( other ) )
return self
    def setDebugActions( self, startAction, successAction, exceptionAction ):
        """Enable display of debugging messages while doing pattern matching.
           Any action passed as None falls back to the corresponding module-level
           default debug handler."""
        self.debugActions = (startAction or _defaultStartDebugAction,
                             successAction or _defaultSuccessDebugAction,
                             exceptionAction or _defaultExceptionDebugAction)
        self.debug = True
        return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
    def __str__( self ):
        # display name - set by setName() or derived in subclasses' __init__
        return self.name
    def __repr__( self ):
        # delegate to the string form via _ustr, for uniform unicode handling
        return _ustr(self)
    def streamline( self ):
        """Hook for expression-tree optimization; marks this element streamlined and
           invalidates any cached string representation.  Returns self."""
        self.streamlined = True
        self.strRepr = None
        return self
    def checkRecursion( self, parseElementList ):
        # default: leaf elements cannot recurse; overridden by expression containers
        pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
return self.parseString(file_contents)
class Token(ParserElement):
    """Abstract ParserElement subclass, for defining atomic matching patterns."""
    def __init__( self ):
        super(Token,self).__init__( savelist=False )
        # pre-built exception instance, reused by subclasses' parseImpl for speed
        self.myException = ParseException("",0,"",self)
    def setName(self, name):
        ret = super(Token,self).setName(name)
        # keep the error message and the reusable exception in sync with the new name
        self.errmsg = "Expected " + self.name
        ret.myException.msg = self.errmsg
        return ret
class Empty(Token):
    """A zero-width token that always matches without consuming input."""
    def __init__( self ):
        super(Empty,self).__init__()
        self.name = "Empty"
        self.mayIndexError = False
        self.mayReturnEmpty = True
class NoMatch(Token):
    """A token that can never match, at any position."""
    def __init__( self ):
        super(NoMatch,self).__init__()
        self.name = "NoMatch"
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.errmsg = "Unmatchable token"
        self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        # unconditionally fail, reporting the current location
        failure = self.myException
        failure.loc = loc
        failure.pstr = instring
        raise failure
class Literal(Token):
    """Token to exactly match a specified string."""
    def __init__( self, matchString ):
        super(Literal,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            warnings.warn("null string passed to Literal; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
            # degrade this instance to Empty so a zero-length literal still parses
            self.__class__ = Empty
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.myException.msg = self.errmsg
        self.mayIndexError = False
    # Performance tuning: this routine gets called a *lot*
    # if this is a single character match string and the first character matches,
    # short-circuit as quickly as possible, and avoid calling startswith
    #~ @profile
    def parseImpl( self, instring, loc, doActions=True ):
        if (instring[loc] == self.firstMatchChar and
            (self.matchLen==1 or instring.startswith(self.match,loc)) ):
            return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        # reuse the shared exception instance instead of constructing a new one
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Keyword(Token):
    """Token to exactly match a specified string as a keyword, that is, it must be
       immediately followed by a non-keyword character.  Compare with Literal::
         Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
         Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
       Accepts two optional constructor arguments in addition to the keyword string:
       identChars is a string of characters that would be valid identifier characters,
       defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive
       matching, default is False.
    """
    DEFAULT_KEYWORD_CHARS = alphanums+"_$"
    def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
        super(Keyword,self).__init__()
        self.match = matchString
        self.matchLen = len(matchString)
        try:
            self.firstMatchChar = matchString[0]
        except IndexError:
            # NOTE(review): unlike Literal, this leaves firstMatchChar unset for an
            # empty keyword; a later parseImpl call would raise AttributeError
            warnings.warn("null string passed to Keyword; use Empty() instead",
                            SyntaxWarning, stacklevel=2)
        self.name = '"%s"' % self.match
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = False
        self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.caseless = caseless
        if caseless:
            # compare upper-cased input against an upper-cased keyword
            self.caselessmatch = matchString.upper()
            identChars = identChars.upper()
        self.identChars = _str2dict(identChars)
    def parseImpl( self, instring, loc, doActions=True ):
        if self.caseless:
            # match the text, then verify the characters on both sides are not identifier chars
            if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
                 (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
                 (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
                return loc+self.matchLen, self.match
        else:
            if (instring[loc] == self.firstMatchChar and
                (self.matchLen==1 or instring.startswith(self.match,loc)) and
                (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
                (loc == 0 or instring[loc-1] not in self.identChars) ):
                return loc+self.matchLen, self.match
        #~ raise ParseException( instring, loc, self.errmsg )
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def copy(self):
        # copies revert to the class-default keyword characters
        c = super(Keyword,self).copy()
        c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
        return c
    def setDefaultKeywordChars( chars ):
        """Overrides the default Keyword chars
        """
        Keyword.DEFAULT_KEYWORD_CHARS = chars
    setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
    """Token to match a specified string, ignoring case of letters.
       Note: the matched results will always be in the case of the given
       match string, NOT the case of the input text.
    """
    def __init__( self, matchString ):
        # match against the upper-cased text, but return the original spelling
        super(CaselessLiteral,self).__init__( matchString.upper() )
        # Preserve the defining literal.
        self.returnString = matchString
        self.name = "'%s'" % self.returnString
        self.errmsg = "Expected " + self.name
        self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[ loc:loc+self.matchLen ].upper() != self.match:
            failure = self.myException
            failure.loc = loc
            failure.pstr = instring
            raise failure
        return loc+self.matchLen, self.returnString
class CaselessKeyword(Keyword):
    """Caseless version of Keyword: matches the keyword irrespective of case, but
       still requires that it not be embedded inside a larger identifier."""
    def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
        super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
    def parseImpl( self, instring, loc, doActions=True ):
        # fix: also reject a match preceded by an identifier character, for
        # consistency with Keyword.parseImpl's caseless branch
        if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
             (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
             (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
            return loc+self.matchLen, self.match
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Word(Token):
    """Token for matching words composed of allowed character sets.
       Defined with string containing all allowed initial characters,
       an optional string containing allowed body characters (if omitted,
       defaults to the initial character set), and an optional minimum,
       maximum, and/or exact length.
    """
    def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0 ):
        super(Word,self).__init__()
        self.initCharsOrig = initChars
        self.initChars = _str2dict(initChars)
        if bodyChars :
            self.bodyCharsOrig = bodyChars
            self.bodyChars = _str2dict(bodyChars)
        else:
            self.bodyCharsOrig = initChars
            self.bodyChars = _str2dict(initChars)
        self.maxSpecified = max > 0
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = sys.maxint
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.myException.msg = self.errmsg
        self.mayIndexError = False
        # simple Words (default lengths, no embedded spaces) can be matched with a
        # precompiled regex instead of the char-by-char scan in parseImpl
        if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
            if self.bodyCharsOrig == self.initCharsOrig:
                self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
            elif len(self.bodyCharsOrig) == 1:
                self.reString = "%s[%s]*" % \
                                      (re.escape(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            else:
                self.reString = "[%s][%s]*" % \
                                      (_escapeRegexRangeChars(self.initCharsOrig),
                                      _escapeRegexRangeChars(self.bodyCharsOrig),)
            try:
                self.re = re.compile( self.reString )
            except:
                # fall back to the manual scanning loop on any regex failure
                self.re = None
    def parseImpl( self, instring, loc, doActions=True ):
        # fast path: use the precompiled regex when one was built in __init__
        if self.re:
            result = self.re.match(instring,loc)
            if not result:
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
            loc = result.end()
            return loc,result.group()
        if not(instring[ loc ] in self.initChars):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        instrlen = len(instring)
        bodychars = self.bodyChars
        maxloc = start + self.maxLen
        maxloc = min( maxloc, instrlen )
        # consume body characters up to the maximum allowed length
        while loc < maxloc and instring[loc] in bodychars:
            loc += 1
        throwException = False
        if loc - start < self.minLen:
            throwException = True
        # with an explicit max, another body character right after means the word ran too long
        if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
            throwException = True
        if throwException:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(Word,self).__str__()
        except:
            pass
        if self.strRepr is None:
            def charsAsStr(s):
                # abbreviate long character sets for display
                if len(s)>4:
                    return s[:4]+"..."
                else:
                    return s
            if ( self.initCharsOrig != self.bodyCharsOrig ):
                self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
            else:
                self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
        return self.strRepr
class Regex(Token):
    """Token for matching strings that match a given regular expression.
       Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
    """
    def __init__( self, pattern, flags=0):
        """The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
        super(Regex,self).__init__()
        if len(pattern) == 0:
            warnings.warn("null string passed to Regex; use Empty() instead",
                    SyntaxWarning, stacklevel=2)
        self.pattern = pattern
        self.flags = flags
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error,e:
            # warn with the offending pattern, then let the compile error propagate
            warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.myException.msg = self.errmsg
        self.mayIndexError = False
        # a regex can legitimately match the empty string
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        result = self.re.match(instring,loc)
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        d = result.groupdict()
        ret = ParseResults(result.group())
        if d:
            # expose named groups as named results
            for k in d.keys():
                ret[k] = d[k]
        return loc,ret
    def __str__( self ):
        try:
            return super(Regex,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "Re:(%s)" % repr(self.pattern)
        return self.strRepr
class QuotedString(Token):
    """Token for matching strings that are delimited by quoting characters.
    """
    def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
        """
           Defined with the following parameters:
            - quoteChar - string of one or more characters defining the quote delimiting string
            - escChar - character to escape quotes, typically backslash (default=None)
            - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
            - multiline - boolean indicating whether quotes can span multiple lines (default=False)
            - unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
            - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
        """
        super(QuotedString,self).__init__()
        # remove white space from quote chars - wont work anyway
        quoteChar = quoteChar.strip()
        if len(quoteChar) == 0:
            warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
            raise SyntaxError()
        if endQuoteChar is None:
            endQuoteChar = quoteChar
        else:
            endQuoteChar = endQuoteChar.strip()
            if len(endQuoteChar) == 0:
                warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
                raise SyntaxError()
        self.quoteChar = quoteChar
        self.quoteCharLen = len(quoteChar)
        self.firstQuoteChar = quoteChar[0]
        self.endQuoteChar = endQuoteChar
        self.endQuoteCharLen = len(endQuoteChar)
        self.escChar = escChar
        self.escQuote = escQuote
        self.unquoteResults = unquoteResults
        # build a single regex that matches the entire quoted string in one pass:
        # open quote, then a body of (non-terminating chars | escaped quote | escaped char)*
        if multiline:
            self.flags = re.MULTILINE | re.DOTALL
            self.pattern = r'%s(?:[^%s%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        else:
            self.flags = 0
            self.pattern = r'%s(?:[^%s\n\r%s]' % \
                ( re.escape(self.quoteChar),
                  _escapeRegexRangeChars(self.endQuoteChar[0]),
                  (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
        if len(self.endQuoteChar) > 1:
            # allow any proper prefix of a multi-char end-quote inside the body
            self.pattern += (
                '|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
                                               _escapeRegexRangeChars(self.endQuoteChar[i]))
                                    for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
                )
        if escQuote:
            self.pattern += (r'|(?:%s)' % re.escape(escQuote))
        if escChar:
            self.pattern += (r'|(?:%s.)' % re.escape(escChar))
            self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
        self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
        try:
            self.re = re.compile(self.pattern, self.flags)
            self.reString = self.pattern
        except sre_constants.error,e:
            warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
                SyntaxWarning, stacklevel=2)
            raise
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.myException.msg = self.errmsg
        self.mayIndexError = False
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # cheap first-character test before invoking the regex
        result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
        if not result:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        loc = result.end()
        ret = result.group()
        if self.unquoteResults:
            # strip off quotes
            ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
            if isinstance(ret,basestring):
                # replace escaped characters
                if self.escChar:
                    ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
                # replace escaped quotes
                if self.escQuote:
                    ret = ret.replace(self.escQuote, self.endQuoteChar)
        return loc, ret
    def __str__( self ):
        try:
            return super(QuotedString,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
        return self.strRepr
class CharsNotIn(Token):
    """Token for matching runs of characters *not* in a given set.
       Defined with a string of all disallowed characters, and an optional
       minimum, maximum, and/or exact length.
    """
    def __init__( self, notChars, min=1, max=0, exact=0 ):
        super(CharsNotIn,self).__init__()
        # whitespace is significant here - it may itself be a matchable character
        self.skipWhitespace = False
        self.notChars = notChars
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = sys.maxint
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact
        self.name = _ustr(self)
        self.errmsg = "Expected " + self.name
        self.mayReturnEmpty = ( self.minLen == 0 )
        self.myException.msg = self.errmsg
        self.mayIndexError = False
    def _noMatch( self, instring, loc ):
        # helper: populate the shared exception instance and raise it
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
    def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] in self.notChars:
            self._noMatch( instring, loc )
        start = loc
        loc += 1
        forbidden = self.notChars
        maxloc = min( start + self.maxLen, len(instring) )
        # consume characters until a forbidden one or the max length is reached
        while loc < maxloc and instring[loc] not in forbidden:
            loc += 1
        if loc - start < self.minLen:
            self._noMatch( instring, loc )
        return loc, instring[start:loc]
    def __str__( self ):
        try:
            return super(CharsNotIn, self).__str__()
        except:
            pass
        if self.strRepr is None:
            shown = self.notChars
            if len(shown) > 4:
                self.strRepr = "!W:(%s...)" % shown[:4]
            else:
                self.strRepr = "!W:(%s)" % shown
        return self.strRepr
class White(Token):
    """Special matching class for matching whitespace.  Normally, whitespace is ignored
       by pyparsing grammars.  This class is included when some whitespace structures
       are significant.  Define with a string containing the whitespace characters to be
       matched; default is " \\t\\n".  Also takes optional min, max, and exact arguments,
       as defined for the Word class."""
    # display names for each recognized whitespace character
    whiteStrs = {
        " " : "<SPC>",
        "\t": "<TAB>",
        "\n": "<LF>",
        "\r": "<CR>",
        "\f": "<FF>",
        }
    def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
        super(White,self).__init__()
        self.matchWhite = ws
        # characters we intend to match must be excluded from the skip-set
        self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
        #~ self.leaveWhitespace()
        self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
        self.mayReturnEmpty = True
        self.errmsg = "Expected " + self.name
        self.myException.msg = self.errmsg
        self.minLen = min
        if max > 0:
            self.maxLen = max
        else:
            self.maxLen = sys.maxint
        if exact > 0:
            self.maxLen = exact
            self.minLen = exact
    def parseImpl( self, instring, loc, doActions=True ):
        if not(instring[ loc ] in self.matchWhite):
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        start = loc
        loc += 1
        maxloc = start + self.maxLen
        maxloc = min( maxloc, len(instring) )
        # consume matching whitespace characters up to the maximum length
        while loc < maxloc and instring[loc] in self.matchWhite:
            loc += 1
        if loc - start < self.minLen:
            #~ raise ParseException( instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, instring[start:loc]
class PositionToken(Token):
    """Abstract base for zero-width tokens that match on position rather than on text."""
    def __init__( self ):
        super(PositionToken,self).__init__()
        self.name = self.__class__.__name__
        self.mayIndexError = False
        self.mayReturnEmpty = True
class GoToColumn(PositionToken):
    """Token to advance to a specific column of input text; useful for tabular report scraping."""
    def __init__( self, colno ):
        super(GoToColumn,self).__init__()
        self.col = colno
    def preParse( self, instring, loc ):
        # skip ignorables and whitespace until the target column (or non-space) is reached
        if col(loc,instring) != self.col:
            instrlen = len(instring)
            if self.ignoreExprs:
                loc = self.skipIgnorables( instring, loc )
            while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
                loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        thiscol = col( loc, instring )
        if thiscol > self.col:
            # already past the target column - cannot go backwards
            raise ParseException( instring, loc, "Text not in expected column", self )
        newloc = loc + self.col - thiscol
        # return the text spanned between the current and target columns
        ret = instring[ loc: newloc ]
        return newloc, ret
class LineStart(PositionToken):
    """Matches if current position is at the beginning of a line within the parse string"""
    def __init__( self ):
        super(LineStart,self).__init__()
        # only skip intra-line whitespace; newlines are significant for this token
        self.setWhitespaceChars( " \t" )
        self.errmsg = "Expected start of line"
        self.myException.msg = self.errmsg
    def preParse( self, instring, loc ):
        preloc = super(LineStart,self).preParse(instring,loc)
        # step over a newline so parsing resumes at the start of the next line
        # NOTE(review): indexes instring[preloc] without a length guard - may
        # IndexError at end of string; confirm against callers
        if instring[preloc] == "\n":
            loc += 1
        return loc
    def parseImpl( self, instring, loc, doActions=True ):
        if not( loc==0 or
            (loc == self.preParse( instring, 0 )) or
            (instring[loc-1] == "\n") ): #col(loc, instring) != 1:
            #~ raise ParseException( instring, loc, "Expected start of line" )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class LineEnd(PositionToken):
    """Matches if current position is at the end of a line within the parse string"""
    def __init__( self ):
        super(LineEnd,self).__init__()
        # only skip intra-line whitespace; newlines are significant for this token
        self.setWhitespaceChars( " \t" )
        self.errmsg = "Expected end of line"
        self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc<len(instring):
            if instring[loc] == "\n":
                return loc+1, "\n"
            else:
                #~ raise ParseException( instring, loc, "Expected end of line" )
                exc = self.myException
                exc.loc = loc
                exc.pstr = instring
                raise exc
        elif loc == len(instring):
            # NOTE: deliberately returns loc+1, one position past the end of the string
            return loc+1, []
        else:
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
class StringStart(PositionToken):
    """Matches only at the very beginning of the parse string."""
    def __init__( self ):
        super(StringStart,self).__init__()
        self.errmsg = "Expected start of text"
        self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        # position 0 always matches; otherwise everything before loc must be
        # skippable whitespace/ignorables
        if loc != 0 and loc != self.preParse( instring, 0 ):
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
class StringEnd(PositionToken):
    """Matches only at the very end of the parse string."""
    def __init__( self ):
        super(StringEnd,self).__init__()
        self.errmsg = "Expected end of text"
        self.myException.msg = self.errmsg
    def parseImpl( self, instring, loc, doActions=True ):
        if loc == len(instring):
            # NOTE: historically returns loc+1, one position past the end of the string
            return loc+1, []
        # either unmatched text remains, or loc is already past the end - fail
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class ParseExpression(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
    def __init__( self, exprs, savelist = False ):
        super(ParseExpression,self).__init__(savelist)
        # normalize the constructor argument to a list of ParserElements
        if isinstance( exprs, list ):
            self.exprs = exprs
        elif isinstance( exprs, basestring ):
            self.exprs = [ Literal( exprs ) ]
        else:
            self.exprs = [ exprs ]
    def __getitem__( self, i ):
        return self.exprs[i]
    def append( self, other ):
        # adding an expression invalidates the cached string representation
        self.exprs.append( other )
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        """Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
           all contained expressions."""
        self.skipWhitespace = False
        # copy contained expressions so callers' originals are not modified
        self.exprs = [ e.copy() for e in self.exprs ]
        for e in self.exprs:
            e.leaveWhitespace()
        return self
    def ignore( self, other ):
        # register the ignorable on self, then propagate it to all children
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseExpression, self).ignore( other )
                for e in self.exprs:
                    e.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseExpression, self).ignore( other )
            for e in self.exprs:
                e.ignore( self.ignoreExprs[-1] )
        return self
    def __str__( self ):
        try:
            return super(ParseExpression,self).__str__()
        except:
            pass
        if self.strRepr is None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
        return self.strRepr
    def streamline( self ):
        super(ParseExpression,self).streamline()
        for e in self.exprs:
            e.streamline()
        # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
        # but only if there are no parse actions or resultsNames on the nested And's
        # (likewise for Or's and MatchFirst's)
        if ( len(self.exprs) == 2 ):
            other = self.exprs[0]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = other.exprs[:] + [ self.exprs[1] ]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
            other = self.exprs[-1]
            if ( isinstance( other, self.__class__ ) and
                  not(other.parseAction) and
                  other.resultsName is None and
                  not other.debug ):
                self.exprs = self.exprs[:-1] + other.exprs[:]
                self.strRepr = None
                self.mayReturnEmpty |= other.mayReturnEmpty
                self.mayIndexError |= other.mayIndexError
        return self
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
        return ret
    def validate( self, validateTrace=[] ):
        # validate all contained expressions, extending the trace of visited elements
        tmp = validateTrace[:]+[self]
        for e in self.exprs:
            e.validate(tmp)
        self.checkRecursion( [] )
class And(ParseExpression):
    """Requires all given ParseExpressions to be found in the given order.
       Expressions may be separated by whitespace.
       May be constructed using the '+' operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(And,self).__init__(exprs, savelist)
        # the sequence can match empty only if every element can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        # whitespace handling is dictated by the first element
        self.setWhitespaceChars( exprs[0].whiteChars )
        self.skipWhitespace = exprs[0].skipWhitespace
    def parseImpl( self, instring, loc, doActions=True ):
        # pass False as last arg to _parse for first element, since we already
        # pre-parsed the string as part of our And pre-parsing
        loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
        for e in self.exprs[1:]:
            loc, exprtokens = e._parse( instring, loc, doActions )
            # accumulate only non-empty results (or results carrying named tokens)
            if exprtokens or exprtokens.keys():
                resultlist += exprtokens
        return loc, resultlist
    def __iadd__(self, other ):
        # support "expr += other" by appending in place
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #And( [ self, other ] )
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
            # once an element must consume input, later elements cannot recurse at loc
            if not e.mayReturnEmpty:
                break
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
class Or(ParseExpression):
    """Requires that at least one ParseExpression is found.
       If two expressions match, the expression that matches the longest string will be used.
       May be constructed using the '^' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(Or,self).__init__(exprs, savelist)
        # the alternation can match empty if any alternative can
        self.mayReturnEmpty = False
        for e in self.exprs:
            if e.mayReturnEmpty:
                self.mayReturnEmpty = True
                break
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1    # furthest location reached by any failing alternative
        maxMatchLoc = -1  # furthest location reached by any matching alternative
        for e in self.exprs:
            try:
                # tryParse runs without actions; the winner is re-parsed below
                loc2 = e.tryParse( instring, loc )
            except ParseException, err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError, err:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
            else:
                if loc2 > maxMatchLoc:
                    maxMatchLoc = loc2
                    maxMatchExp = e
        if maxMatchLoc < 0:
            # no alternative matched - report the failure that got the furthest
            if self.exprs:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
        # re-parse with the longest-matching alternative, this time running actions
        return maxMatchExp._parse( instring, loc, doActions )
    def __ixor__(self, other ):
        # support "expr ^= other" by appending in place
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #Or( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
    """Requires that at least one ParseExpression is found.
       If two expressions match, the first one listed is the one that will match.
       May be constructed using the '|' operator.
    """
    def __init__( self, exprs, savelist = False ):
        super(MatchFirst,self).__init__(exprs, savelist)
        if exprs:
            # the alternation can match empty if any alternative can
            self.mayReturnEmpty = False
            for e in self.exprs:
                if e.mayReturnEmpty:
                    self.mayReturnEmpty = True
                    break
        else:
            # an empty MatchFirst trivially "matches" nothing
            self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        maxExcLoc = -1  # furthest location reached by any failing alternative
        for e in self.exprs:
            try:
                # first alternative to parse successfully wins
                ret = e._parse( instring, loc, doActions )
                return ret
            except ParseException, err:
                if err.loc > maxExcLoc:
                    maxException = err
                    maxExcLoc = err.loc
            except IndexError, err:
                if len(instring) > maxExcLoc:
                    maxException = ParseException(instring,len(instring),e.errmsg,self)
                    maxExcLoc = len(instring)
        # only got here if no expression matched, raise exception for match that made it the furthest
        else:
            if self.exprs:
                raise maxException
            else:
                raise ParseException(instring, loc, "no defined alternatives to match", self)
    def __ior__(self, other ):
        # support "expr |= other" by appending in place
        if isinstance( other, basestring ):
            other = Literal( other )
        return self.append( other ) #MatchFirst( [ self, other ] )
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
    """Requires all given ParseExpressions to be found, but in any order.
       Expressions may be separated by whitespace.
       May be constructed using the '&' operator.
    """
    def __init__( self, exprs, savelist = True ):
        super(Each,self).__init__(exprs, savelist)
        # the group can match empty only if every element can
        self.mayReturnEmpty = True
        for e in self.exprs:
            if not e.mayReturnEmpty:
                self.mayReturnEmpty = False
                break
        self.skipWhitespace = True
        # partition elements by repetition class; Optional/ZeroOrMore/OneOrMore
        # wrappers are unwrapped so their inner expressions are tried directly
        self.optionals = [ e.expr for e in exprs if isinstance(e,Optional) ]
        self.multioptionals = [ e.expr for e in exprs if isinstance(e,ZeroOrMore) ]
        self.multirequired = [ e.expr for e in exprs if isinstance(e,OneOrMore) ]
        self.required = [ e for e in exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
        self.required += self.multirequired
    def parseImpl( self, instring, loc, doActions=True ):
        tmpLoc = loc
        tmpReqd = self.required[:]
        tmpOpt  = self.optionals[:]
        matchOrder = []
        # repeatedly scan all still-eligible elements until a full pass matches nothing
        keepMatching = True
        while keepMatching:
            tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
            failed = []
            for e in tmpExprs:
                try:
                    tmpLoc = e.tryParse( instring, tmpLoc )
                except ParseException:
                    failed.append(e)
                else:
                    matchOrder.append(e)
                    # single-occurrence elements are consumed once matched
                    if e in tmpReqd:
                        tmpReqd.remove(e)
                    elif e in tmpOpt:
                        tmpOpt.remove(e)
            if len(failed) == len(tmpExprs):
                keepMatching = False
        if tmpReqd:
            missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
            raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
        # re-parse in the discovered order, this time running parse actions
        resultlist = []
        for e in matchOrder:
            loc,results = e._parse(instring,loc,doActions)
            resultlist.append(results)
        # merge results, combining values for duplicate result names
        finalResults = ParseResults([])
        for r in resultlist:
            dups = {}
            for k in r.keys():
                if k in finalResults.keys():
                    tmp = ParseResults(finalResults[k])
                    tmp += ParseResults(r[k])
                    dups[k] = tmp
            finalResults += ParseResults(r)
            for k,v in dups.items():
                finalResults[k] = v
        return loc, finalResults
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
        return self.strRepr
    def checkRecursion( self, parseElementList ):
        subRecCheckList = parseElementList[:] + [ self ]
        for e in self.exprs:
            e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
    """Abstract subclass of ParserElement, for combining and post-processing parsed tokens.

       Wraps a single contained expression (self.expr); subclasses override
       parseImpl/postParse to adjust how the contained expression's results
       are produced.  self.expr may be None for an as-yet-undefined Forward.
    """
    def __init__( self, expr, savelist=False ):
        """
        * expr -- the contained expression; a bare string is promoted to a Literal
        * savelist -- passed through to ParserElement
        """
        super(ParseElementEnhance,self).__init__(savelist)
        if isinstance( expr, basestring ):
            expr = Literal(expr)
        self.expr = expr
        self.strRepr = None
        if expr is not None:
            # inherit the contained expression's parsing characteristics
            self.mayIndexError = expr.mayIndexError
            self.setWhitespaceChars( expr.whiteChars )
            self.skipWhitespace = expr.skipWhitespace
            self.saveAsList = expr.saveAsList
    def parseImpl( self, instring, loc, doActions=True ):
        # delegate matching to the contained expression
        if self.expr is not None:
            return self.expr._parse( instring, loc, doActions, callPreParse=False )
        else:
            raise ParseException("",loc,self.errmsg,self)
    def leaveWhitespace( self ):
        """Suppress leading-whitespace skipping in this and the contained expression."""
        self.skipWhitespace = False
        # Bug fix: test for None *before* copying; previously self.expr.copy()
        # was called unconditionally and raised AttributeError when self.expr
        # was None (e.g. an empty Forward).
        if self.expr is not None:
            self.expr = self.expr.copy()
            self.expr.leaveWhitespace()
        return self
    def ignore( self, other ):
        """Register an expression to skip, propagating it to the contained expression."""
        if isinstance( other, Suppress ):
            if other not in self.ignoreExprs:
                super( ParseElementEnhance, self).ignore( other )
                if self.expr is not None:
                    self.expr.ignore( self.ignoreExprs[-1] )
        else:
            super( ParseElementEnhance, self).ignore( other )
            if self.expr is not None:
                self.expr.ignore( self.ignoreExprs[-1] )
        return self
    def streamline( self ):
        super(ParseElementEnhance,self).streamline()
        if self.expr is not None:
            self.expr.streamline()
        return self
    def checkRecursion( self, parseElementList ):
        # raise if this element is already on the current expansion path
        if self in parseElementList:
            raise RecursiveGrammarException( parseElementList+[self] )
        subRecCheckList = parseElementList[:] + [ self ]
        if self.expr is not None:
            self.expr.checkRecursion( subRecCheckList )
    def validate( self, validateTrace=[] ):
        tmp = validateTrace[:]+[self]
        if self.expr is not None:
            self.expr.validate(tmp)
        self.checkRecursion( [] )
    def __str__( self ):
        try:
            return super(ParseElementEnhance,self).__str__()
        except:
            pass
        if self.strRepr is None and self.expr is not None:
            self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
        return self.strRepr
class FollowedBy(ParseElementEnhance):
    """Lookahead matching of the given parse expression.  FollowedBy
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression matches at the current
    position.  FollowedBy always returns a null token list."""
    def __init__( self, expr ):
        super(FollowedBy,self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        # raises ParseException on failure; on success we consume nothing
        self.expr.tryParse( instring, loc )
        return loc, []
class NotAny(ParseElementEnhance):
    """Lookahead to disallow matching with the given parse expression.  NotAny
    does *not* advance the parsing position within the input string, it only
    verifies that the specified parse expression does *not* match at the current
    position.  Also, NotAny does *not* skip over leading whitespace. NotAny
    always returns a null token list.  May be constructed using the '~' operator."""
    def __init__( self, expr ):
        super(NotAny,self).__init__(expr)
        #~ self.leaveWhitespace()
        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
        self.mayReturnEmpty = True
        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
        # pre-built exception, reused for speed; loc/pstr filled in at raise time
        self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            self.expr.tryParse( instring, loc )
        except (ParseException,IndexError):
            # contained expression failed to match - that is success for NotAny
            pass
        else:
            #~ raise ParseException(instring, loc, self.errmsg )
            exc = self.myException
            exc.loc = loc
            exc.pstr = instring
            raise exc
        return loc, []
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "~{" + _ustr(self.expr) + "}"
        return self.strRepr
class ZeroOrMore(ParseElementEnhance):
    """Optional repetition of zero or more of the given expression."""
    def __init__( self, expr ):
        super(ZeroOrMore,self).__init__(expr)
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        tokens = []
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            # keep matching until the contained expression fails;
            # the except clause below terminates the loop
            while 1:
                if hasIgnoreExprs:
                    preloc = self.skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # accumulate only non-empty results (or results carrying named tokens)
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            # zero matches is acceptable - return whatever was collected
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class OneOrMore(ParseElementEnhance):
    """Repetition of one or more of the given expression."""
    def parseImpl( self, instring, loc, doActions=True ):
        # must be at least one - let a failure here propagate to the caller
        loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        try:
            hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
            # keep matching until the contained expression fails;
            # the except clause below terminates the loop
            while 1:
                if hasIgnoreExprs:
                    preloc = self.skipIgnorables( instring, loc )
                else:
                    preloc = loc
                loc, tmptokens = self.expr._parse( instring, preloc, doActions )
                # accumulate only non-empty results (or results carrying named tokens)
                if tmptokens or tmptokens.keys():
                    tokens += tmptokens
        except (ParseException,IndexError):
            pass
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "{" + _ustr(self.expr) + "}..."
        return self.strRepr
    def setResultsName( self, name, listAllMatches=False ):
        ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
        ret.saveAsList = True
        return ret
class _NullToken(object):
def __bool__(self):
return False
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
    """Optional matching of the given expression.
       A default return string can also be specified, if the optional expression
       is not found.
    """
    def __init__( self, exprs, default=_optionalNotMatched ):
        # NOTE: the parameter is (historically) named 'exprs' although it is a
        # single expression; kept for backward compatibility with keyword callers
        super(Optional,self).__init__( exprs, savelist=False )
        self.defaultValue = default
        self.mayReturnEmpty = True
    def parseImpl( self, instring, loc, doActions=True ):
        try:
            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
        except (ParseException,IndexError):
            # no match - substitute the default value if one was given
            if self.defaultValue is not _optionalNotMatched:
                tokens = [ self.defaultValue ]
            else:
                tokens = []
        return loc, tokens
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        if self.strRepr is None:
            self.strRepr = "[" + _ustr(self.expr) + "]"
        return self.strRepr
class SkipTo(ParseElementEnhance):
    """Token for skipping over all undefined text until the matched expression is found.
       If include is set to true, the matched expression is also consumed.  The ignore
       argument is used to define grammars (typically quoted strings and comments) that
       might contain false matches.
    """
    def __init__( self, other, include=False, ignore=None ):
        super( SkipTo, self ).__init__( other )
        if ignore is not None:
            # copy before mutating so the caller's expression is unaffected
            self.expr = self.expr.copy()
            self.expr.ignore(ignore)
        self.mayReturnEmpty = True
        self.mayIndexError = False
        self.includeMatch = include
        self.asList = False
        self.errmsg = "No match found for "+_ustr(self.expr)
        # pre-built exception, reused for speed; loc/pstr filled in at raise time
        self.myException = ParseException("",0,self.errmsg,self)
    def parseImpl( self, instring, loc, doActions=True ):
        startLoc = loc
        instrlen = len(instring)
        expr = self.expr
        # advance one character at a time until the target expression matches
        while loc <= instrlen:
            try:
                loc = expr.skipIgnorables( instring, loc )
                # probe only - doActions=False so actions run at most once below
                expr._parse( instring, loc, doActions=False, callPreParse=False )
                if self.includeMatch:
                    skipText = instring[startLoc:loc]
                    loc,mat = expr._parse(instring,loc)
                    if mat:
                        return loc, [ skipText, mat ]
                    else:
                        return loc, [ skipText ]
                else:
                    return loc, [ instring[startLoc:loc] ]
            except (ParseException,IndexError):
                loc += 1
        # target never matched anywhere in the remaining input
        exc = self.myException
        exc.loc = loc
        exc.pstr = instring
        raise exc
class Forward(ParseElementEnhance):
    """Forward declaration of an expression to be defined later -
       used for recursive grammars, such as algebraic infix notation.
       When the expression is known, it is assigned to the Forward variable using the '<<' operator.

       Note: take care when assigning to Forward not to overlook precedence of operators.
       Specifically, '|' has a lower precedence than '<<', so that::
          fwdExpr << a | b | c
       will actually be evaluated as::
          (fwdExpr << a) | b | c
       thereby leaving b and c out as parseable alternatives.  It is recommended that you
       explicitly group the values inserted into the Forward::
          fwdExpr << (a | b | c)
    """
    def __init__( self, other=None ):
        super(Forward,self).__init__( other, savelist=False )
    def __lshift__( self, other ):
        # bind the deferred expression; strings are promoted to Literals
        if isinstance( other, basestring ):
            other = Literal(other)
        self.expr = other
        self.mayReturnEmpty = other.mayReturnEmpty
        self.strRepr = None
        return self
    def leaveWhitespace( self ):
        # do not propagate to self.expr - it may not be defined yet
        self.skipWhitespace = False
        return self
    def streamline( self ):
        if not self.streamlined:
            self.streamlined = True
            if self.expr is not None:
                self.expr.streamline()
        return self
    def validate( self, validateTrace=[] ):
        # guard against infinite recursion through self-referential grammars
        if self not in validateTrace:
            tmp = validateTrace[:]+[self]
            if self.expr is not None:
                self.expr.validate(tmp)
        self.checkRecursion([])
    def __str__( self ):
        if hasattr(self,"name"):
            return self.name
        # temporarily change this instance's class so that a recursive grammar
        # prints "..." instead of recursing forever; restored in the finally
        self.__class__ = _ForwardNoRecurse
        try:
            if self.expr is not None:
                retString = _ustr(self.expr)
            else:
                retString = "None"
        finally:
            self.__class__ = Forward
        return "Forward: "+retString
    def copy(self):
        if self.expr is not None:
            return super(Forward,self).copy()
        else:
            # undefined Forward: return a new Forward deferring to this one,
            # so a later "self << expr" is seen by the copy as well
            ret = Forward()
            ret << self
            return ret
class _ForwardNoRecurse(Forward):
    # temporary stand-in class used by Forward.__str__ to break print recursion
    def __str__( self ):
        return "..."
class TokenConverter(ParseElementEnhance):
    """Abstract subclass of ParseElementEnhance, for converting parsed results."""
    def __init__( self, expr, savelist=False ):
        super(TokenConverter,self).__init__( expr )#, savelist )
        self.saveAsList = False
class Upcase(TokenConverter):
    """Converter to upper case all matching tokens.
       Deprecated - use the upcaseTokens parse action instead."""
    def __init__(self, *args):
        super(Upcase,self).__init__(*args)
        warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
                       DeprecationWarning,stacklevel=2)
    def postParse( self, instring, loc, tokenlist ):
        # NOTE: string.upper is a Python 2-only module function
        return map( string.upper, tokenlist )
class Combine(TokenConverter):
    """Converter to concatenate all matching tokens to a single string.
       By default, the matching patterns must also be contiguous in the input string;
       this can be disabled by specifying 'adjacent=False' in the constructor.
    """
    def __init__( self, expr, joinString="", adjacent=True ):
        super(Combine,self).__init__( expr )
        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
        if adjacent:
            self.leaveWhitespace()
        self.adjacent = adjacent
        self.skipWhitespace = True
        self.joinString = joinString
    def ignore( self, other ):
        # when adjacent, ignorables must not be propagated into the contained
        # expression or the "contiguous" requirement would be defeated
        if self.adjacent:
            ParserElement.ignore(self, other)
        else:
            super( Combine, self).ignore( other )
        return self
    def postParse( self, instring, loc, tokenlist ):
        # preserve named results while replacing the token list with the joined string
        retToks = tokenlist.copy()
        del retToks[:]
        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
        if self.resultsName and len(retToks.keys())>0:
            return [ retToks ]
        else:
            return retToks
class Group(TokenConverter):
    """Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
    def __init__( self, expr ):
        super(Group,self).__init__( expr )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # nest the matched tokens one level deeper
        return [ tokenlist ]
class Dict(TokenConverter):
    """Converter to return a repetitive expression as a list, but also as a dictionary.
       Each element can also be referenced using the first token in the expression as its key.
       Useful for tabular report scraping when the first column can be used as an item key.
    """
    def __init__( self, exprs ):
        super(Dict,self).__init__( exprs )
        self.saveAsList = True
    def postParse( self, instring, loc, tokenlist ):
        # use each sub-token's first element as the dictionary key
        for i,tok in enumerate(tokenlist):
            ikey = _ustr(tok[0]).strip()
            if len(tok)==1:
                # key with no value
                tokenlist[ikey] = ("",i)
            elif len(tok)==2 and not isinstance(tok[1],ParseResults):
                # simple key/value pair
                tokenlist[ikey] = (tok[1],i)
            else:
                # multi-token or structured value: store everything after the key
                dictvalue = tok.copy() #ParseResults(i)
                del dictvalue[0]
                if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
                    tokenlist[ikey] = (dictvalue,i)
                else:
                    tokenlist[ikey] = (dictvalue[0],i)
        if self.resultsName:
            return [ tokenlist ]
        else:
            return tokenlist
class Suppress(TokenConverter):
    """Converter for ignoring the results of a parsed expression."""
    def postParse( self, instring, loc, tokenlist ):
        # discard all matched tokens
        return []
    def suppress( self ):
        # already suppressed - nothing further to do
        return self
class OnlyOnce(object):
    """Wrapper for parse actions, to ensure they are only called once.
       A second invocation raises ParseException; call reset() to re-arm."""
    def __init__(self, methodCall):
        self.callable = ParserElement.normalizeParseActionArgs(methodCall)
        self.called = False
    def __call__(self,s,l,t):
        """Invoke the wrapped parse action; raises ParseException if already called."""
        if not self.called:
            results = self.callable(s,l,t)
            self.called = True
            return results
        raise ParseException(s,l,"")
    def reset(self):
        """Re-arm the wrapper so the parse action may be called again."""
        # Bug fix: 'self' was missing from the signature, so instance.reset()
        # always raised TypeError and the flag was never cleared.
        self.called = False
def traceParseAction(f):
    """Decorator for debugging parse actions.
       Writes entry/exit (or exception) lines for each invocation to stderr."""
    f = ParserElement.normalizeParseActionArgs(f)
    def z(*paArgs):
        # the last three arguments are always (s, l, t); a fourth leading
        # argument, if present, is the bound instance (method-style action)
        thisFunc = f.func_name
        s,l,t = paArgs[-3:]
        if len(paArgs)>3:
            thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
        sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
        try:
            ret = f(*paArgs)
        except Exception, exc:
            sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
            raise
        sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
        return ret
    return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
    """Helper to define a delimited list of expressions - the delimiter defaults to ','.
       By default, the list elements and delimiters can have intervening whitespace, and
       comments, but this can be overridden by passing 'combine=True' in the constructor.
       If combine is set to True, the matching tokens are returned as a single token
       string, with the delimiters included; otherwise, the matching tokens are returned
       as a list of tokens, with the delimiters suppressed.
    """
    listName = "%s [%s %s]..." % ( _ustr(expr), _ustr(delim), _ustr(expr) )
    if combine:
        tail = ZeroOrMore( delim + expr )
        return Combine( expr + tail ).setName( listName )
    tail = ZeroOrMore( Suppress( delim ) + expr )
    return ( expr + tail ).setName( listName )
def countedArray( expr ):
    """Helper to define a counted list of expressions.
       This helper defines a pattern of the form::
           integer expr expr expr...
       where the leading integer tells how many expr expressions follow.
       The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed.
    """
    arrayExpr = Forward()
    def countFieldParseAction(s,l,t):
        # rebind the Forward at parse time to exactly n copies of expr
        # (n == 0 matches the empty expression)
        n = int(t[0])
        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
        return []  # suppress the count token itself
    return ( Word(nums).setParseAction(countFieldParseAction) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression.  For example::
           first = Word(nums)
           second = matchPreviousLiteral(first)
           matchExpr = first + ":" + second
       will match "1:1", but not "1:2".  Because this matches a
       previous literal, will also match the leading "1:1" in "1:10".
       If this is not desired, use matchPreviousExpr.
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    def copyTokenToRepeater(s,l,t):
        # rebind 'rep' at parse time to literally match the just-parsed tokens
        if t:
            if len(t) == 1:
                rep << t[0]
            else:
                # flatten t tokens
                tflat = _flatten(t.asList())
                rep << And( [ Literal(tt) for tt in tflat ] )
        else:
            # previous expression matched nothing - repeat must also match nothing
            rep << Empty()
    expr.addParseAction(copyTokenToRepeater)
    return rep
def matchPreviousExpr(expr):
    """Helper to define an expression that is indirectly defined from
       the tokens matched in a previous expression, that is, it looks
       for a 'repeat' of a previous expression.  For example::
           first = Word(nums)
           second = matchPreviousExpr(first)
           matchExpr = first + ":" + second
       will match "1:1", but not "1:2".  Because this matches by
       expressions, will *not* match the leading "1:1" in "1:10";
       the expressions are evaluated first, and then compared, so
       "1" is compared with "10".
       Do *not* use with packrat parsing enabled.
    """
    rep = Forward()
    # the repeat uses a copy of the grammar, then verifies token equality
    e2 = expr.copy()
    rep << e2
    def copyTokenToRepeater(s,l,t):
        matchTokens = _flatten(t.asList())
        def mustMatchTheseTokens(s,l,t):
            theseTokens = _flatten(t.asList())
            if theseTokens != matchTokens:
                # tokens differ - fail the repeat match
                raise ParseException("",0,"")
        rep.setParseAction( mustMatchTheseTokens )
    expr.addParseAction(copyTokenToRepeater)
    return rep
def _escapeRegexRangeChars(s):
    """Backslash-escape the characters that are special inside a regex
       character class (backslash, caret, dash, right bracket), and render
       newline/tab as their escape sequences."""
    for special in "\\^-]":
        s = s.replace( special, "\\" + special )
    s = s.replace( "\n", "\\n" )
    s = s.replace( "\t", "\\t" )
    return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
    """Helper to quickly define a set of alternative Literals, and makes sure to do
       longest-first testing when there is a conflict, regardless of the input order,
       but returns a MatchFirst for best performance.

       Parameters:
        - strs - a string of space-delimited literals, or a list of string literals
        - caseless - (default=False) - treat all literals as caseless
        - useRegex - (default=True) - as an optimization, will generate a Regex
          object; otherwise, will generate a MatchFirst object (if caseless=True, or
          if creating a Regex raises an exception)
    """
    if caseless:
        isequal = ( lambda a,b: a.upper() == b.upper() )
        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
        parseElementClass = CaselessLiteral
    else:
        isequal = ( lambda a,b: a == b )
        masks = ( lambda a,b: b.startswith(a) )
        parseElementClass = Literal
    if isinstance(strs,(list,tuple)):
        symbols = strs[:]
    elif isinstance(strs,basestring):
        symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        # Bug fix: 'symbols' was left undefined here, so the code below raised
        # NameError; fall back to an empty alternative list instead.
        symbols = []
    # drop duplicates, and reorder so that no symbol is shadowed by an
    # earlier symbol that is a prefix of it (longest-first on conflict)
    i = 0
    while i < len(symbols)-1:
        cur = symbols[i]
        for j,other in enumerate(symbols[i+1:]):
            if ( isequal(other, cur) ):
                del symbols[i+j+1]
                break
            elif ( masks(cur, other) ):
                del symbols[i+j+1]
                symbols.insert(i,other)
                cur = other
                break
        else:
            i += 1
    if not caseless and useRegex:
        #~ print strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )
        try:
            if len(symbols)==len("".join(symbols)):
                # all symbols are single characters - a character class suffices
                return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
            else:
                return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
        except:
            # regex construction failed - fall through to the MatchFirst form
            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
                    SyntaxWarning, stacklevel=2)
    # last resort, just use MatchFirst
    return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
def dictOf( key, value ):
    """Helper to easily and clearly define a dictionary by specifying the respective patterns
       for the key and value.  Takes care of defining the Dict, ZeroOrMore, and Group tokens
       in the proper order.  The key pattern can include delimiting markers or punctuation,
       as long as they are suppressed, thereby leaving the significant key text.  The value
       pattern can include named results, so that the Dict results can include named token
       fields.
    """
    entry = Group( key + value )
    return Dict( ZeroOrMore( entry ) )
# single backslash character, used to build escape-sequence grammars below
_bslash = "\\"
# all printable, non-whitespace characters
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
# convenience constants for positional expressions
empty       = Empty().setName("empty")
lineStart   = LineStart().setName("lineStart")
lineEnd     = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd   = StringEnd().setName("stringEnd")
# internal grammar used by srange() to parse regex-style [] character sets:
# a backslash-escaped punctuation char, yielding the char itself
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
# "\0x41"-style hex escapes and "\041"-style octal escapes, yielding the char
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = "[" + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
# expand a parsed a-b range into the full run of characters; pass others through
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
    r"""Helper to easily define string ranges for use in Word construction.  Borrows
       syntax from regexp '[]' string range definitions::
          srange("[0-9]")   -> "0123456789"
          srange("[a-z]")   -> "abcdefghijklmnopqrstuvwxyz"
          srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
       The input string must be enclosed in []'s, and the returned string is the expanded
       character set joined into a single string.
       The values enclosed in the []'s may be::
          a single character
          an escaped character with a leading backslash (such as \- or \])
          an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
          an escaped octal character with a leading '\0' (\041, which is a '!' character)
          a range of any of the above, separated by a dash ('a-z', etc.)
          any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
    """
    try:
        return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
    except:
        # NOTE(review): bare except means malformed input silently yields "";
        # callers relying on that behavior keep it
        return ""
def replaceWith(replStr):
    """Helper method for common parse actions that simply return a literal value.  Especially
       useful when used with transformString().
    """
    # the returned parse action ignores its (s, l, t) arguments entirely
    return lambda *args: [ replStr ]
def removeQuotes(s,l,t):
    """Helper parse action for removing quotation marks from parsed quoted strings.
       To use, add this parse action to quoted string using::
         quotedString.setParseAction( removeQuotes )
    """
    quoted = t[0]
    # strip one leading and one trailing quote character
    return quoted[1:-1]
def upcaseTokens(s,l,t):
    """Helper parse action to convert tokens to upper case."""
    converted = []
    for tok in t:
        converted.append( str(tok).upper() )
    return converted
def downcaseTokens(s,l,t):
    """Helper parse action to convert tokens to lower case."""
    converted = []
    for tok in t:
        converted.append( str(tok).lower() )
    return converted
def keepOriginalText(s,startLoc,t):
    """Helper parse action to preserve original parsed text,
       overriding any nested parse actions.
       Reads the caller's local 'loc' via frame inspection to find the end
       of the match, and returns the raw slice of the input string."""
    # Bug fix: the docstring above was previously placed *after* this import,
    # making it a no-op expression statement instead of the function docstring.
    import inspect
    f = inspect.stack()[1][0]
    try:
        endloc = f.f_locals["loc"]
    finally:
        # break the frame reference cycle promptly
        del f
    return s[startLoc:endloc]
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions, given a tag name.
       When xml is True, attribute values must be double-quoted and attribute
       names are matched exactly; otherwise HTML rules apply (caseless tag,
       unquoted attribute values allowed, attribute names lowercased)."""
    if isinstance(tagStr,basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas,alphanums+"_-")
    if (xml):
        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    else:
        printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr + \
                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
                Suppress("=") + tagAttrValue ))) + \
                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
    closeTag = Combine("</" + tagStr + ">")
    # results names like "startDiv"/"endDiv", title-cased from the tag name
    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
    return openTag, closeTag
def makeHTMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for HTML, given a tag name.
       Returns an (openTag, closeTag) pair."""
    return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
    """Helper to construct opening and closing tag expressions for XML, given a tag name.
       Returns an (openTag, closeTag) pair."""
    return _makeTags( tagStr, True )
# enumeration-style constants for operator associativity, used by
# operatorPrecedence(); each is a unique sentinel compared by identity
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary or
    binary, left- or right-associative. Parse actions can also be attached
    to operator expressions.
    Parameters:
    - baseExpr - expression representing the most basic element for the nested
    - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form
    (opExpr, numTerms, rightLeftAssoc, parseAction), where:
    - opExpr is the pyparsing expression for the operator;
    may also be a string, which will be converted to a Literal
    - numTerms is the number of terms for this operator (must
    be 1 or 2)
    - rightLeftAssoc is the indicator whether the operator is
    right or left associative, using the pyparsing-defined
    constants opAssoc.RIGHT and opAssoc.LEFT.
    - parseAction is the parse action to be associated with
    expressions matching this operator expression (the
    parse action tuple member may be omitted)
    """
    ret = Forward()
    # Base operand, or a parenthesized instance of the full expression.
    lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
    for i,operDef in enumerate(opList):
        # Pad the tuple so a missing parse action unpacks as None.
        opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
        thisExpr = Forward().setName("expr%d" % i)
        if rightLeftAssoc == opAssoc.LEFT:
            if arity == 1:
                matchExpr = Group( lastExpr + opExpr )
            elif arity == 2:
                # Left association: flat repetition at the same level.
                matchExpr = Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
            else:
                raise ValueError, "operator must be unary (1) or binary (2)"
        elif rightLeftAssoc == opAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Optional):
                    opExpr = Optional(opExpr)
                matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
            elif arity == 2:
                # Right association: recurse into this same precedence level.
                matchExpr = Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
            else:
                raise ValueError, "operator must be unary (1) or binary (2)"
        else:
            raise ValueError, "operator must indicate right or left associativity"
        if pa:
            matchExpr.setParseAction( pa )
        # Each level matches its own operator form, or falls through to the
        # next-higher-precedence expression.
        thisExpr << ( matchExpr | lastExpr )
        lastExpr = thisExpr
    ret << lastExpr
    return ret
# 8-bit Latin-1 character classes (western-European letters / punctuation).
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
# Quoted-string expressions; embedded quotes may be doubled or backslash-escaped.
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\.))*')''').setName("quotedString using single or double quotes")
# Generic open/close tag matchers for any HTML tag name.
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_"))
commonHTMLEntity = Combine("&" + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";")
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),"><& '"))
# Parse action mapping a matched entity to its character (None if unknown).
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
# Comma-separated list: each item is a quoted string or a run of non-comma
# printables (internal whitespace allowed); empty items default to "".
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
                                  Optional( Word(" \t") +
                                            ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
    # Self-test / demo: a miniature SQL SELECT grammar exercised against a
    # few valid and invalid statements (Python 2 print syntax).
    def test( teststring ):
        # Parse one statement and dump the tokens; on failure, show the
        # offending line with a caret under the failure column.
        print teststring,"->",
        try:
            tokens = simpleSQL.parseString( teststring )
            tokenlist = tokens.asList()
            print tokenlist
            print "tokens = ", tokens
            print "tokens.columns =", tokens.columns
            print "tokens.tables =", tokens.tables
            print tokens.asXML("SQL",True)
        except ParseException, err:
            print err.line
            print " "*(err.column-1) + "^"
            print err
        print
    # Grammar: SELECT <columns|*> FROM <tables>, identifiers upper-cased.
    selectToken = CaselessLiteral( "select" )
    fromToken = CaselessLiteral( "from" )
    ident = Word( alphas, alphanums + "_$" )
    columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
    tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
    tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
    simpleSQL = ( selectToken + \
                  ( '*' | columnNameList ).setResultsName( "columns" ) + \
                  fromToken + \
                  tableNameList.setResultsName( "tables" ) )
    test( "SELECT * from XYZZY, ABC" )
    test( "select * from SYS.XYZZY" )
    test( "Select A from Sys.dual" )
    test( "Select AA,BB,CC from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Select A, B, C from Sys.dual" )
    test( "Xelect A, B, C from Sys.dual" )
    test( "Select A, B, C frox Sys.dual" )
    test( "Select" )
    test( "Select ^^^ frox Sys.dual" )
    test( "Select A, B, C from Sys.dual, Table2 " )
zopyx.txng3.ext - Extension modules for TextIndexNG3
====================================================
Author
------
zopyx.txng3.ext is written by Andreas Jung for ZOPYX Ltd. & Co. KG, Tuebingen, Germany.
License
--------
- TextIndexNG 3 is published under the Zope Public License V 2.1 (see ZPL.txt)
Other license agreements can be made. Contact us for details ([email protected]).
- TextIndexNG 3 ships with a copy of the Snowball code (snowball.tartarus.org)
for implementing stemming. This code is (C) 2001, Dr. Martin Porter and
published under the BSD license.
- TextIndexNG3 ships with the python-levenshtein extension written by
David Necas and published under the GNU General Public License (GPL).
Supported Python versions
-------------------------
- Python 2.7
- Python 3.4
- Python 3.5
- Python 3.6
- PyPy
Contact
-------
| Andreas Jung/ZOPYX
| D-72074 Tuebingen, Germany
| E-mail: info at zopyx dot com
| Web: http://www.zopyx.com
| zopyx.txng3.ext | /zopyx.txng3.ext-4.0.0.zip/zopyx.txng3.ext-4.0.0/README.txt | README.txt |
import os
import shutil
import sys
import tempfile
from optparse import OptionParser
# Scratch directory for the eggs downloaded during bootstrap; removed again
# at the very end of the script.
tmpeggs = tempfile.mkdtemp()
usage = '''\
[DESIRED PYTHON FOR BUILDOUT] bootstrap.py [options]
Bootstraps a buildout-based project.
Simply run this script in a directory containing a buildout.cfg, using the
Python that you want bin/buildout to use.
Note that by using --find-links to point to local resources, you can keep
this script from going over the network.
'''
# Command-line interface of the bootstrap script.
parser = OptionParser(usage=usage)
parser.add_option("-v", "--version", help="use a specific zc.buildout version")
parser.add_option("-t", "--accept-buildout-test-releases",
                  dest='accept_buildout_test_releases',
                  action="store_true", default=False,
                  help=("Normally, if you do not specify a --version, the "
                        "bootstrap script and buildout gets the newest "
                        "*final* versions of zc.buildout and its recipes and "
                        "extensions for you. If you use this flag, "
                        "bootstrap and buildout will get the newest releases "
                        "even if they are alphas or betas."))
parser.add_option("-c", "--config-file",
                  help=("Specify the path to the buildout configuration "
                        "file to be used."))
parser.add_option("-f", "--find-links",
                  help=("Specify a URL to search for buildout releases"))
parser.add_option("--allow-site-packages",
                  action="store_true", default=False,
                  help=("Let bootstrap.py use existing site packages"))
parser.add_option("--setuptools-version",
                  help="use a specific setuptools version")
options, args = parser.parse_args()
######################################################################
# load/install setuptools
try:
    if options.allow_site_packages:
        import setuptools
        import pkg_resources
    # Python 3 location first; the except branch falls back to Python 2.
    from urllib.request import urlopen
except ImportError:
    from urllib2 import urlopen
# Fetch and execute ez_setup.py to obtain a controlled setuptools install.
ez = {}
exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
if not options.allow_site_packages:
    # ez_setup imports site, which adds site packages
    # this will remove them from the path to ensure that incompatible versions
    # of setuptools are not in the path
    import site
    # inside a virtualenv, there is no 'getsitepackages'.
    # We can't remove these reliably
    if hasattr(site, 'getsitepackages'):
        for sitepackage_path in site.getsitepackages():
            sys.path[:] = [x for x in sys.path if sitepackage_path not in x]
setup_args = dict(to_dir=tmpeggs, download_delay=0)
if options.setuptools_version is not None:
    setup_args['version'] = options.setuptools_version
ez['use_setuptools'](**setup_args)
import setuptools
import pkg_resources
# This does not (always?) update the default working set. We will
# do it.
for path in sys.path:
    if path not in pkg_resources.working_set.entries:
        pkg_resources.working_set.add_entry(path)
######################################################################
# Install buildout
ws = pkg_resources.working_set
# Run easy_install in a subprocess to install zc.buildout into tmpeggs.
cmd = [sys.executable, '-c',
       'from setuptools.command.easy_install import main; main()',
       '-mZqNxd', tmpeggs]
find_links = os.environ.get(
    'bootstrap-testing-find-links',
    options.find_links or
    ('http://downloads.buildout.org/'
     if options.accept_buildout_test_releases else None)
    )
if find_links:
    cmd.extend(['-f', find_links])
setuptools_path = ws.find(
    pkg_resources.Requirement.parse('setuptools')).location
requirement = 'zc.buildout'
version = options.version
if version is None and not options.accept_buildout_test_releases:
    # Figure out the most recent final version of zc.buildout.
    import setuptools.package_index
    _final_parts = '*final-', '*final'

    def _final_version(parsed_version):
        # True when parsed_version denotes a final (non-pre-) release.
        try:
            return not parsed_version.is_prerelease
        except AttributeError:
            # Older setuptools
            for part in parsed_version:
                if (part[:1] == '*') and (part not in _final_parts):
                    return False
            return True

    index = setuptools.package_index.PackageIndex(
        search_path=[setuptools_path])
    if find_links:
        index.add_find_links((find_links,))
    req = pkg_resources.Requirement.parse(requirement)
    if index.obtain(req) is not None:
        # Scan the index for the highest final version.
        best = []
        bestv = None
        for dist in index[req.project_name]:
            distv = dist.parsed_version
            if _final_version(distv):
                if bestv is None or distv > bestv:
                    best = [dist]
                    bestv = distv
                elif distv == bestv:
                    best.append(dist)
        if best:
            best.sort()
            version = best[-1].version
if version:
    requirement = '=='.join((requirement, version))
cmd.append(requirement)
import subprocess
if subprocess.call(cmd, env=dict(os.environ, PYTHONPATH=setuptools_path)) != 0:
    raise Exception(
        "Failed to execute command:\n%s" % repr(cmd)[1:-1])
######################################################################
# Import and run buildout
ws.add_entry(tmpeggs)
ws.require(requirement)
import zc.buildout.buildout
# Default to the 'bootstrap' command when no buildout arguments were given.
if not [a for a in args if '=' not in a]:
    args.append('bootstrap')
# if -c was provided, we push it back into args for buildout' main function
if options.config_file is not None:
    args[0:0] = ['-c', options.config_file]
zc.buildout.buildout.main(args)
shutil.rmtree(tmpeggs)
from Levenshtein import *
from warnings import warn
class StringMatcher:
    """A SequenceMatcher-like class built on the top of Levenshtein.

    Expensive Levenshtein computations are memoized per sequence pair;
    all caches are invalidated whenever either sequence changes.
    """

    def _reset_cache(self):
        # Drop all memoized results (None means "not computed yet").
        self._ratio = self._distance = None
        self._opcodes = self._editops = self._matching_blocks = None

    def __init__(self, isjunk=None, seq1='', seq2=''):
        if isjunk:
            warn("isjunk not NOT implemented, it will be ignored")
        self._str1, self._str2 = seq1, seq2
        self._reset_cache()

    def set_seqs(self, seq1, seq2):
        self._str1, self._str2 = seq1, seq2
        self._reset_cache()

    def set_seq1(self, seq1):
        self._str1 = seq1
        self._reset_cache()

    def set_seq2(self, seq2):
        self._str2 = seq2
        self._reset_cache()

    def get_opcodes(self):
        if not self._opcodes:
            if self._editops:
                # Cheaper to derive opcodes from already-computed editops.
                self._opcodes = opcodes(self._editops, self._str1, self._str2)
            else:
                self._opcodes = opcodes(self._str1, self._str2)
        return self._opcodes

    def get_editops(self):
        if not self._editops:
            if self._opcodes:
                self._editops = editops(self._opcodes, self._str1, self._str2)
            else:
                self._editops = editops(self._str1, self._str2)
        return self._editops

    def get_matching_blocks(self):
        if not self._matching_blocks:
            self._matching_blocks = matching_blocks(self.get_opcodes(),
                                                    self._str1, self._str2)
        return self._matching_blocks

    def ratio(self):
        # `is None` (not truthiness): a legitimate ratio of 0.0 must be
        # cached too, otherwise it would be recomputed on every call.
        if self._ratio is None:
            self._ratio = ratio(self._str1, self._str2)
        return self._ratio

    def quick_ratio(self):
        # This is usually quick enough :o)
        if self._ratio is None:
            self._ratio = ratio(self._str1, self._str2)
        return self._ratio

    def real_quick_ratio(self):
        len1, len2 = len(self._str1), len(self._str2)
        if not len1 and not len2:
            # Match difflib.SequenceMatcher: two empty sequences compare as
            # identical (the original raised ZeroDivisionError here).
            return 1.0
        return 2.0 * min(len1, len2) / (len1 + len2)

    def distance(self):
        # `is None`: a distance of 0 (equal strings) must be cached, too.
        if self._distance is None:
            self._distance = distance(self._str1, self._str2)
        return self._distance
Changelog
=========
1.0a8 (2022-07-03)
------------------
- fixed normalization in document_path()
[zopyx]
- changed column ordering for "demo search" in control panel
[zopyx]
- added integration with toolbar
[zopyx]
- updated for Typesense 0.23.0
[zopyx]
- support incremental schema change without collection recreation
[zopyx]
1.0a7 (2022-02-26)
------------------
- updated docs
[zopyx]
1.0a6 (2022-02-19)
------------------
- hide/show search results/stats based on search input state
[zopyx]
- support for indexing HTML headlines (h1-h6 tags) into a dedicated
field
[zopyx]
- highlighting support for title | headlines | text with query
weights 4:2:1
[zopyx]
1.0a1 (2022-02-13)
------------------
- Initial release.
[zopyx]
| zopyx.typesense | /zopyx.typesense-1.0.0a8.tar.gz/zopyx.typesense-1.0.0a8/CHANGES.rst | CHANGES.rst |
Using the development buildout
==============================
Create a virtualenv in the package::
$ virtualenv --clear .
Install requirements with pip::
$ ./bin/pip install -r requirements.txt
Run buildout::
$ ./bin/buildout
Start Plone in foreground::
$ ./bin/instance fg
Running tests
-------------
$ tox
list all tox environments:
$ tox -l
py27-Plone43
py27-Plone51
py27-Plone52
py37-Plone52
build_instance
code-analysis
lint-py27
lint-py37
coverage-report
run a specific tox env:
$ tox -e py37-Plone52
| zopyx.typesense | /zopyx.typesense-1.0.0a8.tar.gz/zopyx.typesense-1.0.0a8/DEVELOP.rst | DEVELOP.rst |
zopyx.typesense Copyright 2021, Andreas Jung
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License version 2
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA.
| zopyx.typesense | /zopyx.typesense-1.0.0a8.tar.gz/zopyx.typesense-1.0.0a8/LICENSE.rst | LICENSE.rst |
"""Module where all interfaces, events and exceptions live."""
from .config import COLLECTION_SCHEMA_JSON, DEFAULT_REVIEW_STATES_TO_INDEX
from zope import schema
from zope.interface import Interface
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
from zopyx.typesense import _
class IBrowserLayer(IDefaultBrowserLayer):
    """Marker interface that defines a browser layer.

    Registered views/resources bound to this layer are presumably only
    available when the add-on is installed -- standard Plone convention.
    """
class ITypesenseSettings(Interface):
    """Connector settings (stored in the Plone registry and read by
    zopyx.typesense.api.API)."""

    # Master switch for the Typesense integration.
    enabled = schema.Bool(
        title=_("Typesense integration enabled"),
        default=False,
    )

    # Name of the Typesense collection documents are indexed into.
    collection = schema.TextLine(
        title=_("Name of Typesense collection"),
        default="typesense",
        required=True,
    )

    # Admin key: full read/write access, used for indexing and maintenance.
    api_key = schema.TextLine(
        title=_("Typesense Admin API key"),
        default="",
        required=True,
    )

    # Search-only key, safe to expose to the browser-side search UI.
    search_api_key = schema.TextLine(
        title=_("Typesense search API key"),
        default="",
        required=True,
    )

    # Up to three cluster nodes; only node 1 is mandatory.
    node1_url = schema.TextLine(
        title=_("URL of Typesense node 1"),
        description=_("URL node 1"),
        default="http://localhost:8108",
        required=True,
    )

    node2_url = schema.TextLine(
        title=_("URL of Typesense node 2"),
        description=_("URL node 2"),
        required=False,
    )

    node3_url = schema.TextLine(
        title=_("URL of Typesense node 3"),
        description=_("URL node 3"),
        required=False,
    )

    # Optional Apache Tika server for extracting text from office documents.
    tika_url = schema.TextLine(
        title=_("URL of Tika server for indexing office formats"),
        description=_("URL Tika server"),
        required=False,
    )

    # One workflow state per line; only content in these states is indexed.
    review_states_to_index = schema.Text(
        title=_("Review states to index "),
        default=DEFAULT_REVIEW_STATES_TO_INDEX,
        required=True,
    )

    # JSON document describing the Typesense collection schema.
    collection_schema = schema.Text(
        title=_("Collection schema"),
        default=COLLECTION_SCHEMA_JSON,
        required=True,
    )

    # If set, index Plone's SearchableText instead of the custom extraction.
    use_searchabletext = schema.Bool(
        title=_("Use SearchableText for indexing as default"),
        default=False,
        required=False,
    )
class ITypesenseIndexDataProvider(Interface):
    """Adapter for custom indexing."""

    def get_indexable_content(indexable_content):
        """Receive the default data dict with the indexable content
        (see api.py). A custom indexer implementation can modify it or
        provide additional data to be indexed.

        Returns the updated dict of indexable content.
        """
from .huey_tasks import ts_index, ts_unindex
from datetime import datetime
from plone import api
from plone.app.contenttypes.indexers import SearchableText
from plone.app.textfield import RichText
from plone.dexterity.utils import iterSchemata
from zope.component import ComponentLookupError, getAdapter
from zope.interface.interfaces import ComponentLookupError
from zope.schema import getFields
from zopyx.typesense import _, LOG
from zopyx.typesense.interfaces import ITypesenseIndexDataProvider, ITypesenseSettings
import furl
import html_text
import json
import lxml.html
import typesense
import zope.schema
def html2text(html):
    """Extract the plain text from an HTML fragment (str or UTF-8 bytes)."""
    markup = html.decode("utf8") if isinstance(html, bytes) else html
    return html_text.extract_text(html_text.parse_html(markup))
def headlines_from_html(html):
    """Return the concatenated text of heading elements found in an HTML
    fragment (str or UTF-8 bytes).

    NOTE(review): the changelog advertises h1-h6 indexing, but this XPath
    matches h2 through 'h7' ('h7' is not a standard HTML tag) -- confirm
    the intended heading range before changing it.
    """
    markup = html.decode("utf8") if isinstance(html, bytes) else html
    tree = lxml.html.fromstring(markup)
    headings = [
        node.text
        for node in tree.xpath(
            "//*[self::h2 or self::h3 or self::h4 or self::h5 or self::h6 or self::h7]"
        )
        if node.text
    ]
    return " ".join(headings)
class API:
    """Thin wrapper around the Typesense client for indexing and searching
    Plone content.

    Connection details, API keys and the collection schema are read from
    the Plone registry (ITypesenseSettings). Index/unindex operations are
    dispatched asynchronously through huey tasks (ts_index/ts_unindex).
    """

    @property
    def collection(self):
        """Return collection name from registry"""
        collection = api.portal.get_registry_record("collection", ITypesenseSettings)
        return collection

    def index_document(self, obj, collection=None):
        """Index document `obj` (into `collection` if given, else the
        configured collection). Objects whose review state is not
        configured for indexing are silently skipped."""
        data = self.indexable_content(obj)
        if not data:
            return
        target_collection = collection if collection else self.collection
        ts_index(
            ts_client=self.get_typesense_client(),
            collection=target_collection,
            data=data,
            document_id=self.document_id(obj),
            document_path=self.document_path(obj),
        )

    def unindex_document(self, obj):
        """Unindex document `obj`"""
        ts_unindex(
            ts_client=self.get_typesense_client(),
            collection=self.collection,
            document_id=self.document_id(obj),
            document_path=self.document_path(obj),
        )

    def indexable_content(self, obj):
        """Return dict with indexable content for `obj`, or None when the
        object's review state excludes it from indexing."""
        # review states (one per line in the registry record)
        review_states_to_index = api.portal.get_registry_record(
            "review_states_to_index", ITypesenseSettings
        )
        review_states_to_index = [
            s.strip() for s in review_states_to_index.split("\n") if s.strip()
        ]
        ignore_review_state = False
        try:
            review_state = api.content.get_state(obj)
        except Exception:
            # Content without a workflow has no review state; index it
            # unconditionally. (Was a bare `except:`, which would also have
            # swallowed KeyboardInterrupt/SystemExit.)
            review_state = ""
            ignore_review_state = True
        if not ignore_review_state and review_state not in review_states_to_index:
            # don't index content without proper review state
            return
        # language
        default_language = api.portal.get_default_language()
        language = obj.Language() or default_language
        document_id = self.document_id(obj)
        d = dict()
        d["id"] = document_id
        d["id_original"] = obj.getId()
        d["title"] = obj.Title()
        d["description"] = obj.Description()
        d["language"] = language
        d["portal_type"] = obj.portal_type
        d["review_state"] = review_state
        d["path"] = self.document_path(obj)
        d["all_paths"] = self.document_path_items(obj)
        d["created"] = obj.created().ISO8601()
        d["modified"] = obj.modified().ISO8601()
        d["effective"] = obj.effective().ISO8601()
        d["expires"] = obj.expires().ISO8601()
        d["subject"] = obj.Subject()
        d["uid"] = obj.UID()
        d["document_type_order"] = 0
        d["_indexed"] = datetime.utcnow().isoformat()
        use_searchabletext = api.portal.get_registry_record(
            "use_searchabletext", ITypesenseSettings
        )
        if use_searchabletext:
            # use Plone's SearchableText implementation
            indexable_text = SearchableText(obj)
        else:
            # or our own indexable text content: walk all schema fields and
            # collect text from RichText and (plain) Text/TextLine fields.
            indexable_text = []
            indexable_headlines = []
            fields = {}
            schemes = iterSchemata(obj)
            for schema in schemes:
                fields.update(getFields(schema))
            for name, field in fields.items():
                if name in ["id", "title"]:
                    continue
                if isinstance(field, RichText):
                    text = getattr(obj, name)
                    if isinstance(text, str):
                        indexable_text.append(text)
                    else:
                        if text and text.output:
                            indexable_text.append(html2text(text.output))
                            indexable_headlines.append(
                                headlines_from_html(text.output)
                            )
                elif isinstance(field, (zope.schema.Text, zope.schema.TextLine)):
                    text = getattr(obj, name)
                    indexable_text.append(text)
            indexable_text = [text for text in indexable_text if text]
            indexable_text = " ".join(indexable_text)
            indexable_headlines = [text for text in indexable_headlines if text]
            indexable_headlines = " ".join(indexable_headlines)
            d["text"] = indexable_text
            d["headlines"] = indexable_headlines
        # Check if there is an adapter for the given content type interface
        # implementing ITypesenseIndexDataProvider for modifying the index data
        # dict.
        try:
            adapter = getAdapter(obj, ITypesenseIndexDataProvider)
        except ComponentLookupError:
            adapter = None
        if adapter:
            d = adapter.get_indexable_content(d)
        return d

    def indexed_content(self, obj):
        """Return the indexed content in Typesense for the given `obj`
        (empty dict when the document is not indexed)."""
        client = self.get_typesense_client()
        document_id = self.document_id(obj)
        try:
            document = (
                client.collections[self.collection].documents[document_id].retrieve()
            )
        except typesense.exceptions.ObjectNotFound:
            document = {}
        return document

    def document_id(self, obj):
        """Return the content id for the given `obj` (site id + UID, so
        several sites can share one Typesense server)."""
        site_id = api.portal.get().getId()
        obj_id = f"{site_id}-{obj.UID()}"
        return obj_id

    def document_path(self, obj):
        """Return the content path for the given `obj`, relative to the
        Plone site root and without a leading slash."""
        site_path = api.portal.get().absolute_url(1)
        obj_path = obj.absolute_url(1)
        rel_path = obj_path.replace(site_path, "", 1)
        rel_path = rel_path.lstrip("/")
        return rel_path

    def document_path_items(self, obj):
        """Return all possible prefix path components of the given object
        (used for hierarchical path filtering in search)."""
        document_path = self.document_path(obj)
        document_path_items = document_path.split("/")
        all_paths = list()
        for i in range(len(document_path_items) + 1):
            items = "/".join(document_path_items[:i])
            if not items.startswith("/"):
                items = "/" + items
            if not items in all_paths:
                all_paths.append(items)
        return all_paths

    def exists_collection(self, collection):
        """Check if collection exists"""
        client = self.get_typesense_client()
        all_collections = [
            collection["name"] for collection in client.collections.retrieve()
        ]
        return collection in all_collections

    def update_collection_schema(self):
        """Apply the registry-configured schema to the existing collection
        in place: drop fields no longer declared, add newly declared ones.
        No documents are reindexed here."""
        client = self.get_typesense_client()
        c = client.collections[self.collection]
        schema = c.retrieve()["fields"]
        schema_names = set([d["name"] for d in schema])
        collection_schema = api.portal.get_registry_record(
            "collection_schema", ITypesenseSettings
        )
        collection_schema = json.loads(collection_schema)["fields"]
        # 'id' is implicit in Typesense and must not appear in schema updates.
        collection_schema_names = [
            d["name"] for d in collection_schema if d["name"] not in ["id"]
        ]
        collection_schema_names = set(collection_schema_names)
        added_names = collection_schema_names - schema_names
        removed_names = schema_names - collection_schema_names
        update_schema = {"fields": []}
        for name in removed_names:
            update_schema["fields"].append(dict(name=name, drop=True))
        for field_schema in collection_schema:
            if field_schema["name"] in added_names:
                update_schema["fields"].append(field_schema)
        if update_schema["fields"]:
            client.collections[self.collection].update(update_schema)
            LOG.info(f"Schema of collection {self.collection} updated: {update_schema}")
        else:
            LOG.info(f"No schema update information found for collection {self.collection}")

    def create_collection(self, temporary=False):
        """Create collection; with temporary=True, create it under a
        timestamp-suffixed name (used for zero-downtime reindexing)."""
        client = self.get_typesense_client()
        collection = self.collection
        if temporary:
            collection = collection + "-" + datetime.utcnow().isoformat()
        if self.exists_collection(collection):
            raise RuntimeError(f"Collection `{collection}` already exists")
        collection_schema = api.portal.get_registry_record(
            "collection_schema", ITypesenseSettings
        )
        collection_schema = json.loads(collection_schema)
        collection_schema["name"] = collection
        client.collections.create(collection_schema)
        LOG.info(f"Created Typesense collection {collection}")
        return collection

    def drop_collection(self, collection_name=None):
        """Drop collection (the configured one unless a name is given);
        a non-existing collection is a no-op."""
        collection = collection_name if collection_name else self.collection
        if self.exists_collection(collection):
            client = self.get_typesense_client()
            try:
                client.collections[collection].delete()
                LOG.info(f"Deleted Typesense collection {collection}")
            except Exception:
                LOG.exception(f"Could not delete Typesense collection {collection}")
                raise

    def alias_collection(self, collection, alias_collection):
        """alias `alias_collection` to `collection`"""
        client = self.get_typesense_client()
        client.aliases.upsert(collection, dict(collection_name=alias_collection))
        LOG.info(f"Creating alias '{collection}' for {alias_collection}")

    def remove_obsolete_collections(self, collection_prefix):
        """Remove obsolete (timestamp-suffixed) collections with prefix
        `collection_prefix`, keeping only the newest one."""
        client = self.get_typesense_client()
        all_collections = client.collections.retrieve()
        names = [
            d["name"]
            for d in all_collections
            if d["name"].startswith(collection_prefix + "-")
        ]
        names = sorted(names)
        # ISO timestamps sort chronologically; keep the most recent.
        names_to_remove = names[:-1]
        for name in names_to_remove:
            self.drop_collection(name)

    def collection_stats(self):
        """Get collection statistics (empty dict on failure)."""
        client = self.get_typesense_client()
        try:
            result = client.collections[self.collection].retrieve()
        except Exception:
            LOG.error("Unable to fetch Typesense stats", exc_info=True)
            return {}
        result["created_at_str"] = datetime.fromtimestamp(
            result["created_at"]
        ).isoformat()
        return result

    def get_typesense_client(
        self,
    ):
        """Typesense client with full admin access"""
        return self.get_client(self.api_key)

    def get_typesense_search_client(self):
        """Typesense client with search access"""
        return self.get_client(self.search_api_key)

    @property
    def search_api_key(self):
        """Return search API key"""
        return api.portal.get_registry_record("search_api_key", ITypesenseSettings)

    @property
    def api_key(self):
        """Return admin API key"""
        return api.portal.get_registry_record("api_key", ITypesenseSettings)

    def get_client(self, api_key):
        """Get Typesense client for given API key.

        Raises ValueError when no key is configured."""
        if not api_key:
            raise ValueError(_("No Typesense API key(s) configured"))
        client = typesense.Client(
            {
                "api_key": api_key,
                "nodes": self.nodes,
                "connection_timeout_seconds": 10,
            }
        )
        return client

    @property
    def nodes(self):
        """Return a list of Typesense nodes (used by the search UI);
        None when the registry records are unavailable."""
        try:
            node1_url = api.portal.get_registry_record("node1_url", ITypesenseSettings)
            node2_url = api.portal.get_registry_record("node2_url", ITypesenseSettings)
            node3_url = api.portal.get_registry_record("node3_url", ITypesenseSettings)
        except (KeyError, ComponentLookupError):
            return None
        nodes = list()
        for url in (node1_url, node2_url, node3_url):
            if not url:
                continue
            f = furl.furl(url)
            nodes.append(dict(host=f.host, port=f.port, protocol=f.scheme))
        return nodes

    def export_documents(self, format="jsonl"):
        """Export all documents of collection as JSON lines (default) or,
        for any other `format` value, as a pretty-printed JSON array."""
        client = self.get_typesense_client()
        result = client.collections[self.collection].documents.export()
        if format == "jsonl":
            return result
        else:
            # JSON
            result = [json.loads(jline) for jline in result.splitlines()]
            return json.dumps(result, indent=2)

    def search(self, query, per_page=25, page=1):
        """Full-text search over the `text` field; returns the raw
        Typesense result augmented with a `pages` count."""
        client = self.get_typesense_client()
        search_params = {
            "q": query,
            "query_by": "text",
            "per_page": per_page,
            "page": page,
            # 'sort_by': 'id2:desc',
            # 'facet_by': ['language,area,document_type,societies,specifications_str,specifications2_str,topic_str']
        }
        LOG.info(search_params)
        result = client.collections[self.collection].documents.search(search_params)
        # Ceiling division; the previous int(found/per_page)+1 reported one
        # page too many whenever `found` was an exact multiple of the page
        # size. Keep a minimum of one page for empty result sets.
        found = result["found"]
        size = result["request_params"]["per_page"]
        result["pages"] = max(1, -(-found // size))
        return result

    def snapshot(self):
        """Snapshot typesense database.

        Caveat: If Typesense is running within a Docker container,
        the snapshot will be created inside the container unless you
        configure a volume mapping.
        """
        client = self.get_typesense_client()
        snapshot_path = f"{self.collection}-{datetime.utcnow().isoformat()}.snapshot"
        client.operations.perform("snapshot", {"snapshot_path": snapshot_path})
        return snapshot_path

    def cluster_data(self):
        """Return metrics, stats etc. from Typesense (None for a
        standalone server that lacks the cluster endpoints)."""
        client = self.get_typesense_client()
        try:
            # cluster
            metrics = client.metrics.retrieve()
            stats = client.stats.retrieve()
            health = client.health.retrieve()
            return dict(metrics=metrics, health=health, stats=stats)
        except AttributeError:
            # standalone
            return None
from Products.Five.browser import BrowserView
from zopyx.typesense import _, LOG
from zopyx.typesense.api import API
import gzip
import json
import os
import plone.api
import progressbar
import time
class View(BrowserView):
    def recreate_collection(self):
        """Drop and recreate the configured Typesense collection (empties
        the index), then redirect back to the admin view."""
        ts_api = API()
        ts_api.drop_collection()
        ts_api.create_collection()
        LOG.info(f"Created Typesense collection {ts_api.collection}")
        portal = plone.api.portal.get()
        plone.api.portal.show_message(
            _("Typesense collection dropped and recreated"),
            request=self.request,
        )
        self.request.response.redirect(portal.absolute_url() + "/@@typesense-admin")
    def update_collection_schema(self):
        """Apply the registry-configured schema changes to the existing
        collection in place, then redirect back to the admin view."""
        ts_api = API()
        ts_api.update_collection_schema()
        portal = plone.api.portal.get()
        plone.api.portal.show_message(
            _("Typesense collection schema updated"),
            request=self.request,
        )
        self.request.response.redirect(portal.absolute_url() + "/@@typesense-admin")
    def indexed_content(self):
        """Return indexed content for current context object
        (empty dict when the object is not indexed)."""
        ts_api = API()
        document = ts_api.indexed_content(self.context)
        return document
    def export_documents(self, format="jsonl"):
        """Export all documents from current collection as JSONLines
        (or JSON), served as a file download."""
        ts_api = API()
        result = ts_api.export_documents(format)
        self.request.response.setHeader("content-type", f"application/{format}")
        self.request.response.setHeader(
            "content-disposition",
            f"attachment; filename={ts_api.collection}.{format}",
        )
        return result
    def collection_information(self):
        """Retrieve Collection information (stats dict for the template)."""
        ts_api = API()
        return ts_api.collection_stats()
    def reindex_all(self):
        """Reindex all content with (near) zero downtime: index everything
        into a fresh timestamp-suffixed collection, drop the old canonical
        collection, alias the canonical name to the new collection, and
        clean up obsolete temporary collections."""
        ts = time.time()
        ts_api = API()
        # ts_api.drop_collection()
        collection = ts_api.create_collection(temporary=True)
        catalog = plone.api.portal.get_tool("portal_catalog")
        brains = catalog()
        num_brains = len(list(brains))
        with progressbar.ProgressBar(max_value=num_brains) as pg:
            for i, brain in enumerate(brains):
                pg.update(i)
                obj = brain.getObject()
                ts_api.index_document(obj, collection)
        # Swap: canonical name becomes an alias for the new collection.
        ts_api.drop_collection(ts_api.collection)
        ts_api.alias_collection(ts_api.collection, collection)
        ts_api.remove_obsolete_collections(ts_api.collection)
        duration = time.time() - ts
        LOG.info(
            f"All content submitted for reindexing ({num_brains} items), duration {duration:.2f} seconds"
        )
        portal = plone.api.portal.get()
        plone.api.portal.show_message(
            _(
                "All content submitted for reindexing. Results may/will show up delayed depending on the amount of documents!"
            ),
            request=self.request,
        )
        self.request.response.redirect(portal.absolute_url() + "/@@typesense-admin")
def search_result(self):
    """Run a search for the admin UI and return the raw Typesense result."""
    form = self.request.form
    return API().search(query=form.get("query"), page=form.get("page", 1))
def import_demo_content(self):
    """(Re)create the /news folder, fill it with published sample news
    items loaded from ``de-news.json`` and redirect to the admin view.
    """
    from plone.app.textfield.value import RichTextValue

    portal = plone.api.portal.get()
    # Start from a clean slate: drop an existing /news folder first.
    # (Log messages fixed: they previously said "new" instead of "news".)
    LOG.info("Deleting folder: news")
    if "news" in portal.objectIds():
        plone.api.content.delete(portal.news)
    LOG.info("Creating folder: news")
    data_folder = plone.api.content.create(
        container=portal, type="Folder", id="news", title="news"
    )
    fn = os.path.dirname(__file__) + "/de-news.json"
    with open(fn) as fp:
        news = json.load(fp)
    with progressbar.ProgressBar(max_value=len(news)) as pg:
        for i, n in enumerate(news):
            pg.update(i)
            text = RichTextValue(n["text"], "text/html", "text/html")
            doc = plone.api.content.create(
                type="News Item",
                container=data_folder,
                title=n["title"],
                text=text,
                language="de",
            )
            # Publish each item so it shows up in anonymous searches.
            plone.api.content.transition(doc, "publish")
    plone.api.portal.show_message(
        _("Sample content imported into folder /news"),
        request=self.request,
    )
    self.request.response.redirect(portal.absolute_url() + "/@@typesense-admin")
def snapshot(self):
    """Create a snapshot of the Typesense internal database and report
    its name to the user.
    """
    name = API().snapshot()
    site = plone.api.portal.get()
    # NOTE(review): an f-string inside _() bypasses i18n message
    # extraction; kept as-is to preserve behavior.
    plone.api.portal.show_message(
        _(f"Snapshot taken ({name})"),
        request=self.request,
    )
    self.request.response.redirect(site.absolute_url() + "/@@typesense-admin")
def cluster_data(self):
    """Return metrics and stats from the Typesense cluster."""
    return API().cluster_data()
def current_path(self):
    """Return the current context's path relative to the Plone site,
    always with a leading slash.
    """
    site_path = plone.api.portal.get().absolute_url(1)
    relative = self.context.absolute_url(1).replace(site_path, "")
    return relative if relative.startswith("/") else "/" + relative
def search_settings(self):
    """Typesense settings returned as JSON for the dynamic search UI.

    Bugfix: the search UI (app.js) reads the field weights from the
    "query_by_weights" key, but only "query_weights" was published, so
    the weights were silently ignored. Both keys are now emitted; the
    old one is kept for backward compatibility.
    """
    ts_api = API()
    settings = {
        "collection": ts_api.collection,
        "api_key": ts_api.search_api_key,
        "nodes": ts_api.nodes,
        "query_by": "title,headlines,text",
        "query_weights": "4,2,1",
        "query_by_weights": "4,2,1",
    }
    self.request.response.setHeader("content-type", "application/json")
    return json.dumps(settings)
def import_demo_content2(self):
    """(Re)create the /demo folder, fill it with published sample news
    items loaded from ``demo.json`` and redirect to the admin view.
    """
    from plone.app.textfield.value import RichTextValue

    portal = plone.api.portal.get()
    if "demo" in portal.objectIds():
        # Bugfix: delete the existing /demo folder (the previous code
        # deleted portal.news by mistake).
        plone.api.content.delete(portal.demo)
    data_folder = plone.api.content.create(
        container=portal, type="Folder", id="demo", title="demo"
    )
    fn = os.path.dirname(__file__) + "/demo.json"
    # Use a context manager so the file handle is closed deterministically.
    with open(fn) as fp:
        news = json.load(fp)
    for n in news:
        text = RichTextValue(n["text"], "text/html", "text/html")
        doc = plone.api.content.create(
            type="News Item",
            container=data_folder,
            title=n["title"],
            text=text,
            language="de",
        )
        # Publish each item so it shows up in anonymous searches.
        plone.api.content.transition(doc, "publish")
    plone.api.portal.show_message(
        _("Sample content imported into folder /demo"),
        request=self.request,
    )
    self.request.response.redirect(portal.absolute_url() + "/@@typesense-admin")
/* global vars first */
// Endpoint serving the Typesense search settings as JSON
// (backed by the @@typesense-search-settings browser view).
var remote_url = PORTAL_URL + "/@@typesense-search-settings";
var ts_settings = null;
/* Show initially all hits with all form control (true)
 * or show only empty search field by default (false).
 */
var SHOW_ALL_HITS_INITIALLY = true;

/* Retrieve search settings through JSON */
// NOTE(review): this deliberately performs a *synchronous* XHR
// (async: false) so ts_settings is populated before the widgets below
// are constructed. Synchronous XHR is deprecated in browsers — confirm
// before refactoring to an async flow.
function getSearchSettings() {
    return $.getJSON({
        type: "GET",
        url: remote_url,
        async: false
    }).responseText;
}

ts_settings = JSON.parse(getSearchSettings());
/* Restrict results to the current folder unless we are at the site root. */
var filterBy = '';
if (CURRENT_PATH.length > 1) // root = "/"
    filterBy = `all_paths:=${CURRENT_PATH}`;

const typesenseInstantsearchAdapter = new TypesenseInstantSearchAdapter({
    server: {
        apiKey: ts_settings["api_key"],
        nodes: ts_settings["nodes"]
    },
    // The following parameters are directly passed to Typesense's search API
    // endpoint. So you can pass any parameters supported by the search
    // endpoint below. queryBy is required. filterBy is managed and
    // overridden by InstantSearch.js. To set it, you want to use one of the
    // filter widgets like refinementList or use the `configure` widget.
    additionalSearchParameters: {
        queryBy: ts_settings["query_by"],
        // Bugfix: the settings endpoint publishes the weights under
        // "query_weights"; the old lookup of "query_by_weights" alone was
        // always undefined. Accept both keys for compatibility.
        queryByWeights: ts_settings["query_by_weights"] || ts_settings["query_weights"],
        filterBy: filterBy
    },
});
const searchClient = typesenseInstantsearchAdapter.searchClient;

/* The InstantSearch instance. When SHOW_ALL_HITS_INITIALLY is false the
 * refinement controls and the hit list stay hidden until the user types
 * a query; otherwise every keystroke (including the empty initial state)
 * triggers a search.
 */
const search = instantsearch({
    searchClient,
    indexName: ts_settings["collection"],
    searchFunction(helper) {
        if (SHOW_ALL_HITS_INITIALLY) {
            helper.search();
            return;
        }
        const controls = [
            '.refinement-label',
            '.ais-RefinementList-list',
            '#search-control',
            '#hits'
        ];
        if (helper.state.query === '') {
            controls.forEach(function (selector) {
                $(selector).hide();
            });
        } else {
            controls.forEach(function (selector) {
                $(selector).show();
            });
            helper.search();
        }
    }
});
/*
* Example:
* https://github.com/typesense/showcase-ecommerce-store/blob/master/src/app.js
*/
search.addWidgets([
    // Free-text query input bound to #searchbox; searches as you type.
    instantsearch.widgets.searchBox({
        container: '#searchbox',
        showSubmit: false,
        showReset: false,
        placeholder: 'Search for... ',
        autofocus: false,
        searchAsYouType: true,
        showLoadingIndicator: true,
        cssClasses: {
            input: 'form-control form-control-sm border border-light text-dark',
            loadingIcon: 'stroke-primary',
        },
    }),
    instantsearch.widgets.configure({
        hitsPerPage: 10,
    }),
    // Hit rendering: title links back into the Plone site via the
    // document's indexed path; headlines/text are shown as snippets.
    instantsearch.widgets.hits({
        container: '#hits',
        templates: {
            item: `
        <div class="hit">
          <div class="hit-title"> <a class="hit-link" href="${PORTAL_URL}/{{path}}\">{{#helpers.highlight}}{ "attribute": "title" }{{/helpers.highlight}}</a></div>
          <div class="hit-meta">
            <span class="hit-portal_type">{{#helpers.highlight}}{ "attribute": "portal_type" }{{/helpers.highlight}}</span> |
            <span class="hit-review_state">{{#helpers.highlight}}{ "attribute": "review_state" }{{/helpers.highlight}}</span> |
            <span class="hit-created">{{#helpers.highlight}}{ "attribute": "created" }{{/helpers.highlight}}</span> |
            <span class="hit-modified">{{#helpers.highlight}}{ "attribute": "modified" }{{/helpers.highlight}}</span>
          </div>
          <!--
          <div class="hit-text">{{#helpers.highlight}}{ "attribute": "text" }{{/helpers.highlight}}</div>
          -->
          <div class="hit-text" id="hits-headlines">{{#helpers.snippet}}{ "attribute": "headlines" }{{/helpers.snippet}}</div>
          <div class="hit-text" id="hits-text">{{#helpers.snippet}}{ "attribute": "text" }{{/helpers.snippet}}</div>
        </div>
      `,
        },
    }),
    // Bootstrap-styled pagination below the hit list.
    instantsearch.widgets.pagination({
        container: '#pagination',
        root: "nav",
        cssClasses: {
            root: "navigation",
            list: 'pagination ',
            item: 'page-item ',
            link: 'text-decoration-none',
            disabledItem: 'text-muted',
            selectedItem: 'fw-bold text-primary',
        },
    }),
    // Facet filters over the indexed document attributes.
    instantsearch.widgets.refinementList({
        container: '#review-state',
        attribute: 'review_state',
    }),
    instantsearch.widgets.refinementList({
        container: '#portal-type',
        attribute: 'portal_type',
        showMore: false
    }),
    instantsearch.widgets.refinementList({
        container: '#subject',
        attribute: 'subject',
    }),
    instantsearch.widgets.refinementList({
        container: '#language',
        attribute: 'language',
    }),
    // Result count and query timing ("N hits found in X ms").
    instantsearch.widgets.stats({
        container: '#stats',
        templates: {
            text: `
      {{#hasNoResults}}No hits{{/hasNoResults}}
      {{#hasOneResult}}1 hit{{/hasOneResult}}
      {{#hasManyResults}}{{#helpers.formatNumber}}{{nbHits}}{{/helpers.formatNumber}} hits {{/hasManyResults}}
      found in {{processingTimeMS}} ms
    `,
        },
        cssClasses: {
            text: 'small',
        },
    }),
    // Page-size selector; 10 per page is the default.
    instantsearch.widgets.hitsPerPage({
        container: '#hits-per-page',
        items: [{
            label: '10 per page',
            value: 10,
            default: true
        }, {
            label: '20 per page',
            value: 20
        }, {
            label: '50 per page',
            value: 50
        }, {
            label: '100 per page',
            value: 100
        }, ],
        cssClasses: {
            select: 'custom-select custom-select-sm',
        },
    }),
]);

// Kick off the initial render/search.
search.start();
!function(e){if("object"==typeof exports&&"undefined"!=typeof module)module.exports=e();else if("function"==typeof define&&define.amd)define([],e);else{var t;t="undefined"!=typeof window?window:"undefined"!=typeof global?global:"undefined"!=typeof self?self:this,t.algoliasearch=e()}}(function(){var e;return function t(e,r,o){function n(a,s){if(!r[a]){if(!e[a]){var u="function"==typeof require&&require;if(!s&&u)return u(a,!0);if(i)return i(a,!0);var c=new Error("Cannot find module '"+a+"'");throw c.code="MODULE_NOT_FOUND",c}var l=r[a]={exports:{}};e[a][0].call(l.exports,function(t){var r=e[a][1][t];return n(r?r:t)},l,l.exports,t,e,r,o)}return r[a].exports}for(var i="function"==typeof require&&require,a=0;a<o.length;a++)n(o[a]);return n}({1:[function(e,t,r){(function(o){function n(){return!("undefined"==typeof window||!window.process||"renderer"!==window.process.type)||("undefined"!=typeof document&&document.documentElement&&document.documentElement.style&&document.documentElement.style.WebkitAppearance||"undefined"!=typeof window&&window.console&&(window.console.firebug||window.console.exception&&window.console.table)||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/firefox\/(\d+)/)&&parseInt(RegExp.$1,10)>=31||"undefined"!=typeof navigator&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/applewebkit\/(\d+)/))}function i(e){var t=this.useColors;if(e[0]=(t?"%c":"")+this.namespace+(t?" 
%c":" ")+e[0]+(t?"%c ":" ")+"+"+r.humanize(this.diff),t){var o="color: "+this.color;e.splice(1,0,o,"color: inherit");var n=0,i=0;e[0].replace(/%[a-zA-Z%]/g,function(e){"%%"!==e&&(n++,"%c"===e&&(i=n))}),e.splice(i,0,o)}}function a(){return"object"==typeof console&&console.log&&Function.prototype.apply.call(console.log,console,arguments)}function s(e){try{null==e?r.storage.removeItem("debug"):r.storage.debug=e}catch(t){}}function u(){var e;try{e=r.storage.debug}catch(t){}return!e&&"undefined"!=typeof o&&"env"in o&&(e=o.env.DEBUG),e}function c(){try{return window.localStorage}catch(e){}}r=t.exports=e(2),r.log=a,r.formatArgs=i,r.save=s,r.load=u,r.useColors=n,r.storage="undefined"!=typeof chrome&&"undefined"!=typeof chrome.storage?chrome.storage.local:c(),r.colors=["lightseagreen","forestgreen","goldenrod","dodgerblue","darkorchid","crimson"],r.formatters.j=function(e){try{return JSON.stringify(e)}catch(t){return"[UnexpectedJSONParseError]: "+t.message}},r.enable(u())}).call(this,e(11))},{11:11,2:2}],2:[function(e,t,r){function o(e){var t,o=0;for(t in e)o=(o<<5)-o+e.charCodeAt(t),o|=0;return r.colors[Math.abs(o)%r.colors.length]}function n(e){function t(){if(t.enabled){var e=t,o=+new Date,n=o-(c||o);e.diff=n,e.prev=c,e.curr=o,c=o;for(var i=new Array(arguments.length),a=0;a<i.length;a++)i[a]=arguments[a];i[0]=r.coerce(i[0]),"string"!=typeof i[0]&&i.unshift("%O");var s=0;i[0]=i[0].replace(/%([a-zA-Z%])/g,function(t,o){if("%%"===t)return t;s++;var n=r.formatters[o];if("function"==typeof n){var a=i[s];t=n.call(e,a),i.splice(s,1),s--}return t}),r.formatArgs.call(e,i);var u=t.log||r.log||console.log.bind(console);u.apply(e,i)}}return t.namespace=e,t.enabled=r.enabled(e),t.useColors=r.useColors(),t.color=o(e),"function"==typeof r.init&&r.init(t),t}function i(e){r.save(e),r.names=[],r.skips=[];for(var t=("string"==typeof e?e:"").split(/[\s,]+/),o=t.length,n=0;n<o;n++)t[n]&&(e=t[n].replace(/\*/g,".*?"),"-"===e[0]?r.skips.push(new RegExp("^"+e.substr(1)+"$")):r.names.push(new 
RegExp("^"+e+"$")))}function a(){r.enable("")}function s(e){var t,o;for(t=0,o=r.skips.length;t<o;t++)if(r.skips[t].test(e))return!1;for(t=0,o=r.names.length;t<o;t++)if(r.names[t].test(e))return!0;return!1}function u(e){return e instanceof Error?e.stack||e.message:e}r=t.exports=n.debug=n["default"]=n,r.coerce=u,r.disable=a,r.enable=i,r.enabled=s,r.humanize=e(8),r.names=[],r.skips=[],r.formatters={};var c},{8:8}],3:[function(t,r,o){(function(n,i){!function(t,n){"object"==typeof o&&"undefined"!=typeof r?r.exports=n():"function"==typeof e&&e.amd?e(n):t.ES6Promise=n()}(this,function(){"use strict";function e(e){var t=typeof e;return null!==e&&("object"===t||"function"===t)}function r(e){return"function"==typeof e}function o(e){Q=e}function a(e){Y=e}function s(){return function(){return n.nextTick(f)}}function u(){return"undefined"!=typeof G?function(){G(f)}:p()}function c(){var e=0,t=new Z(f),r=document.createTextNode("");return t.observe(r,{characterData:!0}),function(){r.data=e=++e%2}}function l(){var e=new MessageChannel;return e.port1.onmessage=f,function(){return e.port2.postMessage(0)}}function p(){var e=setTimeout;return function(){return e(f,1)}}function f(){for(var e=0;e<V;e+=2){var t=re[e],r=re[e+1];t(r),re[e]=void 0,re[e+1]=void 0}V=0}function d(){try{var e=t,r=e("vertx");return G=r.runOnLoop||r.runOnContext,u()}catch(o){return p()}}function h(e,t){var r=arguments,o=this,n=new this.constructor(m);void 0===n[ne]&&U(n);var i=o._state;return i?!function(){var e=r[i-1];Y(function(){return q(i,n,e,o._result)})}():k(o,n,e,t),n}function y(e){var t=this;if(e&&"object"==typeof e&&e.constructor===t)return e;var r=new t(m);return S(r,e),r}function m(){}function g(){return new TypeError("You cannot resolve a promise with itself")}function v(){return new TypeError("A promises callback cannot return that same promise.")}function b(e){try{return e.then}catch(t){return ue.error=t,ue}}function w(e,t,r,o){try{e.call(t,r,o)}catch(n){return n}}function _(e,t,r){Y(function(e){var 
o=!1,n=w(r,t,function(r){o||(o=!0,t!==r?S(e,r):j(e,r))},function(t){o||(o=!0,O(e,t))},"Settle: "+(e._label||" unknown promise"));!o&&n&&(o=!0,O(e,n))},e)}function x(e,t){t._state===ae?j(e,t._result):t._state===se?O(e,t._result):k(t,void 0,function(t){return S(e,t)},function(t){return O(e,t)})}function T(e,t,o){t.constructor===e.constructor&&o===h&&t.constructor.resolve===y?x(e,t):o===ue?(O(e,ue.error),ue.error=null):void 0===o?j(e,t):r(o)?_(e,t,o):j(e,t)}function S(t,r){t===r?O(t,g()):e(r)?T(t,r,b(r)):j(t,r)}function A(e){e._onerror&&e._onerror(e._result),R(e)}function j(e,t){e._state===ie&&(e._result=t,e._state=ae,0!==e._subscribers.length&&Y(R,e))}function O(e,t){e._state===ie&&(e._state=se,e._result=t,Y(A,e))}function k(e,t,r,o){var n=e._subscribers,i=n.length;e._onerror=null,n[i]=t,n[i+ae]=r,n[i+se]=o,0===i&&e._state&&Y(R,e)}function R(e){var t=e._subscribers,r=e._state;if(0!==t.length){for(var o=void 0,n=void 0,i=e._result,a=0;a<t.length;a+=3)o=t[a],n=t[a+r],o?q(r,o,n,i):n(i);e._subscribers.length=0}}function C(){this.error=null}function I(e,t){try{return e(t)}catch(r){return ce.error=r,ce}}function q(e,t,o,n){var i=r(o),a=void 0,s=void 0,u=void 0,c=void 0;if(i){if(a=I(o,n),a===ce?(c=!0,s=a.error,a.error=null):u=!0,t===a)return void O(t,v())}else a=n,u=!0;t._state!==ie||(i&&u?S(t,a):c?O(t,s):e===ae?j(t,a):e===se&&O(t,a))}function N(e,t){try{t(function(t){S(e,t)},function(t){O(e,t)})}catch(r){O(e,r)}}function E(){return le++}function U(e){e[ne]=le++,e._state=void 0,e._result=void 0,e._subscribers=[]}function P(e,t){this._instanceConstructor=e,this.promise=new e(m),this.promise[ne]||U(this.promise),X(t)?(this.length=t.length,this._remaining=t.length,this._result=new Array(this.length),0===this.length?j(this.promise,this._result):(this.length=this.length||0,this._enumerate(t),0===this._remaining&&j(this.promise,this._result))):O(this.promise,D())}function D(){return new Error("Array Methods must be provided an Array")}function H(e){return new 
P(this,e).promise}function F(e){var t=this;return new t(X(e)?function(r,o){for(var n=e.length,i=0;i<n;i++)t.resolve(e[i]).then(r,o)}:function(e,t){return t(new TypeError("You must pass an array to race."))})}function J(e){var t=this,r=new t(m);return O(r,e),r}function M(){throw new TypeError("You must pass a resolver function as the first argument to the promise constructor")}function $(){throw new TypeError("Failed to construct 'Promise': Please use the 'new' operator, this object constructor cannot be called as a function.")}function L(e){this[ne]=E(),this._result=this._state=void 0,this._subscribers=[],m!==e&&("function"!=typeof e&&M(),this instanceof L?N(this,e):$())}function B(){var e=void 0;if("undefined"!=typeof i)e=i;else if("undefined"!=typeof self)e=self;else try{e=Function("return this")()}catch(t){throw new Error("polyfill failed because global object is unavailable in this environment")}var r=e.Promise;if(r){var o=null;try{o=Object.prototype.toString.call(r.resolve())}catch(t){}if("[object Promise]"===o&&!r.cast)return}e.Promise=L}var K=void 0;K=Array.isArray?Array.isArray:function(e){return"[object Array]"===Object.prototype.toString.call(e)};var X=K,V=0,G=void 0,Q=void 0,Y=function(e,t){re[V]=e,re[V+1]=t,V+=2,2===V&&(Q?Q(f):oe())},z="undefined"!=typeof window?window:void 0,W=z||{},Z=W.MutationObserver||W.WebKitMutationObserver,ee="undefined"==typeof self&&"undefined"!=typeof n&&"[object process]"==={}.toString.call(n),te="undefined"!=typeof Uint8ClampedArray&&"undefined"!=typeof importScripts&&"undefined"!=typeof MessageChannel,re=new Array(1e3),oe=void 0;oe=ee?s():Z?c():te?l():void 0===z&&"function"==typeof t?d():p();var ne=Math.random().toString(36).substring(16),ie=void 0,ae=1,se=2,ue=new C,ce=new C,le=0;return P.prototype._enumerate=function(e){for(var t=0;this._state===ie&&t<e.length;t++)this._eachEntry(e[t],t)},P.prototype._eachEntry=function(e,t){var r=this._instanceConstructor,o=r.resolve;if(o===y){var 
n=b(e);if(n===h&&e._state!==ie)this._settledAt(e._state,t,e._result);else if("function"!=typeof n)this._remaining--,this._result[t]=e;else if(r===L){var i=new r(m);T(i,e,n),this._willSettleAt(i,t)}else this._willSettleAt(new r(function(t){return t(e)}),t)}else this._willSettleAt(o(e),t)},P.prototype._settledAt=function(e,t,r){var o=this.promise;o._state===ie&&(this._remaining--,e===se?O(o,r):this._result[t]=r),0===this._remaining&&j(o,this._result)},P.prototype._willSettleAt=function(e,t){var r=this;k(e,void 0,function(e){return r._settledAt(ae,t,e)},function(e){return r._settledAt(se,t,e)})},L.all=H,L.race=F,L.resolve=y,L.reject=J,L._setScheduler=o,L._setAsap=a,L._asap=Y,L.prototype={constructor:L,then:h,"catch":function(e){return this.then(null,e)}},L.polyfill=B,L.Promise=L,L})}).call(this,t(11),"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{11:11}],4:[function(e,t,r){var o=Object.prototype.hasOwnProperty,n=Object.prototype.toString;t.exports=function(e,t,r){if("[object Function]"!==n.call(t))throw new TypeError("iterator must be a function");var i=e.length;if(i===+i)for(var a=0;a<i;a++)t.call(r,e[a],a,e);else for(var s in e)o.call(e,s)&&t.call(r,e[s],s,e)}},{}],5:[function(e,t,r){(function(e){var r;r="undefined"!=typeof window?window:"undefined"!=typeof e?e:"undefined"!=typeof self?self:{},t.exports=r}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{}],6:[function(e,t,r){"function"==typeof Object.create?t.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:t.exports=function(e,t){e.super_=t;var r=function(){};r.prototype=t.prototype,e.prototype=new r,e.prototype.constructor=e}},{}],7:[function(e,t,r){var o={}.toString;t.exports=Array.isArray||function(e){return"[object Array]"==o.call(e)}},{}],8:[function(e,t,r){function 
o(e){if(e=String(e),!(e.length>100)){var t=/^((?:\d+)?\.?\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|years?|yrs?|y)?$/i.exec(e);if(t){var r=parseFloat(t[1]),o=(t[2]||"ms").toLowerCase();switch(o){case"years":case"year":case"yrs":case"yr":case"y":return r*p;case"days":case"day":case"d":return r*l;case"hours":case"hour":case"hrs":case"hr":case"h":return r*c;case"minutes":case"minute":case"mins":case"min":case"m":return r*u;case"seconds":case"second":case"secs":case"sec":case"s":return r*s;case"milliseconds":case"millisecond":case"msecs":case"msec":case"ms":return r;default:return}}}}function n(e){return e>=l?Math.round(e/l)+"d":e>=c?Math.round(e/c)+"h":e>=u?Math.round(e/u)+"m":e>=s?Math.round(e/s)+"s":e+"ms"}function i(e){return a(e,l,"day")||a(e,c,"hour")||a(e,u,"minute")||a(e,s,"second")||e+" ms"}function a(e,t,r){if(!(e<t))return e<1.5*t?Math.floor(e/t)+" "+r:Math.ceil(e/t)+" "+r+"s"}var s=1e3,u=60*s,c=60*u,l=24*c,p=365.25*l;t.exports=function(e,t){t=t||{};var r=typeof e;if("string"===r&&e.length>0)return o(e);if("number"===r&&isNaN(e)===!1)return t["long"]?i(e):n(e);throw new Error("val is not a non-empty string or a valid number. 
val="+JSON.stringify(e))}},{}],9:[function(e,t,r){"use strict";var o=Object.prototype.hasOwnProperty,n=Object.prototype.toString,i=Array.prototype.slice,a=e(10),s=Object.prototype.propertyIsEnumerable,u=!s.call({toString:null},"toString"),c=s.call(function(){},"prototype"),l=["toString","toLocaleString","valueOf","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","constructor"],p=function(e){var t=e.constructor;return t&&t.prototype===e},f={$console:!0,$external:!0,$frame:!0,$frameElement:!0,$frames:!0,$innerHeight:!0,$innerWidth:!0,$outerHeight:!0,$outerWidth:!0,$pageXOffset:!0,$pageYOffset:!0,$parent:!0,$scrollLeft:!0,$scrollTop:!0,$scrollX:!0,$scrollY:!0,$self:!0,$webkitIndexedDB:!0,$webkitStorageInfo:!0,$window:!0},d=function(){if("undefined"==typeof window)return!1;for(var e in window)try{if(!f["$"+e]&&o.call(window,e)&&null!==window[e]&&"object"==typeof window[e])try{p(window[e])}catch(t){return!0}}catch(t){return!0}return!1}(),h=function(e){if("undefined"==typeof window||!d)return p(e);try{return p(e)}catch(t){return!1}},y=function(e){var t=null!==e&&"object"==typeof e,r="[object Function]"===n.call(e),i=a(e),s=t&&"[object String]"===n.call(e),p=[];if(!t&&!r&&!i)throw new TypeError("Object.keys called on a non-object");var f=c&&r;if(s&&e.length>0&&!o.call(e,0))for(var d=0;d<e.length;++d)p.push(String(d));if(i&&e.length>0)for(var y=0;y<e.length;++y)p.push(String(y));else for(var m in e)f&&"prototype"===m||!o.call(e,m)||p.push(String(m));if(u)for(var g=h(e),v=0;v<l.length;++v)g&&"constructor"===l[v]||!o.call(e,l[v])||p.push(l[v]);return p};y.shim=function(){if(Object.keys){var e=function(){return 2===(Object.keys(arguments)||"").length}(1,2);if(!e){var t=Object.keys;Object.keys=function(e){return t(a(e)?i.call(e):e)}}}else Object.keys=y;return Object.keys||y},t.exports=y},{10:10}],10:[function(e,t,r){"use strict";var o=Object.prototype.toString;t.exports=function(e){var t=o.call(e),r="[object Arguments]"===t;return r||(r="[object 
Array]"!==t&&null!==e&&"object"==typeof e&&"number"==typeof e.length&&e.length>=0&&"[object Function]"===o.call(e.callee)),r}},{}],11:[function(e,t,r){function o(){throw new Error("setTimeout has not been defined")}function n(){throw new Error("clearTimeout has not been defined")}function i(e){if(p===setTimeout)return setTimeout(e,0);if((p===o||!p)&&setTimeout)return p=setTimeout,setTimeout(e,0);try{return p(e,0)}catch(t){try{return p.call(null,e,0)}catch(t){return p.call(this,e,0)}}}function a(e){if(f===clearTimeout)return clearTimeout(e);if((f===n||!f)&&clearTimeout)return f=clearTimeout,clearTimeout(e);try{return f(e)}catch(t){try{return f.call(null,e)}catch(t){return f.call(this,e)}}}function s(){m&&h&&(m=!1,h.length?y=h.concat(y):g=-1,y.length&&u())}function u(){if(!m){var e=i(s);m=!0;for(var t=y.length;t;){for(h=y,y=[];++g<t;)h&&h[g].run();g=-1,t=y.length}h=null,m=!1,a(e)}}function c(e,t){this.fun=e,this.array=t}function l(){}var p,f,d=t.exports={};!function(){try{p="function"==typeof setTimeout?setTimeout:o}catch(e){p=o}try{f="function"==typeof clearTimeout?clearTimeout:n}catch(e){f=n}}();var h,y=[],m=!1,g=-1;d.nextTick=function(e){var t=new Array(arguments.length-1);if(arguments.length>1)for(var r=1;r<arguments.length;r++)t[r-1]=arguments[r];y.push(new c(e,t)),1!==y.length||m||i(u)},c.prototype.run=function(){this.fun.apply(null,this.array)},d.title="browser",d.browser=!0,d.env={},d.argv=[],d.version="",d.versions={},d.on=l,d.addListener=l,d.once=l,d.off=l,d.removeListener=l,d.removeAllListeners=l,d.emit=l,d.binding=function(e){throw new Error("process.binding is not supported")},d.cwd=function(){return"/"},d.chdir=function(e){throw new Error("process.chdir is not supported")},d.umask=function(){return 0}},{}],12:[function(e,t,r){"use strict";function o(e,t){return Object.prototype.hasOwnProperty.call(e,t)}t.exports=function(e,t,r,i){t=t||"&",r=r||"=";var a={};if("string"!=typeof e||0===e.length)return a;var s=/\+/g;e=e.split(t);var 
u=1e3;i&&"number"==typeof i.maxKeys&&(u=i.maxKeys);var c=e.length;u>0&&c>u&&(c=u);for(var l=0;l<c;++l){var p,f,d,h,y=e[l].replace(s,"%20"),m=y.indexOf(r);m>=0?(p=y.substr(0,m),f=y.substr(m+1)):(p=y,f=""),d=decodeURIComponent(p),h=decodeURIComponent(f),o(a,d)?n(a[d])?a[d].push(h):a[d]=[a[d],h]:a[d]=h}return a};var n=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)}},{}],13:[function(e,t,r){"use strict";function o(e,t){if(e.map)return e.map(t);for(var r=[],o=0;o<e.length;o++)r.push(t(e[o],o));return r}var n=function(e){switch(typeof e){case"string":return e;case"boolean":return e?"true":"false";case"number":return isFinite(e)?e:"";default:return""}};t.exports=function(e,t,r,s){return t=t||"&",r=r||"=",null===e&&(e=void 0),"object"==typeof e?o(a(e),function(a){var s=encodeURIComponent(n(a))+r;return i(e[a])?o(e[a],function(e){return s+encodeURIComponent(n(e))}).join(t):s+encodeURIComponent(n(e[a]))}).join(t):s?encodeURIComponent(n(s))+r+encodeURIComponent(n(e)):""};var i=Array.isArray||function(e){return"[object Array]"===Object.prototype.toString.call(e)},a=Object.keys||function(e){var t=[];for(var r in e)Object.prototype.hasOwnProperty.call(e,r)&&t.push(r);return t}},{}],14:[function(e,t,r){"use strict";r.decode=r.parse=e(12),r.encode=r.stringify=e(13)},{12:12,13:13}],15:[function(e,t,r){(function(r){function o(t,r,o){var i=e(1)("algoliasearch"),a=e(22),s=e(7),c=e(27),l="Usage: algoliasearch(applicationID, apiKey, opts)";if(o._allowEmptyCredentials!==!0&&!t)throw new u.AlgoliaSearchError("Please provide an application ID. "+l);if(o._allowEmptyCredentials!==!0&&!r)throw new u.AlgoliaSearchError("Please provide an API key. 
"+l);this.applicationID=t,this.apiKey=r,this.hosts={read:[],write:[]},o=o||{},this._timeouts=o.timeouts||{connect:1e3,read:2e3,write:3e4},o.timeout&&(this._timeouts.connect=this._timeouts.read=this._timeouts.write=o.timeout);var p=o.protocol||"https:";if(/:$/.test(p)||(p+=":"),"http:"!==p&&"https:"!==p)throw new u.AlgoliaSearchError("protocol must be `http:` or `https:` (was `"+o.protocol+"`)");if(this._checkAppIdData(),o.hosts)s(o.hosts)?(this.hosts.read=a(o.hosts),this.hosts.write=a(o.hosts)):(this.hosts.read=a(o.hosts.read),this.hosts.write=a(o.hosts.write));else{var f=c(this._shuffleResult,function(e){return t+"-"+e+".algolianet.com"}),d=(o.dsn===!1?"":"-dsn")+".algolia.net";this.hosts.read=[this.applicationID+d].concat(f),this.hosts.write=[this.applicationID+".algolia.net"].concat(f)}this.hosts.read=c(this.hosts.read,n(p)),this.hosts.write=c(this.hosts.write,n(p)),this.extraHeaders={},this.cache=o._cache||{},this._ua=o._ua,this._useCache=!(void 0!==o._useCache&&!o._cache)||o._useCache,this._useRequestCache=this._useCache&&o._useRequestCache,this._useFallback=void 0===o.useFallback||o.useFallback,this._setTimeout=o._setTimeout,i("init done, %j",this)}function n(e){return function(t){return e+"//"+t.toLowerCase()}}function i(e){if(void 0===Array.prototype.toJSON)return JSON.stringify(e);var t=Array.prototype.toJSON;delete Array.prototype.toJSON;var r=JSON.stringify(e);return Array.prototype.toJSON=t,r}function a(e){for(var t,r,o=e.length;0!==o;)r=Math.floor(Math.random()*o),o-=1,t=e[o],e[o]=e[r],e[r]=t;return e}function s(e){var t={};for(var r in e)if(Object.prototype.hasOwnProperty.call(e,r)){var o;o="x-algolia-api-key"===r||"x-algolia-application-id"===r?"**hidden for security purposes**":e[r],t[r]=o}return t}t.exports=o;var u=e(25),c=e(26),l=e(16),p=e(31),f=500,d=r.env.RESET_APP_DATA_TIMER&&parseInt(r.env.RESET_APP_DATA_TIMER,10)||12e4;o.prototype.initIndex=function(e){return new 
l(this,e)},o.prototype.setExtraHeader=function(e,t){this.extraHeaders[e.toLowerCase()]=t},o.prototype.getExtraHeader=function(e){return this.extraHeaders[e.toLowerCase()]},o.prototype.unsetExtraHeader=function(e){delete this.extraHeaders[e.toLowerCase()]},o.prototype.addAlgoliaAgent=function(e){var t="; "+e;this._ua.indexOf(t)===-1&&(this._ua+=t)},o.prototype._jsonRequest=function(t){function r(e,n){function c(e){var t=e&&e.body&&e.body.message&&e.body.status||e.statusCode||e&&e.body&&200;d("received response: statusCode: %s, computed statusCode: %d, headers: %j",e.statusCode,t,e.headers);var r=2===Math.floor(t/100),o=new Date;if(w.push({currentHost:A,headers:s(p),content:a||null,contentLength:void 0!==a?a.length:null,method:n.method,timeouts:n.timeouts,url:n.url,startTime:T,endTime:o,duration:o-T,statusCode:t}),r)return m._useCache&&!m._useRequestCache&&y&&(y[l]=e.responseText),{responseText:e.responseText,body:e.body};var i=4!==Math.floor(t/100);if(i)return g+=1,_();d("unrecoverable error");var c=new u.AlgoliaSearchError(e.body&&e.body.message,{debugData:w,statusCode:t});return m._promise.reject(c)}function f(e){d("error: %s, stack: %s",e.message,e.stack);var r=new Date;return w.push({currentHost:A,headers:s(p),content:a||null,contentLength:void 0!==a?a.length:null,method:n.method,timeouts:n.timeouts,url:n.url,startTime:T,endTime:r,duration:r-T}),e instanceof u.AlgoliaSearchError||(e=new u.Unknown(e&&e.message,e)),g+=1,e instanceof u.Unknown||e instanceof u.UnparsableJSON||g>=m.hosts[t.hostType].length&&(v||!b)?(e.debugData=w,m._promise.reject(e)):e instanceof u.RequestTimeout?x():_()}function _(){return d("retrying request"),m._incrementHostIndex(t.hostType),r(e,n)}function x(){return d("retrying request with higher timeout"),m._incrementHostIndex(t.hostType),m._incrementTimeoutMultipler(),n.timeouts=m._getTimeoutsForRequest(t.hostType),r(e,n)}m._checkAppIdData();var T=new 
Date;if(m._useCache&&!m._useRequestCache&&(l=t.url),m._useCache&&!m._useRequestCache&&a&&(l+="_body_"+n.body),o(!m._useRequestCache,y,l)){d("serving response from cache");var S=y[l];return m._promise.resolve({body:JSON.parse(S),responseText:S})}if(g>=m.hosts[t.hostType].length)return!b||v?(d("could not get any response"),m._promise.reject(new u.AlgoliaSearchError("Cannot connect to the AlgoliaSearch API. Send an email to [email protected] to report and resolve the issue. Application id was: "+m.applicationID,{debugData:w}))):(d("switching to fallback"),g=0,n.method=t.fallback.method,n.url=t.fallback.url,n.jsonBody=t.fallback.body,n.jsonBody&&(n.body=i(n.jsonBody)),p=m._computeRequestHeaders({additionalUA:h,headers:t.headers}),n.timeouts=m._getTimeoutsForRequest(t.hostType),m._setHostIndexByType(0,t.hostType),v=!0,r(m._request.fallback,n));var A=m._getHostByType(t.hostType),j=A+n.url,O={body:n.body,jsonBody:n.jsonBody,method:n.method,headers:p,timeouts:n.timeouts,debug:d,forceAuthHeaders:n.forceAuthHeaders};return d("method: %s, url: %s, headers: %j, timeouts: %d",O.method,j,O.headers,O.timeouts),e===m._request.fallback&&d("using fallback"),e.call(m,j,O).then(c,f)}function o(e,t,r){return m._useCache&&e&&t&&void 0!==t[r]}function n(e,r){return o(m._useRequestCache,y,l)&&e["catch"](function(){delete y[l]}),"function"!=typeof t.callback?e.then(r):void e.then(function(e){c(function(){t.callback(null,r(e))},m._setTimeout||setTimeout)},function(e){c(function(){t.callback(e)},m._setTimeout||setTimeout)})}this._checkAppIdData();var a,l,p,d=e(1)("algoliasearch:"+t.url),h=t.additionalUA||"",y=t.cache,m=this,g=0,v=!1,b=m._useFallback&&m._request.fallback&&t.fallback;this.apiKey.length>f&&void 0!==t.body&&(void 0!==t.body.params||void 0!==t.body.requests)?(t.body.apiKey=this.apiKey,p=this._computeRequestHeaders({additionalUA:h,withApiKey:!1,headers:t.headers})):p=this._computeRequestHeaders({additionalUA:h,headers:t.headers}),void 0!==t.body&&(a=i(t.body)),d("request 
start");var w=[];if(m._useCache&&m._useRequestCache&&(l=t.url),m._useCache&&m._useRequestCache&&a&&(l+="_body_"+a),o(m._useRequestCache,y,l)){d("serving request from cache");var _=y[l],x="function"!=typeof _.then?m._promise.resolve({responseText:_}):_;return n(x,function(e){return JSON.parse(e.responseText)})}var T=r(m._request,{url:t.url,method:t.method,body:a,jsonBody:t.body,timeouts:m._getTimeoutsForRequest(t.hostType),forceAuthHeaders:t.forceAuthHeaders});return m._useCache&&m._useRequestCache&&y&&(y[l]=T),n(T,function(e){return e.body})},o.prototype._getSearchParams=function(e,t){if(void 0===e||null===e)return t;for(var r in e)null!==r&&void 0!==e[r]&&e.hasOwnProperty(r)&&(t+=""===t?"":"&",t+=r+"="+encodeURIComponent("[object Array]"===Object.prototype.toString.call(e[r])?i(e[r]):e[r]));return t},o.prototype._computeRequestHeaders=function(t){var r=e(4),o=t.additionalUA?this._ua+"; "+t.additionalUA:this._ua,n={"x-algolia-agent":o,"x-algolia-application-id":this.applicationID};return t.withApiKey!==!1&&(n["x-algolia-api-key"]=this.apiKey),this.userToken&&(n["x-algolia-usertoken"]=this.userToken),this.securityTags&&(n["x-algolia-tagfilters"]=this.securityTags),r(this.extraHeaders,function(e,t){n[t]=e}),t.headers&&r(t.headers,function(e,t){n[t]=e}),n},o.prototype.search=function(t,r,o){var n=e(7),i=e(27),a="Usage: client.search(arrayOfQueries[, callback])";if(!n(t))throw new Error(a);"function"==typeof r?(o=r,r={}):void 0===r&&(r={});var s=this,u={requests:i(t,function(e){var t="";return void 0!==e.query&&(t+="query="+encodeURIComponent(e.query)),{indexName:e.indexName,params:s._getSearchParams(e.params,t)}})},c=i(u.requests,function(e,t){return t+"="+encodeURIComponent("/1/indexes/"+encodeURIComponent(e.indexName)+"?"+e.params)}).join("&"),l="/1/indexes/*/queries";return void 
0!==r.strategy&&(u.strategy=r.strategy),this._jsonRequest({cache:this.cache,method:"POST",url:l,body:u,hostType:"read",fallback:{method:"GET",url:"/1/indexes/*",body:{params:c}},callback:o})},o.prototype.searchForFacetValues=function(t){var r=e(7),o=e(27),n="Usage: client.searchForFacetValues([{indexName, params: {facetName, facetQuery, ...params}}, ...queries])";if(!r(t))throw new Error(n);var i=this;return i._promise.all(o(t,function(t){if(!t||void 0===t.indexName||void 0===t.params.facetName||void 0===t.params.facetQuery)throw new Error(n);var r=e(22),o=e(29),a=t.indexName,s=t.params,u=s.facetName,c=o(r(s),function(e){return"facetName"===e}),l=i._getSearchParams(c,"");return i._jsonRequest({cache:i.cache,method:"POST",url:"/1/indexes/"+encodeURIComponent(a)+"/facets/"+encodeURIComponent(u)+"/query",hostType:"read",body:{params:l}})}))},o.prototype.setSecurityTags=function(e){if("[object Array]"===Object.prototype.toString.call(e)){for(var t=[],r=0;r<e.length;++r)if("[object Array]"===Object.prototype.toString.call(e[r])){for(var o=[],n=0;n<e[r].length;++n)o.push(e[r][n]);t.push("("+o.join(",")+")")}else t.push(e[r]);e=t.join(",")}this.securityTags=e},o.prototype.setUserToken=function(e){this.userToken=e},o.prototype.clearCache=function(){this.cache={}},o.prototype.setRequestTimeout=function(e){e&&(this._timeouts.connect=this._timeouts.read=this._timeouts.write=e)},o.prototype.setTimeouts=function(e){this._timeouts=e},o.prototype.getTimeouts=function(){return this._timeouts},o.prototype._getAppIdData=function(){var e=p.get(this.applicationID);return null!==e&&this._cacheAppIdData(e),e},o.prototype._setAppIdData=function(e){return e.lastChange=(new Date).getTime(),this._cacheAppIdData(e),p.set(this.applicationID,e)},o.prototype._checkAppIdData=function(){var e=this._getAppIdData(),t=(new Date).getTime();return null===e||t-e.lastChange>d?this._resetInitialAppIdData(e):e},o.prototype._resetInitialAppIdData=function(e){var t=e||{};return 
t.hostIndexes={read:0,write:0},t.timeoutMultiplier=1,t.shuffleResult=t.shuffleResult||a([1,2,3]),this._setAppIdData(t)},o.prototype._cacheAppIdData=function(e){this._hostIndexes=e.hostIndexes,this._timeoutMultiplier=e.timeoutMultiplier,this._shuffleResult=e.shuffleResult},o.prototype._partialAppIdDataUpdate=function(t){var r=e(4),o=this._getAppIdData();return r(t,function(e,t){o[t]=e}),this._setAppIdData(o)},o.prototype._getHostByType=function(e){return this.hosts[e][this._getHostIndexByType(e)]},o.prototype._getTimeoutMultiplier=function(){return this._timeoutMultiplier},o.prototype._getHostIndexByType=function(e){return this._hostIndexes[e]},o.prototype._setHostIndexByType=function(t,r){var o=e(22),n=o(this._hostIndexes);return n[r]=t,this._partialAppIdDataUpdate({hostIndexes:n}),t},o.prototype._incrementHostIndex=function(e){return this._setHostIndexByType((this._getHostIndexByType(e)+1)%this.hosts[e].length,e)},o.prototype._incrementTimeoutMultipler=function(){var e=Math.max(this._timeoutMultiplier+1,4);return this._partialAppIdDataUpdate({timeoutMultiplier:e})},o.prototype._getTimeoutsForRequest=function(e){return{connect:this._timeouts.connect*this._timeoutMultiplier,complete:this._timeouts[e]*this._timeoutMultiplier}}}).call(this,e(11))},{1:1,11:11,16:16,22:22,25:25,26:26,27:27,29:29,31:31,4:4,7:7}],16:[function(e,t,r){function o(e,t){this.indexName=t,this.as=e,this.typeAheadArgs=null,this.typeAheadValueOption=null,this.cache={}}var n=e(21),i=e(23),a=e(24);t.exports=o,o.prototype.clearCache=function(){this.cache={}},o.prototype.search=n("query"),o.prototype.similarSearch=i(n("similarQuery"),a("index.similarSearch(query[, callback])","index.search({ similarQuery: query }[, callback])")),o.prototype.browse=function(t,r,o){var n,i,a=e(28),s=this;0===arguments.length||1===arguments.length&&"function"==typeof arguments[0]?(n=0,o=arguments[0],t=void 0):"number"==typeof arguments[0]?(n=arguments[0],"number"==typeof arguments[1]?i=arguments[1]:"function"==typeof 
arguments[1]&&(o=arguments[1],i=void 0),t=void 0,r=void 0):"object"==typeof arguments[0]?("function"==typeof arguments[1]&&(o=arguments[1]),r=arguments[0],t=void 0):"string"==typeof arguments[0]&&"function"==typeof arguments[1]&&(o=arguments[1],r=void 0),r=a({},r||{},{page:n,hitsPerPage:i,query:t});var u=this.as._getSearchParams(r,"");return this.as._jsonRequest({method:"POST",url:"/1/indexes/"+encodeURIComponent(s.indexName)+"/browse",body:{params:u},hostType:"read",callback:o})},o.prototype.browseFrom=function(e,t){return this.as._jsonRequest({method:"POST",url:"/1/indexes/"+encodeURIComponent(this.indexName)+"/browse",body:{cursor:e},hostType:"read",callback:t})},o.prototype.searchForFacetValues=function(t,r){var o=e(22),n=e(29),i="Usage: index.searchForFacetValues({facetName, facetQuery, ...params}[, callback])";if(void 0===t.facetName||void 0===t.facetQuery)throw new Error(i);var a=t.facetName,s=n(o(t),function(e){return"facetName"===e}),u=this.as._getSearchParams(s,"");return this.as._jsonRequest({method:"POST",url:"/1/indexes/"+encodeURIComponent(this.indexName)+"/facets/"+encodeURIComponent(a)+"/query",hostType:"read",body:{params:u},callback:r})},o.prototype.searchFacet=i(function(e,t){return this.searchForFacetValues(e,t)},a("index.searchFacet(params[, callback])","index.searchForFacetValues(params[, callback])")),o.prototype._search=function(e,t,r,o){return this.as._jsonRequest({cache:this.cache,method:"POST",url:t||"/1/indexes/"+encodeURIComponent(this.indexName)+"/query",body:{params:e},hostType:"read",fallback:{method:"GET",url:"/1/indexes/"+encodeURIComponent(this.indexName),body:{params:e}},callback:r,additionalUA:o})},o.prototype.getObject=function(e,t,r){var o=this;1!==arguments.length&&"function"!=typeof t||(r=t,t=void 0);var n="";if(void 0!==t){n="?attributes=";for(var i=0;i<t.length;++i)0!==i&&(n+=","),n+=t[i]}return 
this.as._jsonRequest({method:"GET",url:"/1/indexes/"+encodeURIComponent(o.indexName)+"/"+encodeURIComponent(e)+n,hostType:"read",callback:r})},o.prototype.getObjects=function(t,r,o){var n=e(7),i=e(27),a="Usage: index.getObjects(arrayOfObjectIDs[, callback])";if(!n(t))throw new Error(a);var s=this;1!==arguments.length&&"function"!=typeof r||(o=r,
r=void 0);var u={requests:i(t,function(e){var t={indexName:s.indexName,objectID:e};return r&&(t.attributesToRetrieve=r.join(",")),t})};return this.as._jsonRequest({method:"POST",url:"/1/indexes/*/objects",hostType:"read",body:u,callback:o})},o.prototype.as=null,o.prototype.indexName=null,o.prototype.typeAheadArgs=null,o.prototype.typeAheadValueOption=null},{21:21,22:22,23:23,24:24,27:27,28:28,29:29,7:7}],17:[function(e,t,r){"use strict";var o=e(15),n=e(18);t.exports=n(o,"Browser (lite)")},{15:15,18:18}],18:[function(e,t,r){(function(r){"use strict";var o=e(5),n=o.Promise||e(3).Promise;t.exports=function(t,i){function a(t,r,o){var n=e(22);return o=n(o||{}),o._ua=o._ua||a.ua,new s(t,r,o)}function s(){t.apply(this,arguments)}var u=e(6),c=e(25),l=e(19),p=e(20),f=e(30);i=i||"","debug"===r.env.NODE_ENV&&e(1).enable("algoliasearch*"),a.version=e(32),a.ua="Algolia for JavaScript ("+a.version+"); "+i,a.initPlaces=f(a),o.__algolia={debug:e(1),algoliasearch:a};var d={hasXMLHttpRequest:"XMLHttpRequest"in o,hasXDomainRequest:"XDomainRequest"in o};return d.hasXMLHttpRequest&&(d.cors="withCredentials"in new XMLHttpRequest),u(s,t),s.prototype._request=function(e,t){return new n(function(r,o){function n(){if(!h){clearTimeout(f);var e;try{e={body:JSON.parse(m.responseText),responseText:m.responseText,statusCode:m.status,headers:m.getAllResponseHeaders&&m.getAllResponseHeaders()||{}}}catch(t){e=new c.UnparsableJSON({more:m.responseText})}e instanceof c.UnparsableJSON?o(e):r(e)}}function i(e){h||(clearTimeout(f),o(new c.Network({more:e})))}function a(){h=!0,m.abort(),o(new c.RequestTimeout)}function s(){g=!0,clearTimeout(f),f=setTimeout(a,t.timeouts.complete)}function u(){g||s()}function p(){!g&&m.readyState>1&&s()}if(!d.cors&&!d.hasXDomainRequest)return void o(new c.Network("CORS not supported"));e=l(e,t.headers);var f,h,y=t.body,m=d.cors?new XMLHttpRequest:new XDomainRequest,g=!1;f=setTimeout(a,t.timeouts.connect),m.onprogress=u,"onreadystatechange"in 
m&&(m.onreadystatechange=p),m.onload=n,m.onerror=i,m instanceof XMLHttpRequest?(m.open(t.method,e,!0),t.forceAuthHeaders&&(m.setRequestHeader("x-algolia-application-id",t.headers["x-algolia-application-id"]),m.setRequestHeader("x-algolia-api-key",t.headers["x-algolia-api-key"]))):m.open(t.method,e),d.cors&&(y&&("POST"===t.method?m.setRequestHeader("content-type","application/x-www-form-urlencoded"):m.setRequestHeader("content-type","application/json")),m.setRequestHeader("accept","application/json")),y?m.send(y):m.send()})},s.prototype._request.fallback=function(e,t){return e=l(e,t.headers),new n(function(r,o){p(e,t,function(e,t){return e?void o(e):void r(t)})})},s.prototype._promise={reject:function(e){return n.reject(e)},resolve:function(e){return n.resolve(e)},delay:function(e){return new n(function(t){setTimeout(t,e)})},all:function(e){return n.all(e)}},a}}).call(this,e(11))},{1:1,11:11,19:19,20:20,22:22,25:25,3:3,30:30,32:32,5:5,6:6}],19:[function(e,t,r){"use strict";function o(e,t){return e+=/\?/.test(e)?"&":"?",e+n(t)}t.exports=o;var n=e(13)},{13:13}],20:[function(e,t,r){"use strict";function o(e,t,r){function o(){t.debug("JSONP: success"),m||f||(m=!0,p||(t.debug("JSONP: Fail. 
Script loaded but did not call the callback"),s(),r(new n.JSONPScriptFail)))}function a(){"loaded"!==this.readyState&&"complete"!==this.readyState||o()}function s(){clearTimeout(g),h.onload=null,h.onreadystatechange=null,h.onerror=null,d.removeChild(h)}function u(){try{delete window[y],delete window[y+"_loaded"]}catch(e){window[y]=window[y+"_loaded"]=void 0}}function c(){t.debug("JSONP: Script timeout"),f=!0,s(),r(new n.RequestTimeout)}function l(){t.debug("JSONP: Script error"),m||f||(s(),r(new n.JSONPScriptError))}if("GET"!==t.method)return void r(new Error("Method "+t.method+" "+e+" is not supported by JSONP."));t.debug("JSONP: start");var p=!1,f=!1;i+=1;var d=document.getElementsByTagName("head")[0],h=document.createElement("script"),y="algoliaJSONP_"+i,m=!1;window[y]=function(e){return u(),f?void t.debug("JSONP: Late answer, ignoring"):(p=!0,s(),void r(null,{body:e,responseText:JSON.stringify(e)}))},e+="&callback="+y,t.jsonBody&&t.jsonBody.params&&(e+="&"+t.jsonBody.params);var g=setTimeout(c,t.timeouts.complete);h.onreadystatechange=a,h.onload=o,h.onerror=l,h.async=!0,h.defer=!0,h.src=e,d.appendChild(h)}t.exports=o;var n=e(25),i=0},{25:25}],21:[function(e,t,r){function o(e,t){return function(r,o,i){if("function"==typeof r&&"object"==typeof o||"object"==typeof i)throw new n.AlgoliaSearchError("index.search usage is index.search(query, params, cb)");0===arguments.length||"function"==typeof r?(i=r,r=""):1!==arguments.length&&"function"!=typeof o||(i=o,o=void 0),"object"==typeof r&&null!==r?(o=r,r=void 0):void 0!==r&&null!==r||(r="");var a="";void 0!==r&&(a+=e+"="+encodeURIComponent(r));var s;return void 0!==o&&(o.additionalUA&&(s=o.additionalUA,delete o.additionalUA),a=this.as._getSearchParams(o,a)),this._search(a,t,i,s)}}t.exports=o;var n=e(25)},{25:25}],22:[function(e,t,r){t.exports=function(e){return JSON.parse(JSON.stringify(e))}},{}],23:[function(e,t,r){t.exports=function(e,t){function r(){return o||(console.warn(t),o=!0),e.apply(this,arguments)}var 
o=!1;return r}},{}],24:[function(e,t,r){t.exports=function(e,t){var r=e.toLowerCase().replace(/[\.\(\)]/g,"");return"algoliasearch: `"+e+"` was replaced by `"+t+"`. Please see https://github.com/algolia/algoliasearch-client-javascript/wiki/Deprecated#"+r}},{}],25:[function(e,t,r){"use strict";function o(t,r){var o=e(4),n=this;"function"==typeof Error.captureStackTrace?Error.captureStackTrace(this,this.constructor):n.stack=(new Error).stack||"Cannot get a stacktrace, browser is too old",this.name="AlgoliaSearchError",this.message=t||"Unknown error",r&&o(r,function(e,t){n[t]=e})}function n(e,t){function r(){var r=Array.prototype.slice.call(arguments,0);"string"!=typeof r[0]&&r.unshift(t),o.apply(this,r),this.name="AlgoliaSearch"+e+"Error"}return i(r,o),r}var i=e(6);i(o,Error),t.exports={AlgoliaSearchError:o,UnparsableJSON:n("UnparsableJSON","Could not parse the incoming response as JSON, see err.more for details"),RequestTimeout:n("RequestTimeout","Request timed out before getting a response"),Network:n("Network","Network issue, see err.more for details"),JSONPScriptFail:n("JSONPScriptFail","<script> was loaded but did not call our provided callback"),ValidUntilNotFound:n("ValidUntilNotFound","The SecuredAPIKey does not have a validUntil parameter."),JSONPScriptError:n("JSONPScriptError","<script> unable to load due to an `error` event on it"),ObjectNotFound:n("ObjectNotFound","Object not found"),Unknown:n("Unknown","Unknown error occured")}},{4:4,6:6}],26:[function(e,t,r){t.exports=function(e,t){t(e,0)}},{}],27:[function(e,t,r){var o=e(4);t.exports=function(e,t){var r=[];return o(e,function(o,n){r.push(t(o,n,e))}),r}},{4:4}],28:[function(e,t,r){var o=e(4);t.exports=function n(e){var t=Array.prototype.slice.call(arguments);return o(t,function(t){for(var r in t)t.hasOwnProperty(r)&&("object"==typeof e[r]&&"object"==typeof t[r]?e[r]=n({},e[r],t[r]):void 0!==t[r]&&(e[r]=t[r]))}),e}},{4:4}],29:[function(e,t,r){t.exports=function(t,r){var o=e(9),n=e(4),i={};return 
n(o(t),function(e){r(e)!==!0&&(i[e]=t[e])}),i}},{4:4,9:9}],30:[function(e,t,r){function o(t){return function(r,o,a){var s=e(22);a=a&&s(a)||{},a.hosts=a.hosts||["places-dsn.algolia.net","places-1.algolianet.com","places-2.algolianet.com","places-3.algolianet.com"],0!==arguments.length&&"object"!=typeof r&&void 0!==r||(r="",o="",a._allowEmptyCredentials=!0);var u=t(r,o,a),c=u.initIndex("places");return c.search=i("query","/1/places/query"),c.reverse=function(e,t){var r=n.encode(e);return this.as._jsonRequest({method:"GET",url:"/1/places/reverse?"+r,hostType:"read",callback:t})},c.getObject=function(e,t){return this.as._jsonRequest({method:"GET",url:"/1/places/"+encodeURIComponent(e),hostType:"read",callback:t})},c}}t.exports=o;var n=e(14),i=e(21)},{14:14,21:21,22:22}],31:[function(e,t,r){(function(r){function o(e,t){return u("localStorage failed with",t),a(),s=l,s.get(e)}function n(e,t){return 1===arguments.length?s.get(e):s.set(e,t)}function i(){try{return"localStorage"in r&&null!==r.localStorage&&(r.localStorage[c]||r.localStorage.setItem(c,JSON.stringify({})),!0)}catch(e){return!1}}function a(){try{r.localStorage.removeItem(c)}catch(e){}}var s,u=e(1)("algoliasearch:src/hostIndexState.js"),c="algoliasearch-client-js",l={state:{},set:function(e,t){return this.state[e]=t,this.state[e]},get:function(e){return this.state[e]||null}},p={set:function(e,t){l.set(e,t);try{var n=JSON.parse(r.localStorage[c]);return n[e]=t,r.localStorage[c]=JSON.stringify(n),n[e]}catch(i){return o(e,i)}},get:function(e){try{return JSON.parse(r.localStorage[c])[e]||null}catch(t){return o(e,t)}}};s=i()?p:l,t.exports={get:n,set:n,supportsLocalStorage:i}}).call(this,"undefined"!=typeof global?global:"undefined"!=typeof self?self:"undefined"!=typeof window?window:{})},{1:1}],32:[function(e,t,r){"use strict";t.exports="3.35.1"},{}]},{},[17])(17)}); | zopyx.typesense | /zopyx.typesense-1.0.0a8.tar.gz/zopyx.typesense-1.0.0a8/src/zopyx/typesense/browser/static/algoliasearchLite.min.js | 
algoliasearchLite.min.js |
Adding and updating locales
---------------------------
For every language you want to translate into, you need a
locales/[language]/LC_MESSAGES/collective.task.po
(e.g. locales/de/LC_MESSAGES/collective.task.po)
For German
.. code-block:: console
$ mkdir de
For updating locales
.. code-block:: console
$ ./bin/update_locale
Note
----
The script uses the gettext package for internationalization.
Install it before running the script.
On macOS
--------
.. code-block:: console
$ brew install gettext
On Windows
----------
see https://mlocati.github.io/articles/gettext-iconv-windows.html
| zopyx.typesense | /zopyx.typesense-1.0.0a8.tar.gz/zopyx.typesense-1.0.0a8/src/zopyx/typesense/locales/README.rst | README.rst |
Using the development buildout
==============================
Create a virtualenv in the package::
$ virtualenv --clear .
Install requirements with pip::
$ ./bin/pip install -r requirements.txt
Run buildout::
$ ./bin/buildout
Start Plone in foreground::
$ ./bin/instance fg
Running tests
-------------
$ tox
list all tox environments:
$ tox -l
py27-Plone43
py27-Plone51
py27-Plone52
py37-Plone52
build_instance
code-analysis
lint-py27
lint-py37
coverage-report
run a specific tox env:
$ tox -e py37-Plone52
| zopyx.usersascontent | /zopyx.usersascontent-1.0a1.tar.gz/zopyx.usersascontent-1.0a1/DEVELOP.rst | DEVELOP.rst |
# zopyx.usersascontent
This add-on provides a new content-types `PloneUser` for representing a user in
Plone through a dedicated content-type. The motivation for this add-on is the
need for maintaining user profiles a content in order to make them referencable
from other content.
The `PloneUser` provides the standard fields like fullname, email organization,
a member picture etc. and a dedicated view.
The add-on also contains some ideas borrowed from `collective.onlogin` with
configurable redirections to a user's `PloneUser` object directly after the
first login after registration or after each login.
`zopyx.usersascontent` is designed as a lightweight alternative to Membrane & Co.
The add-on provides a control panel `Users As Content`.
## Requirements:
- Plone 6, Python 3.8+
- (untested with Plone 5.2)
## Installation
Install zopyx.usersascontent by adding it to your buildout:
[buildout]
...
eggs =
zopyx.usersascontent
and then running ``bin/buildout``
## Contribute
- Issue Tracker: https://github.com/zopyx/zopyx.usersascontent/issues
- Source Code: https://github.com/zopyx/zopyx.usersascontent
## License
The project is licensed under the GPLv2.
| zopyx.usersascontent | /zopyx.usersascontent-1.0a1.tar.gz/zopyx.usersascontent-1.0a1/README.md | README.md |
zopyx.usersascontent Copyright 2021, Andreas Jung
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License version 2
as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA.
| zopyx.usersascontent | /zopyx.usersascontent-1.0a1.tar.gz/zopyx.usersascontent-1.0a1/LICENSE.rst | LICENSE.rst |
from plone import api
from datetime import datetime
from Products.CMFPlone.interfaces import IRedirectAfterLogin
from Products.CMFPlone.utils import safe_unicode
from zope.interface import implementer
from zopyx.usersascontent.interfaces import IUsersAsContentSettings
@implementer(IRedirectAfterLogin)
class RedirectAfterLoginAdapter(object):
    """Compute the redirect target after a successful Plone login.

    Ensures the logged-in user has a ``PloneUser`` content object inside a
    shared users folder (both created on demand), records first/last login
    timestamps on it, and redirects to that object depending on the
    ``redirect_after_registration`` / ``redirect_always`` registry settings.
    """

    def __init__(self, context, request):
        self.context = context
        self.request = request

    def create_and_get_user_folder(self):
        """Return the current user's ``PloneUser`` object.

        Creates the site-wide users folder (id taken from the
        ``user_folder_id`` registry record) and the per-user object on
        first use.
        """
        portal = api.portal.get()
        user_id = api.user.get_current().getId()
        user_folder_id = api.portal.get_registry_record(
            "user_folder_id", IUsersAsContentSettings
        )
        # Content is created with elevated privileges because a freshly
        # registered member is usually not allowed to add content here.
        with api.env.adopt_roles(roles=["Manager"]):
            if user_folder_id not in portal.objectIds():
                uf = api.content.create(
                    type="Folder", id=user_folder_id, title="Users", container=portal
                )
                api.content.transition(uf, "publish")
            uf = portal[user_folder_id]
            if user_id not in uf.objectIds():
                api.portal.show_message(
                    "We created a new user object for you", self.request
                )
                api.content.create(
                    type="PloneUser", id=user_id, title=user_id, container=uf
                )
        return portal[user_folder_id][user_id]

    def __call__(self, came_from=None, is_initial_login=False):
        """Return the URL the user should be redirected to after login.

        :param came_from: unused; the value is re-read from the request.
        :param is_initial_login: unused; "first login" is detected via the
            ``first_login`` attribute on the user object instead.
        """
        user_obj = self.create_and_get_user_folder()

        # NOTE: timestamps are stored as naive UTC datetimes
        # (datetime.utcnow() is deprecated since Python 3.12).
        is_first_login = not user_obj.first_login
        if is_first_login:
            user_obj.first_login = datetime.utcnow()
        # Always record the most recent login.
        user_obj.last_login = datetime.utcnow()

        if is_first_login:
            redirect_after_registration = api.portal.get_registry_record(
                "redirect_after_registration", IUsersAsContentSettings
            )
            if redirect_after_registration:
                return user_obj.absolute_url()

        redirect_always = api.portal.get_registry_record(
            "redirect_always", IUsersAsContentSettings
        )
        if redirect_always:
            return user_obj.absolute_url()

        came_from = self.request.get("came_from")
        if came_from:
            return came_from
        return self.context.absolute_url()
Adding and updating locales
---------------------------
For every language you want to translate into you need a
locales/[language]/LC_MESSAGES/collective.task.po
(e.g. locales/de/LC_MESSAGES/collective.task.po)
For German
.. code-block:: console
$ mkdir de
For updating locales
.. code-block:: console
$ ./bin/update_locale
Note
----
The script uses gettext package for internationalization.
Install it before running the script.
On macOS
--------
.. code-block:: console
$ brew install gettext
On Windows
----------
see https://mlocati.github.io/articles/gettext-iconv-windows.html
| zopyx.usersascontent | /zopyx.usersascontent-1.0a1.tar.gz/zopyx.usersascontent-1.0a1/src/zopyx/usersascontent/locales/README.rst | README.rst |
zopyx_gridfs
============
This is a tiny GridFS (MongoDB) to web gateway based on the Pyramid
web framework.
Installation
============
* requires Python 2.6
* create a virtualized environment using virtualenv
* inside the virtualenv environment::
bin/easy_install zopyx_gridfs
* Create a ``server.ini`` configuration file containing::
[app:zopyx_gridfs]
use = egg:zopyx_gridfs
reload_templates = true
debug_authorization = false
debug_notfound = false
debug_routematch = false
debug_templates = true
default_locale_name = en
# MongoDB specific configurations
mongo_host = localhost
mongo_port = 27017
database = test
[pipeline:main]
pipeline =
egg:WebError#evalerror
zopyx_gridfs
[server:main]
use = egg:Paste#http
host = 0.0.0.0
port = 6543
# Begin logging configuration
[loggers]
keys = root
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = INFO
handlers = console
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s
* Start the GridFS server using (this will start a HTTP
server on localhost:6543)::
bin/paster serve server.ini
Usage:
======
Downloading files
-----------------
You can access files stored inside GridFS through HTTP::
http://localhost:6543/<collection>/<filename>
where ``collection`` refers to the collection used by GridFS
and ``filename`` is the ``_id`` used to store the file
inside GridFS.
The server will return a 404 response if the requested file could not be found.
Uploading files
---------------
For uploading files into a collection you need to use the
built-in upload form:
http://localhost:6543/<collection>/upload_form
Author
======
| ZOPYX Limited
| c/o Andreas Jung
| Charlottenstr. 37/1
| D-72070 Tuebingen, Germany
| www.zopyx.com
| [email protected]
| zopyx_gridfs | /zopyx_gridfs-0.3.0.tar.gz/zopyx_gridfs-0.3.0/README.txt | README.txt |
import logging
import os
import secrets
from flask import Blueprint, current_app, request
from werkzeug.exceptions import Forbidden
from . import settings
# Blueprint whose before_app_request hook applies the CSRF check app-wide.
bp = Blueprint("csrf", __name__)
log = logging.getLogger(__name__)

# NOTE: We can't make these configurable until we have a way to pass settings
# to the frontend in Kubeflow Web Apps (e.g., a `/settings` endpoint).
# Cookie that carries the CSRF token to the frontend.
CSRF_COOKIE = "XSRF-TOKEN"
# Header in which the frontend must echo the cookie value back ("X-XSRF-TOKEN").
CSRF_HEADER = "X-" + CSRF_COOKIE
# Accepted values for the CSRF_SAMESITE environment variable (see set_cookie).
SAMESITE_VALUES = ["Strict", "Lax", "None"]
def set_cookie(resp):
    """Attach a fresh CSRF protection cookie to *resp*.

    The backend should call this every time it serves the index endpoint
    (`index.html`) so the cookie gets refreshed.

    - The frontend must be able to read the cookie: HttpOnly=False.
    - The cookie is only sent over HTTPS when SECURE_COOKIES is set:
      Secure=True.
    - The cookie is scoped to the app's path (not the whole domain):
      Path={app.prefix}.

    Caching is disabled on the response so the cookie is always re-issued.
    """
    token = secrets.token_urlsafe(32)

    use_secure = settings.SECURE_COOKIES
    if not use_secure:
        log.info("Not setting Secure in CSRF cookie.")

    # Fall back to the strictest SameSite policy on unknown values.
    requested = os.getenv("CSRF_SAMESITE", "Strict")
    samesite = requested if requested in SAMESITE_VALUES else "Strict"

    resp.set_cookie(key=CSRF_COOKIE, value=token, samesite=samesite,
                    httponly=False, secure=use_secure,
                    path=current_app.config["PREFIX"])

    # A response that sets a CSRF cookie must never be cached.
    resp.headers["Cache-Control"] = (
        "no-cache, no-store, must-revalidate, max-age=0"
    )
@bp.before_app_request
def check_endpoint():
    """Reject state-changing requests whose CSRF token pair is invalid.

    Safe (read-only) HTTP methods pass through. Every other request must
    carry the CSRF cookie and echo its value in the CSRF header; otherwise
    a 403 Forbidden is raised.
    """
    if request.method in ("GET", "HEAD", "OPTIONS", "TRACE"):
        log.info("Skipping CSRF check for safe method: %s", request.method)
        return

    log.debug("Ensuring endpoint is CSRF protected: %s", request.path)

    cookie_token = request.cookies.get(CSRF_COOKIE)
    if cookie_token is None:
        raise Forbidden("Could not find CSRF cookie %s in the request."
                        % CSRF_COOKIE)

    header_token = request.headers.get(CSRF_HEADER)
    if header_token is None:
        raise Forbidden("Could not detect CSRF protection header %s."
                        % CSRF_HEADER)

    # Double-submit check: header must match cookie exactly.
    if header_token != cookie_token:
        raise Forbidden("CSRF check failed. Token in cookie %s doesn't match "
                        "token in header %s." % (CSRF_COOKIE, CSRF_HEADER))
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.