import itertools
from zfstools.util import simplify
from zfstools.models import Snapshot
import warnings
# it is time to determine which datasets need to be synced
# we walk the entire dataset structure, and sync snapshots recursively
def recursive_replicate(s, d):
sched = []
# we first collect all snapshot names, to later see if they are on both sides, one side, or what
all_snapshots = []
if s: all_snapshots.extend(s.get_snapshots())
if d: all_snapshots.extend(d.get_snapshots())
all_snapshots = [ y[1] for y in sorted([ (x.get_property('creation'), x.name) for x in all_snapshots ]) ]
snapshot_pairs = []
for snap in all_snapshots:
try: ssnap = s.get_snapshot(snap)
except (KeyError, AttributeError): ssnap = None
try: dsnap = d.get_snapshot(snap)
except (KeyError, AttributeError): dsnap = None
# if the source snapshot exists and is not already in the table of snapshots
# then pair it up with its destination snapshot (if it exists) or None
# and add it to the table of snapshots
if ssnap and not snap in [ x[0].name for x in snapshot_pairs ]:
snapshot_pairs.append((ssnap, dsnap))
# now we have a list of all snapshots, paired up by name, and in chronological order
# (it's quadratic complexity, but who cares)
# now we need to find the snapshot pair that happens to be the most recent common pair
found_common_pair = False
for idx, (m, n) in enumerate(snapshot_pairs):
if m and n and m.name == n.name:
found_common_pair = idx
# we have combed through the snapshot pairs
# time to check what the latest common pair is
if not s.get_snapshots():
if d is None:
# well, no snapshots in source, just create a stub in the target
sched.append(("create_stub", s, d, None, None))
elif found_common_pair is False:
# no snapshot is in common, problem!
# theoretically destroying destination dataset and resyncing it recursively would work
# but this requires work in the optimizer that comes later
if d is not None and d.get_snapshots():
            warnings.warn("Asked to replicate %s into %s but %s has snapshots and the two datasets have no snapshots in common!" % (s, d, d))
# see source snapshots
full_source_snapshots = [ y[1] for y in sorted([ (x.get_property('creation'), x) for x in s.get_snapshots() ]) ]
# send first snapshot as full snapshot
sched.append(("full", s, d, None, full_source_snapshots[0]))
if len(full_source_snapshots) > 1:
# send other snapshots as incremental snapshots
sched.append(("incremental", s, d, full_source_snapshots[0], full_source_snapshots[-1]))
elif found_common_pair == len(snapshot_pairs) - 1:
# the latest snapshot of both datasets that is common to both, is the latest snapshot in the source
# we have nothing to do here because the datasets are "in sync"
pass
else:
# the source dataset has more recent snapshots, not present in the destination dataset
# we need to transfer those
snapshots_to_transfer = [ x[0] for x in snapshot_pairs[found_common_pair:] ]
for n, x in enumerate(snapshots_to_transfer):
if n == 0: continue
sched.append(("incremental", s, d, snapshots_to_transfer[n - 1], x))
# now let's apply the same argument to the children
children_sched = []
for c in [ x for x in s.children if not isinstance(x, Snapshot) ]:
try: cd = d.get_child(c.name)
except (KeyError, AttributeError): cd = None
children_sched.extend(recursive_replicate(c, cd))
# and return our schedule of operations to the parent
return sched + children_sched
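# Illustrative output (hypothetical datasets src and dst): the schedule is a
# flat list of 5-tuples of the form
#   (operation, source_dataset, destination_dataset, from_snapshot, to_snapshot)
# for example:
#   [("full", src, dst, None, <Snapshot src@A>),
#    ("incremental", src, dst, <Snapshot src@A>, <Snapshot src@C>)]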
def optimize_coalesce(operation_schedule):
# now let's optimize the operation schedule
# this optimization is quite basic
# step 1: coalesce contiguous operations on the same file system
operations_grouped_by_source = itertools.groupby(
operation_schedule,
lambda op: op[1]
)
new = []
for _, opgroup in [ (x, list(y)) for x, y in operations_grouped_by_source ]:
if not opgroup: # empty opgroup
continue
if opgroup[0][0] == 'full': # full operations
new.extend(opgroup)
elif opgroup[0][0] == 'create_stub': # create stub operations
new.extend(opgroup)
elif opgroup[0][0] == 'incremental': # incremental
# 1->2->3->4 => 1->4
new_ops = [ (srcs, dsts) for _, _, _, srcs, dsts in opgroup ]
new_ops = simplify(new_ops)
for srcs, dsts in new_ops:
new.append(tuple(opgroup[0][:3] + (srcs, dsts)))
else:
assert 0, "not reached: unknown operation type in %s" % opgroup
return new
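# Example of the coalescing above: two contiguous incrementals on the same
# filesystem,
#   [("incremental", s, d, A, B), ("incremental", s, d, B, C)]
# are collapsed via simplify() into
#   [("incremental", s, d, A, C)]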
def optimize_recursivize(operation_schedule):
def recurse(dataset, func):
results = []
results.append((dataset, func(dataset)))
results.extend([ x for child in dataset.children if child.__class__ != Snapshot for x in recurse(child, func) ])
return results
def zero_out_sched(dataset):
dataset._ops_schedule = []
def evict_sched(dataset):
dataset._ops_schedule = []
operations_grouped_by_source = itertools.groupby(
operation_schedule,
lambda op: op[1]
)
operations_grouped_by_source = [ (x, list(y)) for x, y in operations_grouped_by_source ]
roots = set()
for root, opgroup in operations_grouped_by_source:
while root.parent is not None:
root = root.parent
roots.add(root)
for root in roots:
recurse(root, zero_out_sched)
for source, opgroup in operations_grouped_by_source:
source._ops_schedule = opgroup
def compare(*ops_schedules):
assert len(ops_schedules), "operations schedules cannot be empty: %r" % ops_schedules
# in the case of the list of operations schedules being just one (no children)
# we return True, cos it's safe to recursively replicate this one
if len(ops_schedules) == 1:
return True
# now let's check that all ops schedules are the same length
# otherwise they are not the same and we can say the comparison isn't the same
lens = set([ len(o) for o in ops_schedules ])
if len(lens) != 1:
return False
# we have multiple schedules
# if their type, snapshot origin and snapshot destination are all the same
# we can say that they are "the same"
comparisons = [
all([
# never attempt to recursivize operations who involve create_stub
all(["create_stub" not in o[0] for o in ops]),
len(set([o[0] for o in ops])) == 1,
any([o[3] is None for o in ops]) or len(set([o[3].name for o in ops])) == 1,
any([o[4] is None for o in ops]) or len(set([o[4].name for o in ops])) == 1,
])
for ops
in zip(*ops_schedules)
]
return all(comparisons)
# remove unnecessary stubs that stand in for only other stubs
for root in roots:
for dataset, _ in recurse(root, lambda d: d):
ops = [z for x, y in recurse(dataset, lambda d: d._ops_schedule) for z in y]
if all([o[0] == 'create_stub' for o in ops]):
dataset._ops_schedule = []
for root in roots:
for dataset, _ in recurse(root, lambda d: d):
if compare(*[y for x, y in recurse(dataset, lambda d: d._ops_schedule)]):
old_ops_schedule = dataset._ops_schedule
recurse(dataset, zero_out_sched)
for op in old_ops_schedule:
dataset._ops_schedule.append((
op[0] + "_recursive", op[1], op[2], op[3], op[4]
))
new_operation_schedule = []
for root in roots:
for dataset, ops_schedule in recurse(root, lambda d: d._ops_schedule):
new_operation_schedule.extend(ops_schedule)
for root in roots:
recurse(root, evict_sched)
return new_operation_schedule
def optimize(operation_schedule, allow_recursivize = True):
operation_schedule = optimize_coalesce(operation_schedule)
if allow_recursivize:
operation_schedule = optimize_recursivize(operation_schedule)
return operation_schedule
# we walk the entire dataset structure, and clear obsolete snapshots recursively
def recursive_clear_obsolete(s, d):
sched = []
    # we first collect the snapshot names on both sides, to find snapshots that exist only in the destination
snapshots_in_src = set([ m.name for m in s.get_snapshots() ])
snapshots_in_dst = set([ m.name for m in d.get_snapshots() ])
snapshots_to_delete = snapshots_in_dst - snapshots_in_src
snapshots_to_delete = [ d.get_snapshot(m) for m in snapshots_to_delete ]
for m in snapshots_to_delete:
sched.append(("destroy", m))
# now let's apply the same argument to the children
children_sched = []
for child_d in [ x for x in d.children if not isinstance(x, Snapshot) ]:
child_s = None
try:
child_s = s.get_child(child_d.name)
except (KeyError, AttributeError):
children_sched.append(("destroy_recursively", child_d))
if child_s:
children_sched.extend(recursive_clear_obsolete(child_s, child_d))
# and return our schedule of operations to the parent
    return sched + children_sched

[file: src/zfstools/sync.py | package: zfs-tools-zbackup-0.5.1]
from collections import OrderedDict
class Dataset(object):
name = None
children = None
_properties = None
parent = None
invalidated = False
def __init__(self, name, parent=None):
self.name = name
self.children = []
self._properties = {}
if parent:
self.parent = parent
self.parent.add_child(self)
def add_child(self, child):
self.children.append(child)
return child
def get_child(self, name):
child = [ c for c in self.children if c.name == name and isinstance(c, Dataset) ]
assert len(child) < 2
        if not child: raise KeyError(name)
return child[0]
def get_snapshots(self, flt=True):
if flt is True: flt = lambda x:True
children = [ c for c in self.children if isinstance(c, Snapshot) and flt(c) ]
return children
def get_snapshot(self, name):
children = [ c for c in self.get_snapshots() if c.name == name ]
assert len(children) < 2
        if not children: raise KeyError(name)
return children[0]
def lookup(self, name): # FINISH THIS
if "@" in name:
path, snapshot = name.split("@")
else:
path = name
snapshot = None
if "/" not in path:
try: dset = self.get_child(path)
            except KeyError: raise KeyError("No such dataset %s at %s" % (path, self.get_path()))
if snapshot:
try: dset = dset.get_snapshot(snapshot)
                except KeyError: raise KeyError("No such snapshot %s at %s" % (snapshot, dset.get_path()))
else:
head, tail = path.split("/", 1)
try: child = self.get_child(head)
            except KeyError: raise KeyError("No such dataset %s at %s" % (head, self.get_path()))
if snapshot: tail = tail + "@" + snapshot
dset = child.lookup(tail)
return dset
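    # Illustrative (hypothetical pool layout): with a pool "tank" containing
    # tank/data/home,
    #   tank.lookup("data/home")          -> <Dataset: tank/data/home>
    #   tank.lookup("data/home@20120101") -> <Snapshot: tank/data/home@20120101>
    # A KeyError is raised when any path component or snapshot is missing.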
def remove(self, child):
        if child not in self.children: raise KeyError(child.name)
child.invalidated = True
child.parent = None
self.children.remove(child)
for c in child.children:
child.remove(c)
def get_path(self):
if not self.parent: return self.name
return "%s/%s" % (self.parent.get_path(), self.name)
def get_relative_name(self):
if not self.parent: return self.name
return self.get_path()[len(self.parent.get_path()) + 1:]
def walk(self):
assert not self.invalidated, "%s invalidated" % self
yield self
for c in self.children:
for element in c.walk():
yield element
def __iter__(self):
return self.walk()
def __str__(self):
return "<Dataset: %s>" % self.get_path()
__repr__ = __str__
def get_property(self,name):
return self._properties[ name ]
class Pool(Dataset):
def __str__(self):
return "<Pool: %s>" % self.get_path()
__repr__ = __str__
class Snapshot(Dataset):
# def __init__(self,name):
# Dataset.__init__(self,name)
def get_path(self):
if not self.parent: return self.name
return "%s@%s" % (self.parent.get_path(), self.name)
def __str__(self):
return "<Snapshot: %s>" % self.get_path()
__repr__ = __str__
class PoolSet: # maybe rewrite this as a dataset or something?
pools = None
def __init__(self):
self.pools = {}
def lookup(self, name):
if "@" in name:
path, snapshot = name.split("@")
else:
path = name
snapshot = None
if "/" not in path:
try: dset = self.pools[path]
            except KeyError: raise KeyError("No such pool %s" % (name))
if snapshot:
try: dset = dset.get_snapshot(snapshot)
                except KeyError: raise KeyError("No such snapshot %s at %s" % (snapshot, dset.get_path()))
else:
head, tail = path.split("/", 1)
try: pool = self.pools[head]
            except KeyError: raise KeyError("No such pool %s" % (head))
if snapshot: tail = tail + "@" + snapshot
dset = pool.lookup(tail)
return dset
def parse_zfs_r_output(self, creationtimes, properties = None ):
"""Parse the output of tab-separated zfs list.
properties must be a list of property names expected to be found as
tab-separated entries on each line of creationtimes after the
dataset name and a tab.
E.g. if properties passed here was ['creation'], we would expect
each creationtimes line to look like 'dataset 3249872348'
"""
        properties = ['name', 'creation'] if properties is None else ['name'] + properties
def extract_properties( line ):
            items = line.strip().split( '\t' )
assert len( items ) == len( properties ), (properties, items)
propvalues = map( lambda x: None if x == '-' else x, items[ 1: ] )
return [ items[ 0 ], zip( properties[ 1: ], propvalues ) ]
# make into array
creations = OrderedDict([ extract_properties( s ) for s in creationtimes.splitlines() if s.strip() ])
# names of pools
old_dsets = [ x.get_path() for x in self.walk() ]
old_dsets.reverse()
new_dsets = creations.keys()
for dset in new_dsets:
if "@" in dset:
dset, snapshot = dset.split("@")
else:
snapshot = None
if "/" not in dset: # pool name
if dset not in self.pools:
self.pools[dset] = Pool(dset)
fs = self.pools[dset]
poolname, pathcomponents = dset.split("/")[0], dset.split("/")[1:]
fs = self.pools[poolname]
for pcomp in pathcomponents:
# traverse the child hierarchy or create if that fails
try: fs = fs.get_child(pcomp)
except KeyError:
fs = Dataset(pcomp, fs)
if snapshot:
if snapshot not in [ x.name for x in fs.children ]:
fs = Snapshot(snapshot, fs)
fs._properties.update( creations[fs.get_path()] )
for dset in old_dsets:
if dset not in new_dsets:
if "/" not in dset and "@" not in dset: # a pool
self.remove(dset)
else:
d = self.lookup(dset)
d.parent.remove(d)
def remove(self, name): # takes a NAME, unlike the child that is taken in the remove of the dataset method
for c in self.pools[name].children:
self.pools[name].remove(c)
self.pools[name].invalidated = True
del self.pools[name]
def __getitem__(self, name):
return self.pools[name]
def __str__(self):
return "<PoolSet at %s>" % id(self)
__repr__ = __str__
def walk(self):
for item in self.pools.values():
for dset in item.walk():
yield dset
def __iter__(self):
        return self.walk()

[file: src/zfstools/models.py | package: zfs-tools-zbackup-0.5.1]
# ZFS tools
| Donate to support this free software |
|:------------------------------------:|
| <img width="164" height="164" title="" alt="" src="doc/bitcoin.png" /> |
| [1Cw9nZu9ygknussPofMWCzmSMveusTbQvN](bitcoin:1Cw9nZu9ygknussPofMWCzmSMveusTbQvN) |
The ZFS backup tools will help you graft an entire ZFS pool as a filesystem
into a backup machine, without having to screw around with snapshot names,
complicated shell commands, or crontabs.
The utilities let you do this:
1. zfs-shell:
a shell that allows remote ZFS administration and nothing more
2. zsnap:
a command that snapshots a dataset or pool, then deletes old snapshots
3. zreplicate:
a command that replicates an entire dataset tree using ZFS replication
streams. Best used in combination with zsnap as in:
- zsnap on the local machine
- zreplicate from the local machine to the destination machine
Obsolete snapshots deleted by zsnap will be automatically purged on
the destination machine by zreplicate, as a side effect of using
replication streams. To inhibit this, use the
--no-replication-stream option.
Run `zreplicate --help` for a compendium of options you may use.
4. zbackup:
a command to snapshot and replicate filesystems according to their user properties.
This uses zsnap and zreplicate to do the work, which is all driven by properties.
For details, see this [further description of zbackup](doc/README-zbackup.md).
The repository, bug tracker and Web site for this tool is at [http://github.com/Rudd-O/zfs-tools](http://github.com/Rudd-O/zfs-tools). Comments to me through [email protected].
## Setting up
Setup is rather complicated. It assumes that you already have ZFS running
and vaults on both the machine you're going to back up and the machine that
will be receiving the backup.
### On the machine to back up
- Install the zfs-shell command
`cp zfs-shell /usr/local/sbin`
`chmod 755 /usr/local/sbin/zfs-shell`
`chown root:root /usr/local/sbin/zfs-shell`
- Create a user with a home directory and shell `zfs-shell`
`useradd -rUm -b /var/lib -s /usr/local/sbin/zfs-shell zfs`
- Let `sudo` know that the new user can run the zfs command
`zfs ALL = NOPASSWD: /usr/local/sbin/zfs`
(ensure you remove the `requiretty` default on `/etc/sudoers`)
(check `sudoers.zfs-tools` in `contrib/` for an example)
- Set up a cron job to run `zsnap` as frequently as you want to,
snapshotting the dataset you intend to replicate.
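For example, a crontab entry such as the following would snapshot `senderpool` hourly (the schedule and pool name are placeholders; adjust them to your setup):

`0 * * * * zsnap senderpool`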
### On the backup machine
- Set up public key authentication for SSH so the backup machine
may log as the user `zfs` (as laid out above) in the machine to
be backed up.
- Create a dataset to receive the backup stream.
- Set up a cron job to fetch the dataset snapshotted by zsnap
from the remote machine into the newly created dataset. You
will use `zreplicate` for that (see below for examples).
- After the first replication, you may want to set the `mountpoint`
attributes on the received datasets so they do not automount
on the backup machine.
### Test
If all went well, you should be able to do this without issue:
(on the machine to back up)
[root@peter]
zsnap senderpool
(on the machine to receive)
[root@paul]
zfs create receiverpool/senderpool # <--- run this ONLY ONCE
zreplicate -o zfs@paul:senderpool receiverpool/senderpool
# this should send the entire senderpool with all snapshots
# over from peter to paul, placing it in receiverpool/senderpool
(on the machine to back up)
[root@peter]
zsnap senderpool
(on the machine to receive)
[root@paul]
zreplicate -o zfs@paul:senderpool receiverpool/senderpool
# this should send an incremental stream of senderpool
# into receiverpool/senderpool
And that's it, really.
| zfs-tools | /zfs-tools-0.5.1.tar.gz/zfs-tools-0.5.1/README.md | README.md |
# zbackup
## Introduction
zbackup is a front-end for a backup service using ZFS snapshots and replication to safely replicate a set of ZFS filesystems onto another server. It makes use of zsnap and zreplicate, so ensure these are working nicely before trying to get going with zbackup.
## ZFS properties governing zbackup behaviour
zbackup is driven by ZFS properties, so your scripts and/or crontab entries need make no mention of particular ZFS datasets, number of snapshots to keep, etc.
The following user properties define the behaviour, where *tier* is arbitrary, but expected to be e.g. hourly, daily, weekly, etc. All properties must be in the namespace `com.github.tesujimath.zbackup`, so prefix each property listed here with `com.github.tesujimath.zbackup:`, following the best practice for user properties as described on the zfs man page.
- `*tier*-snapshots` - turns on snapshots, and limits how many snapshots to keep in given tier
- `*tier*-snapshot-limit` - limits how many snapshots to keep in given tier (overrides *tier*-snapshots)
- `replica` - comma-separated list of dstdatasetname, as used by zreplicate
- `replicate` - *tier*, which tier to replicate
See `zbackup --list`, `zbackup --set`, and `zbackup --unset` below for an easy interface to listing, setting, and unsetting these properties.
Snapshotting for a given tier will be active as soon as `*tier*-snapshots` is defined with an integer value, with a property source of local. Received properties will not cause new snapshots to be taken.
However, old snapshots will be reaped if the property source is local or received. This means that reaping old snapshots on a destination replica is driven by the received property `*tier*-snapshots`, or the property `*tier*-snapshot-limit`, with the latter overriding the former if both are present. Note that the limit property functions even if its source is inherited.
Replication is done for a single tier only, as per the 'replicate' property. Again, these properties must have the source being local to have any effect. Note that the `--no-replication-stream` option for zreplicate is used, so that no destination replica snapshots and filesystems are deleted as a side-effect of running a backup. To purge obsolete snapshots from the destination, it is recommended to use the behaviour described in the previous paragraph.
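For example, to enable daily snapshots on a dataset directly with `zfs set` (the dataset name is a placeholder; `zbackup --set`, described below, does the same with less typing):

```
# zfs set com.github.tesujimath.zbackup:daily-snapshots=6 zpoolsrc/playpen
```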
## ssh authentication
It is up to you to arrange your own ssh authentication. For example, you could use an ssh agent and ssh public key authentication, or say Kerberos. (The crontab example below assumes Kerberos, which explains the call to kinit to acquire a Kerberos ticket from the local keytab file.)
## Interfacing with cron
zbackup is best run as a cron job. It is up to you to define the tiers which make sense to you, but these would be reasonable entries in a crontab:
```
0 8,9,10,11,12,13,14,15,16,17,18 * * * zbackup -v -t '\%Y-\%m-\%d-\%H\%M' hourly >/root/zbackup.hourly 2>&1
0 23 * * * kinit -k -C root/HOST.YOURDOMAIN ; zbackup -v -t '\%Y-\%m-\%d-\%H\%M' -d hourly daily >/root/zbackup.daily 2>&1
30 23 * * 0 zbackup -v -t '\%Y-\%m-\%d-\%H\%M' weekly >/root/zbackup.weekly 2>&1
45 23 1 * * zbackup -v -t '\%Y-\%m-\%d-\%H\%M' monthly >/root/zbackup.monthly 2>&1
```
### Notes
1. Percent signs need to be escaped in crontabs.
2. I specify the timestamp format explicitly, to avoid seconds appearing in the timestamps. You may choose to not care about that.
3. My daily tier sets up ssh authentication by acquiring a Kerberos ticket from the local keytab. This is for a system which performs replication in the daily tier. You will have to change this to match your system requirements.
4. The `-d hourly` option in the daily zbackup deletes all hourly snapshots, so these do not appear on the destination replica. It is up to you to decide what behaviour you want.
## Getting started
Run `zbackup --help` for the usage, and complete options.
Run `zbackup --list` to see what backup properties are set.
Setting and unsetting the properties used by zbackup is most easily done using `zbackup --set` and `zbackup --unset`. For example:
```
# zbackup --set zpoolsrc/playpen daily-snapshots=6 weekly-snapshots=5 replica=MYREPLICASERVER:zpooldst/playpen2/replica/zpoolsrc/playpen replicate=daily
# zbackup --list
```
## Error reporting
It is clearly rather important to know if zbackup fails. Any or all of these three mechanisms can be used:
1. Non-zero exit status
2. Error text appearing on stderr.
3. Optionally, use the -e option to email the given recipient on failure.
It is recommended to use these to check carefully that replication in particular is working as intended.
## The dangers of ZFS replication streams when used for backup
The default behaviour of zreplicate is to use ZFS replication streams, i.e. `zfs send -R`. This is inhibited when zreplicate is called from zbackup, using the `--no-replication-stream` option to zreplicate.
The problem with ZFS replication streams arises when you have nested ZFS datasets, e.g. home, with a dataset for each user's home directory. If a user's home is deleted on the source side, using zfs destroy, then `zfs send -R` will replicate this deletion to the destination side.
zbackup avoids this unsafe behaviour following a `zfs destroy` operation on the source side.
## Author
zbackup was written by Simon Guest, developed in the [tesujimath fork of zfs-tools](https://github.com/tesujimath/zfs-tools), and now contributed upstream. Thanks to the original author of zfs-tools for providing an excellent framework on which to base zbackup.
| zfs-tools | /zfs-tools-0.5.1.tar.gz/zfs-tools-0.5.1/doc/README-zbackup.md | README-zbackup.md |
import sys
import os
import subprocess
def simplify(x):
    '''Take a list of tuples of the form (start, end, v3, ..., vn)
    and coalesce every pair tx = (a, b, ...) and ty = (b, c, ...) into
    (a, c, ...), preserving v3...vn of tx and discarding those of ty.
m = [
(1,2,"one"),
(2,3,"two"),
(3,4,"three"),
(8,9,"three"),
(4,5,"four"),
(6,8,"blah"),
]
    simplify(m) -> [[1, 5, 'one'], [6, 9, 'blah']]
'''
y = list(x)
if len(x) < 2: return y
for idx,o in enumerate(list(y)):
for idx2,p in enumerate(list(y)):
if idx == idx2: continue
if o and p and o[0] == p[1]:
y[idx] = None
                y[idx2] = list(p)
                y[idx2][1] = o[1]
return [ n for n in y if n is not None ]
def uniq(seq, idfun=None):
'''Makes a sequence 'unique' in the style of UNIX command uniq'''
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
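# Example: uniq([1, 2, 1, 3]) -> [1, 2, 3];
# uniq(["A", "a", "B"], idfun=str.lower) -> ["A", "B"]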
class SpecialPopen(subprocess.Popen):
def __init__(self, *a, **kw):
self._saved_args = a[0] if kw.get("args") is None else kw.get("args")
subprocess.Popen.__init__(self, *a, **kw)
def progressbar(pipe, bufsize=-1, ratelimit=-1):
def clpbar(cmdname):
barargs = []
if bufsize != -1:
barargs = ["-bs", str(bufsize)]
if ratelimit != -1:
barargs = barargs + ['-th', str(ratelimit)]
barprg = SpecialPopen(
[cmdname, "-dan"] + barargs,
stdin=pipe, stdout=subprocess.PIPE, bufsize=bufsize)
return barprg
def pv(cmdname):
barargs = []
if bufsize != -1:
barargs = ["-B", str(bufsize)]
if ratelimit != -1:
barargs = barargs + ['-L', str(ratelimit)]
barprg = SpecialPopen(
[cmdname, "-ptrb"] + barargs,
stdin=pipe, stdout=subprocess.PIPE, bufsize=bufsize)
return barprg
barprograms = [
("bar", clpbar),
("clpbar", clpbar),
("pv", pv),
]
for name, func in barprograms:
try:
subprocess.call([name, '-h'], stdout=open(os.devnull, "w"), stderr=open(os.devnull, "w"), stdin=open(os.devnull, "r"))
except OSError as e:
if e.errno == 2: continue
assert 0, "not reached while searching for clpbar or pv"
return func(name)
raise OSError(2, "no such file or directory searching for clpbar or pv")
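# Illustrative usage (hypothetical upstream pipe; bufsize is in bytes):
#   snd = SpecialPopen(["zfs", "send", "tank@snap"], stdout=subprocess.PIPE)
#   bar = progressbar(snd.stdout, bufsize=1048576)
#   # downstream consumers then read from bar.stdout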
def stderr(text):
"""print out something to standard error, followed by an ENTER"""
sys.stderr.write(text)
sys.stderr.write("\n")
__verbose = False
def verbose_stderr(*args, **kwargs):
global __verbose
if __verbose: stderr(*args, **kwargs)
def set_verbose(boolean):
global __verbose
    __verbose = boolean

[file: src/zfstools/util.py | package: zfs-tools-0.5.1]
import subprocess
import os
from zfstools.models import PoolSet
from zfstools.util import progressbar, SpecialPopen
try:
from Queue import Queue
except ImportError:
    from queue import Queue
from threading import Thread
# Work-around for check_output not existing on Python 2.6, as per
# http://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
# The implementation is lifted from
# http://hg.python.org/cpython/file/d37f963394aa/Lib/subprocess.py#l544
if "check_output" not in dir( subprocess ): # duck punch it in!
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd) # , output=output)
return output
subprocess.check_output = f
class ZFSConnection:
host = None
_poolset = None
_dirty = True
_trust = False
_properties = None
def __init__(self,host="localhost", trust=False, sshcipher=None, properties=None, identityfile=None, knownhostsfile=None):
self.host = host
self._trust = trust
self._properties = properties if properties else []
self._poolset= PoolSet()
if host in ['localhost','127.0.0.1']:
self.command = ["zfs"]
else:
self.command = ["ssh","-o","BatchMode yes","-a","-x"]
if self._trust:
self.command.extend(["-o","CheckHostIP no"])
self.command.extend(["-o","StrictHostKeyChecking no"])
            if sshcipher is not None:
                self.command.extend(["-c",sshcipher])
            if identityfile is not None:
                self.command.extend(["-i",identityfile])
            if knownhostsfile is not None:
                self.command.extend(["-o","UserKnownHostsFile %s" % knownhostsfile])
self.command.extend([self.host,"zfs"])
def _get_poolset(self):
if self._dirty:
properties = [ 'creation' ] + self._properties
stdout2 = subprocess.check_output(self.command + ["list", "-Hpr", "-o", ",".join( ['name'] + properties ), "-t", "all"])
self._poolset.parse_zfs_r_output(stdout2,properties)
self._dirty = False
return self._poolset
pools = property(_get_poolset)
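    # Illustrative usage (hypothetical host and dataset names):
    #   local = ZFSConnection()
    #   remote = ZFSConnection("backup.example.com", trust=True)
    #   fs = local.pools.lookup("senderpool/home")  # raises KeyError if absent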
def create_dataset(self,name):
subprocess.check_call(self.command + ["create", name])
self._dirty = True
return self.pools.lookup(name)
def destroy_dataset(self, name):
subprocess.check_call(self.command + ["destroy", name])
self._dirty = True
def destroy_recursively(self, name):
subprocess.check_call(self.command + ["destroy", '-r', name])
self._dirty = True
def snapshot_recursively(self,name,snapshotname,properties={}):
plist = sum( map( lambda x: ['-o', '%s=%s' % x ], properties.items() ), [] )
subprocess.check_call(self.command + ["snapshot", "-r" ] + plist + [ "%s@%s" % (name, snapshotname)])
self._dirty = True
def send(self,name,opts=None,bufsize=-1,compression=False):
if not opts: opts = []
cmd = list(self.command)
if compression and cmd[0] == 'ssh': cmd.insert(1,"-C")
cmd = cmd + ["send"] + opts + [name]
p = SpecialPopen(cmd,stdin=open(os.devnull),stdout=subprocess.PIPE,bufsize=bufsize)
return p
def receive(self,name,pipe,opts=None,bufsize=-1,compression=False):
if not opts: opts = []
cmd = list(self.command)
if compression and cmd[0] == 'ssh': cmd.insert(1,"-C")
cmd = cmd + ["receive"] + opts + [name]
p = SpecialPopen(cmd,stdin=pipe,bufsize=bufsize)
return p
def transfer(self, dst_conn, s, d, fromsnapshot=None, showprogress=False, bufsize=-1, send_opts=None, receive_opts=None, ratelimit=-1, compression=False):
if send_opts is None: send_opts = []
if receive_opts is None: receive_opts = []
queue_of_killables = Queue()
if fromsnapshot: fromsnapshot=["-i",fromsnapshot]
else: fromsnapshot = []
sndprg = self.send(s, opts=[] + fromsnapshot + send_opts, bufsize=bufsize, compression=compression)
sndprg_supervisor = Thread(target=lambda: queue_of_killables.put((sndprg, sndprg.wait())))
sndprg_supervisor.start()
if showprogress:
try:
barprg = progressbar(pipe=sndprg.stdout,bufsize=bufsize,ratelimit=ratelimit)
barprg_supervisor = Thread(target=lambda: queue_of_killables.put((barprg, barprg.wait())))
barprg_supervisor.start()
sndprg.stdout.close()
except OSError:
os.kill(sndprg.pid,15)
raise
else:
barprg = sndprg
try:
rcvprg = dst_conn.receive(d,pipe=barprg.stdout,opts=["-Fu"]+receive_opts,bufsize=bufsize,compression=compression)
rcvprg_supervisor = Thread(target=lambda: queue_of_killables.put((rcvprg, rcvprg.wait())))
rcvprg_supervisor.start()
barprg.stdout.close()
except OSError:
os.kill(sndprg.pid, 15)
if sndprg.pid != barprg.pid: os.kill(barprg.pid, 15)
raise
dst_conn._dirty = True
allprocesses = set([rcvprg, sndprg]) | ( set([barprg]) if showprogress else set() )
while allprocesses:
diedprocess, retcode = queue_of_killables.get()
allprocesses = allprocesses - set([diedprocess])
if retcode != 0:
[ p.kill() for p in allprocesses ]
                raise subprocess.CalledProcessError(retcode, diedprocess._saved_args)

[file: src/zfstools/connection.py | package: zfs-tools-0.5.1]
import itertools
from zfstools.util import simplify
from zfstools.models import Snapshot
import warnings
# it is time to determine which datasets need to be synced
# we walk the entire dataset structure, and sync snapshots recursively
def recursive_replicate(s, d):
sched = []
# we first collect all snapshot names, to later see if they are on both sides, one side, or what
all_snapshots = []
if s: all_snapshots.extend(s.get_snapshots())
if d: all_snapshots.extend(d.get_snapshots())
all_snapshots = [ y[1] for y in sorted([ (x.get_property('creation'), x.name) for x in all_snapshots ]) ]
snapshot_pairs = []
for snap in all_snapshots:
try: ssnap = s.get_snapshot(snap)
except (KeyError, AttributeError): ssnap = None
try: dsnap = d.get_snapshot(snap)
except (KeyError, AttributeError): dsnap = None
# if the source snapshot exists and is not already in the table of snapshots
# then pair it up with its destination snapshot (if it exists) or None
# and add it to the table of snapshots
if ssnap and not snap in [ x[0].name for x in snapshot_pairs ]:
snapshot_pairs.append((ssnap, dsnap))
# now we have a list of all snapshots, paired up by name, and in chronological order
# (it's quadratic complexity, but who cares)
# now we need to find the snapshot pair that happens to be the most recent common pair
found_common_pair = False
for idx, (m, n) in enumerate(snapshot_pairs):
if m and n and m.name == n.name:
found_common_pair = idx
# we have combed through the snapshot pairs
# time to check what the latest common pair is
if not s.get_snapshots():
if d is None:
# well, no snapshots in source, just create a stub in the target
sched.append(("create_stub", s, d, None, None))
elif found_common_pair is False:
# no snapshot is in common, problem!
# theoretically destroying destination dataset and resyncing it recursively would work
# but this requires work in the optimizer that comes later
if d is not None and d.get_snapshots():
            warnings.warn("Asked to replicate %s into %s but %s has snapshots and the two datasets have no snapshots in common!" % (s, d, d))
# see source snapshots
full_source_snapshots = [ y[1] for y in sorted([ (x.get_property('creation'), x) for x in s.get_snapshots() ]) ]
# send first snapshot as full snapshot
sched.append(("full", s, d, None, full_source_snapshots[0]))
if len(full_source_snapshots) > 1:
# send other snapshots as incremental snapshots
sched.append(("incremental", s, d, full_source_snapshots[0], full_source_snapshots[-1]))
elif found_common_pair == len(snapshot_pairs) - 1:
# the latest snapshot of both datasets that is common to both, is the latest snapshot in the source
# we have nothing to do here because the datasets are "in sync"
pass
else:
# the source dataset has more recent snapshots, not present in the destination dataset
# we need to transfer those
snapshots_to_transfer = [ x[0] for x in snapshot_pairs[found_common_pair:] ]
for n, x in enumerate(snapshots_to_transfer):
if n == 0: continue
sched.append(("incremental", s, d, snapshots_to_transfer[n - 1], x))
# now let's apply the same argument to the children
children_sched = []
for c in [ x for x in s.children if not isinstance(x, Snapshot) ]:
try: cd = d.get_child(c.name)
except (KeyError, AttributeError): cd = None
children_sched.extend(recursive_replicate(c, cd))
# and return our schedule of operations to the parent
return sched + children_sched
def optimize_coalesce(operation_schedule):
# now let's optimize the operation schedule
# this optimization is quite basic
# step 1: coalesce contiguous operations on the same file system
operations_grouped_by_source = itertools.groupby(
operation_schedule,
lambda op: op[1]
)
new = []
for _, opgroup in [ (x, list(y)) for x, y in operations_grouped_by_source ]:
if not opgroup: # empty opgroup
continue
if opgroup[0][0] == 'full': # full operations
new.extend(opgroup)
elif opgroup[0][0] == 'create_stub': # create stub operations
new.extend(opgroup)
elif opgroup[0][0] == 'incremental': # incremental
# 1->2->3->4 => 1->4
new_ops = [ (srcs, dsts) for _, _, _, srcs, dsts in opgroup ]
new_ops = simplify(new_ops)
for srcs, dsts in new_ops:
new.append(tuple(opgroup[0][:3] + (srcs, dsts)))
else:
assert 0, "not reached: unknown operation type in %s" % opgroup
return new
def optimize_recursivize(operation_schedule):
def recurse(dataset, func):
results = []
results.append((dataset, func(dataset)))
results.extend([ x for child in dataset.children if child.__class__ != Snapshot for x in recurse(child, func) ])
return results
def zero_out_sched(dataset):
dataset._ops_schedule = []
def evict_sched(dataset):
dataset._ops_schedule = []
operations_grouped_by_source = itertools.groupby(
operation_schedule,
lambda op: op[1]
)
operations_grouped_by_source = [ (x, list(y)) for x, y in operations_grouped_by_source ]
roots = set()
for root, opgroup in operations_grouped_by_source:
while root.parent is not None:
root = root.parent
roots.add(root)
for root in roots:
recurse(root, zero_out_sched)
for source, opgroup in operations_grouped_by_source:
source._ops_schedule = opgroup
def compare(*ops_schedules):
assert len(ops_schedules), "operations schedules cannot be empty: %r" % ops_schedules
# in the case of the list of operations schedules being just one (no children)
# we return True, cos it's safe to recursively replicate this one
if len(ops_schedules) == 1:
return True
# now let's check that all ops schedules are the same length
# otherwise they are not the same and we can say the comparison isn't the same
lens = set([ len(o) for o in ops_schedules ])
if len(lens) != 1:
return False
# we have multiple schedules
# if their type, snapshot origin and snapshot destination are all the same
# we can say that they are "the same"
comparisons = [
all([
# never attempt to recursivize operations who involve create_stub
all(["create_stub" not in o[0] for o in ops]),
len(set([o[0] for o in ops])) == 1,
any([o[3] is None for o in ops]) or len(set([o[3].name for o in ops])) == 1,
any([o[4] is None for o in ops]) or len(set([o[4].name for o in ops])) == 1,
])
for ops
in zip(*ops_schedules)
]
return all(comparisons)
# remove unnecessary stubs that stand in for only other stubs
for root in roots:
for dataset, _ in recurse(root, lambda d: d):
ops = [z for x, y in recurse(dataset, lambda d: d._ops_schedule) for z in y]
if all([o[0] == 'create_stub' for o in ops]):
dataset._ops_schedule = []
for root in roots:
for dataset, _ in recurse(root, lambda d: d):
if compare(*[y for x, y in recurse(dataset, lambda d: d._ops_schedule)]):
old_ops_schedule = dataset._ops_schedule
recurse(dataset, zero_out_sched)
for op in old_ops_schedule:
dataset._ops_schedule.append((
op[0] + "_recursive", op[1], op[2], op[3], op[4]
))
new_operation_schedule = []
for root in roots:
for dataset, ops_schedule in recurse(root, lambda d: d._ops_schedule):
new_operation_schedule.extend(ops_schedule)
for root in roots:
recurse(root, evict_sched)
return new_operation_schedule
def optimize(operation_schedule, allow_recursivize = True):
operation_schedule = optimize_coalesce(operation_schedule)
if allow_recursivize:
operation_schedule = optimize_recursivize(operation_schedule)
return operation_schedule
# we walk the entire dataset structure, and clear obsolete snapshots recursively
def recursive_clear_obsolete(s, d):
sched = []
    # we first collect the snapshot names on both sides, to find snapshots that exist only in the destination
snapshots_in_src = set([ m.name for m in s.get_snapshots() ])
snapshots_in_dst = set([ m.name for m in d.get_snapshots() ])
snapshots_to_delete = snapshots_in_dst - snapshots_in_src
snapshots_to_delete = [ d.get_snapshot(m) for m in snapshots_to_delete ]
for m in snapshots_to_delete:
sched.append(("destroy", m))
# now let's apply the same argument to the children
children_sched = []
for child_d in [ x for x in d.children if not isinstance(x, Snapshot) ]:
child_s = None
try:
child_s = s.get_child(child_d.name)
except (KeyError, AttributeError):
children_sched.append(("destroy_recursively", child_d))
if child_s:
children_sched.extend(recursive_clear_obsolete(child_s, child_d))
# and return our schedule of operations to the parent
    return sched + children_sched

[file: src/zfstools/sync.py | package: zfs-tools-0.5.1]
from collections import OrderedDict
class Dataset(object):
name = None
children = None
_properties = None
parent = None
invalidated = False
def __init__(self, name, parent=None):
self.name = name
self.children = []
self._properties = {}
if parent:
self.parent = parent
self.parent.add_child(self)
def add_child(self, child):
self.children.append(child)
return child
def get_child(self, name):
child = [ c for c in self.children if c.name == name and isinstance(c, Dataset) ]
assert len(child) < 2
if not child: raise KeyError(name)
return child[0]
def get_snapshots(self, flt=True):
if flt is True: flt = lambda _:True
children = [ c for c in self.children if isinstance(c, Snapshot) and flt(c) ]
return children
def get_snapshot(self, name):
children = [ c for c in self.get_snapshots() if c.name == name ]
assert len(children) < 2
if not children: raise KeyError(name)
return children[0]
def lookup(self, name): # FINISH THIS
if "@" in name:
path, snapshot = name.split("@")
else:
path = name
snapshot = None
if "/" not in path:
try: dset = self.get_child(path)
except KeyError: raise KeyError("No such dataset %s at %s" % (path, self.get_path()))
if snapshot:
try: dset = dset.get_snapshot(snapshot)
except KeyError: raise KeyError("No such snapshot %s at %s" % (snapshot, dset.get_path()))
else:
head, tail = path.split("/", 1)
try: child = self.get_child(head)
except KeyError: raise KeyError("No such dataset %s at %s" % (head, self.get_path()))
if snapshot: tail = tail + "@" + snapshot
dset = child.lookup(tail)
return dset
def remove(self, child):
if child not in self.children: raise KeyError(child.name)
child.invalidated = True
child.parent = None
self.children.remove(child)
for c in child.children:
child.remove(c)
def get_path(self):
if not self.parent: return self.name
return "%s/%s" % (self.parent.get_path(), self.name)
def get_relative_name(self):
if not self.parent: return self.name
return self.get_path()[len(self.parent.get_path()) + 1:]
def walk(self):
assert not self.invalidated, "%s invalidated" % self
yield self
for c in self.children:
for element in c.walk():
yield element
def __iter__(self):
return self.walk()
def __str__(self):
return "<Dataset: %s>" % self.get_path()
__repr__ = __str__
def get_property(self,name):
return self._properties[ name ]
class Pool(Dataset):
def __str__(self):
return "<Pool: %s>" % self.get_path()
__repr__ = __str__
class Snapshot(Dataset):
# def __init__(self,name):
# Dataset.__init__(self,name)
def get_path(self):
if not self.parent: return self.name
return "%s@%s" % (self.parent.get_path(), self.name)
def __str__(self):
return "<Snapshot: %s>" % self.get_path()
__repr__ = __str__
class PoolSet: # maybe rewrite this as a dataset or something?
pools = None
def __init__(self):
self.pools = {}
def lookup(self, name):
if "@" in name:
path, snapshot = name.split("@")
else:
path = name
snapshot = None
if "/" not in path:
try: dset = self.pools[path]
except KeyError: raise KeyError("No such pool %s" % (name))
if snapshot:
try: dset = dset.get_snapshot(snapshot)
except KeyError: raise KeyError("No such snapshot %s at %s" % (snapshot, dset.get_path()))
else:
head, tail = path.split("/", 1)
try: pool = self.pools[head]
except KeyError: raise KeyError("No such pool %s" % (head))
if snapshot: tail = tail + "@" + snapshot
dset = pool.lookup(tail)
return dset
def parse_zfs_r_output(self, zfs_r_output, properties = None):
"""Parse the output of tab-separated zfs list.
properties must be a list of property names expected to be found as
tab-separated entries on each line of zfs_r_output after the
dataset name and a tab.
E.g. if properties passed here was ['creation'], we would expect
each zfs_r_output line to look like 'dataset 3249872348'
"""
try:
            properties = ['name', 'creation'] if properties is None else ['name'] + properties
except TypeError:
assert 0, repr(properties)
def extract_properties(s):
items = s.strip().split( '\t' )
assert len( items ) == len( properties ), (properties, items)
propvalues = map( lambda x: None if x == '-' else x, items[ 1: ] )
return [ items[ 0 ], zip( properties[ 1: ], propvalues ) ]
# make into array
creations = OrderedDict([ extract_properties( s ) for s in zfs_r_output.splitlines() if s.strip() ])
# names of pools
old_dsets = [ x.get_path() for x in self.walk() ]
old_dsets.reverse()
new_dsets = creations.keys()
for dset in new_dsets:
if "@" in dset:
dset, snapshot = dset.split("@")
else:
snapshot = None
if "/" not in dset: # pool name
if dset not in self.pools:
self.pools[dset] = Pool(dset)
fs = self.pools[dset]
poolname, pathcomponents = dset.split("/")[0], dset.split("/")[1:]
fs = self.pools[poolname]
for pcomp in pathcomponents:
# traverse the child hierarchy or create if that fails
try: fs = fs.get_child(pcomp)
except KeyError:
fs = Dataset(pcomp, fs)
if snapshot:
if snapshot not in [ x.name for x in fs.children ]:
fs = Snapshot(snapshot, fs)
fs._properties.update( creations[fs.get_path()] )
for dset in old_dsets:
if dset not in new_dsets:
if "/" not in dset and "@" not in dset: # a pool
self.remove(dset)
else:
d = self.lookup(dset)
d.parent.remove(d)
def remove(self, name): # takes a NAME, unlike the child that is taken in the remove of the dataset method
for c in self.pools[name].children:
self.pools[name].remove(c)
self.pools[name].invalidated = True
del self.pools[name]
def __getitem__(self, name):
return self.pools[name]
def __str__(self):
return "<PoolSet at %s>" % id(self)
__repr__ = __str__
def walk(self):
for item in self.pools.values():
for dset in item.walk():
yield dset
def __iter__(self):
        return self.walk()

[file: src/zfstools/models.py | package: zfs-tools-0.5.1]
# ZFS Uploader
ZFS Uploader is a simple program for backing up full and incremental ZFS
snapshots to Amazon S3. It supports CRON based scheduling and can
automatically remove old snapshots and backups. A helpful CLI (`zfsup`) lets
you run jobs, restore, and list backups.
### Features
- Backup/restore ZFS file systems
- Create incremental and full backups
- Automatically remove old snapshots and backups
- Use any S3 storage class type
- Helpful CLI
### Requirements
- Python 3.6 or higher
- ZFS 0.8.1 or higher (untested on earlier versions)
## Install Instructions
Commands should be run as root.
1. Create a directory and virtual environment
```bash
mkdir /etc/zfs_uploader
cd /etc/zfs_uploader
virtualenv --python python3 env
```
2. Install ZFS Uploader
```bash
source env/bin/activate
pip install zfs_uploader
ln -sf /etc/zfs_uploader/env/bin/zfsup /usr/local/sbin/zfsup
```
3. Write configuration file
Please see the [Configuration File](#configuration-file) section below for
helpful configuration examples.
```bash
vi config.cfg
chmod 600 config.cfg
```
4. Start service
```bash
cp zfs_uploader.service /etc/systemd/system/zfs_uploader.service
sudo systemctl enable --now zfs_uploader
```
5. List backups
```bash
zfsup list
```
## Configuration File
The program reads backup job parameters from a configuration file. Default
parameters may be set which then apply to all backup jobs. Multiple backup
jobs can be set in one file.
### Parameters
#### bucket_name : str
S3 bucket name.
#### access_key : str
S3 access key.
#### secret_key : str
S3 secret key.
#### filesystem : str
ZFS filesystem.
#### prefix : str, optional
Prefix to be prepended to the s3 key.
#### region : str, default: us-east-1
S3 region.
#### endpoint : str, optional
S3 endpoint URL for S3-compatible services (e.g. Backblaze B2).
#### cron : str, optional
Cron schedule. Example: `* 0 * * *`
#### max_snapshots : int, optional
Maximum number of snapshots.
#### max_backups : int, optional
Maximum number of full and incremental backups.
#### max_incremental_backups_per_full : int, optional
Maximum number of incremental backups per full backup.
#### storage_class : str, default: STANDARD
S3 storage class.
#### max_multipart_parts : int, default: 10000
Maximum number of parts to use in a multipart S3 upload.
### Examples
#### Multiple full backups
```ini
[DEFAULT]
bucket_name = BUCKET_NAME
region = us-east-1
access_key = ACCESS_KEY
secret_key = SECRET_KEY
storage_class = STANDARD
[pool/filesystem]
cron = 0 2 * * *
max_snapshots = 7
max_incremental_backups_per_full = 6
max_backups = 7
```
Filesystem is backed up at 02:00 daily. Only the most recent 7 snapshots
are kept. The oldest backup without dependents is removed once there are
more than 7 backups.
#### Backblaze B2 S3-compatible endpoint, full backups
```ini
[DEFAULT]
bucket_name = BUCKET_NAME
region = eu-central-003
access_key = ACCESS_KEY
secret_key = SECRET_KEY
storage_class = STANDARD
endpoint = https://s3.eu-central-003.backblazeb2.com
[pool/filesystem]
cron = 0 2 * * *
max_snapshots = 7
max_incremental_backups_per_full = 6
max_backups = 7
```
##### Structure
full backup (f), incremental backup (i)
1. f
2. f i
3. f i i
4. f i i i
5. f i i i i
6. f i i i i i
7. f i i i i i i
8. f i i i i i f
9. f i i i i f i
10. f i i i f i i
11. f i i f i i i
12. f i f i i i i
13. f f i i i i i
14. f i i i i i i
#### Single full backup
```ini
[DEFAULT]
bucket_name = BUCKET_NAME
region = us-east-1
access_key = ACCESS_KEY
secret_key = SECRET_KEY
storage_class = STANDARD
[pool/filesystem]
cron = 0 2 * * *
max_snapshots = 7
max_backups = 7
```
Filesystem is backed up at 02:00 daily. Only the most recent 7 snapshots
are kept. The oldest incremental backup is removed once there are
more than 7 backups. The full backup is never removed.
##### Structure
full backup (f), incremental backup (i)
1. f
2. f i
3. f i i
4. f i i i
5. f i i i i
6. f i i i i i
7. f i i i i i i
#### Only full backups
```ini
[DEFAULT]
bucket_name = BUCKET_NAME
region = us-east-1
access_key = ACCESS_KEY
secret_key = SECRET_KEY
storage_class = STANDARD
[pool/filesystem]
cron = 0 2 * * *
max_snapshots = 7
max_incremental_backups_per_full = 0
max_backups = 7
```
Filesystem is backed up at 02:00 daily. Only the most recent 7 snapshots
are kept. The oldest full backup is removed once there are
more than 7 backups. No incremental backups are taken.
##### Structure
full backup (f)
1. f
2. f f
3. f f f
4. f f f f
5. f f f f f
6. f f f f f f
7. f f f f f f f
## Miscellaneous
### Storage class codes
- STANDARD
- REDUCED_REDUNDANCY
- STANDARD_IA
- ONEZONE_IA
- INTELLIGENT_TIERING
- GLACIER
- DEEP_ARCHIVE
- OUTPOSTS
## Release Instructions
1. Increment version in `__init__.py` file
2. Update `CHANGELOG.md` with new version
3. Tag release in GitHub when ready. Add changelog items to release
description. GitHub Action workflow will automatically build and push
the release to PyPi.
[file: README.md | package: zfs_uploader-0.9.0]
import logging
import os
import sys
from logging.handlers import RotatingFileHandler
import click
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BlockingScheduler
from zfs_uploader import __version__
from zfs_uploader.config import Config
LOG_FORMAT = 'time=%(asctime)s.%(msecs)03d level=%(levelname)s %(message)s'
@click.group()
@click.option('--config-path', default='config.cfg',
help='Config file path.',
show_default=True)
@click.option('--log-path', default='zfs_uploader.log',
help='Log file path.',
show_default=True)
@click.pass_context
def cli(ctx, config_path, log_path):
logger = logging.getLogger('zfs_uploader')
logger.setLevel(logging.INFO)
formatter = logging.Formatter(LOG_FORMAT, datefmt='%Y-%m-%dT%H:%M:%S')
fh = RotatingFileHandler(log_path, maxBytes=5*1024*1024, backupCount=5)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
if not os.path.isfile(config_path):
print('No configuration file found.')
sys.exit(1)
ctx.obj = {
'config_path': config_path,
'logger': logger
}
@cli.command()
@click.pass_context
def backup(ctx):
""" Start backup job scheduler or run the tasks serially if
cron is not provided in the config file.
"""
config_path = ctx.obj['config_path']
logger = ctx.obj['logger']
config = Config(config_path)
scheduler = BlockingScheduler(
executors={'default': ThreadPoolExecutor(max_workers=1)},
job_defaults={'misfire_grace_time': None}
)
for job in config.jobs.values():
if job.cron:
logger.info(f'filesystem={job.filesystem} '
f'cron="{job.cron}" '
'msg="Adding job."')
scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)
else:
            logger.info(f'filesystem={job.filesystem} '
                        'msg="Running job."')
job.start()
try:
if len(scheduler.get_jobs()) > 0:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass
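# Illustrative invocation via the zfsup entry point (paths as in the README):
#   zfsup --config-path /etc/zfs_uploader/config.cfg backup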
@cli.command('list')
@click.argument('filesystem', required=False)
@click.pass_context
def list_backups(ctx, filesystem):
""" List backups. """
config_path = ctx.obj['config_path']
logger = ctx.obj['logger']
logger.setLevel('CRITICAL')
config = Config(config_path)
if filesystem:
job = config.jobs.get(filesystem)
jobs = {filesystem: job}
if job is None:
print('Filesystem does not exist in config file.')
sys.exit(1)
else:
jobs = config.jobs
    if not jobs:
print('No filesystems exist in config file.')
sys.exit(1)
for filesystem, job in jobs.items():
print(f'{filesystem}:\n')
print('{0:<16} {1:<16} {2:<5} {3:<14}'.format('time', 'dependency',
'type', 'size (bytes)'))
print('-'*52)
for b in job.backup_db.get_backups():
dependency = b.dependency or str(b.dependency)
backup_size = b.backup_size or str(b.backup_size)
print(f'{b.backup_time:<16} {dependency:<16} '
f'{b.backup_type:<5} {backup_size:<14}')
print('\n')
@cli.command()
@click.option('--destination', help='Destination filesystem.')
@click.argument('filesystem')
@click.argument('backup-time', required=False)
@click.pass_context
def restore(ctx, destination, filesystem, backup_time):
""" Restore from backup.
Defaults to most recent backup if backup-time is not specified.
WARNING: If restoring to a file system that already exists, snapshots
and data that were written after the backup will be destroyed. Set
`destination` in order to restore to a new file system.
"""
config_path = ctx.obj['config_path']
config = Config(config_path)
job = config.jobs.get(filesystem)
if job is None:
print('Filesystem does not exist.')
sys.exit(1)
job.restore(backup_time, destination)
print('Restore successful.')
@cli.command(help='Print version.')
def version():
print(__version__)
if __name__ == '__main__':
    cli()

[file: zfs_uploader/__main__.py | package: zfs_uploader-0.9.0]
from configparser import ConfigParser
import logging
import os
import sys
from zfs_uploader.job import ZFSjob
class Config:
""" Wrapper for configuration file. """
@property
def jobs(self):
""" ZFS backup jobs. """
return self._jobs
def __init__(self, file_path=None):
""" Construct Config object from file.
Parameters
----------
file_path : str
File path to config file.
"""
file_path = file_path or 'config.cfg'
self._logger = logging.getLogger(__name__)
self._logger.info(f'file_path={file_path} '
'msg="Loading configuration file."')
if not os.path.isfile(file_path):
self._logger.critical('No configuration file found.')
sys.exit(1)
self._cfg = ConfigParser()
self._cfg.read(file_path)
default = self._cfg['DEFAULT']
self._jobs = {}
for k, v in self._cfg.items():
if k != 'DEFAULT':
bucket_name = (v.get('bucket_name') or
default.get('bucket_name'))
access_key = v.get('access_key') or default.get('access_key')
secret_key = v.get('secret_key') or default.get('secret_key')
filesystem = k
if not all((bucket_name, access_key, secret_key)):
self._logger.critical(f'file_path={file_path} '
                                      f'filesystem={filesystem} '
'msg="bucket_name, access_key or '
'secret_key is missing from config."'
)
sys.exit(1)
cron_dict = None
cron = v.get('cron') or default.get('cron')
if cron:
cron_dict = _create_cron_dict(cron)
self._jobs[k] = (
ZFSjob(
bucket_name,
access_key,
secret_key,
filesystem,
prefix=v.get('prefix') or default.get('prefix'),
region=v.get('region') or default.get('region'),
endpoint=v.get('endpoint') or default.get('endpoint'),
cron=cron_dict,
max_snapshots=(v.getint('max_snapshots') or
default.getint('max_snapshots')),
max_backups=(
v.getint('max_backups') or
default.getint('max_backups')),
max_incremental_backups_per_full=(
v.getint('max_incremental_backups_per_full') or
default.getint('max_incremental_backups_per_full')), # noqa
storage_class=(v.get('storage_class') or
default.get('storage_class')),
max_multipart_parts=(
v.getint('max_multipart_parts') or
default.getint('max_multipart_parts'))
)
)
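# Illustrative: the helper below maps a five-field cron string to the keyword
# arguments ZFSjob expects, e.g.
#   _create_cron_dict('0 2 * * *')
#   -> {'minute': '0', 'hour': '2', 'day': '*', 'month': '*', 'day_of_week': '*'}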
def _create_cron_dict(cron):
values = cron.split()
return {'minute': values[0],
'hour': values[1],
'day': values[2],
'month': values[3],
            'day_of_week': values[4]}

[file: zfs_uploader/config.py | package: zfs_uploader-0.9.0]
from time import sleep
from zfs_uploader.utils import get_date_time
from zfs_uploader import zfs
class SnapshotDB:
@property
def filesystem(self):
""" ZFS file system. """
return self._filesystem
def __init__(self, filesystem):
""" Create SnapshotDB object.
Snapshot DB is used for storing Snapshot objects. Creating a
snapshot will create an actual ZFS snapshot.
Parameters
----------
filesystem : str
ZFS filesystem.
"""
self._filesystem = filesystem
self._snapshots = {}
self.refresh()
def create_snapshot(self):
""" Create Snapshot object and ZFS snapshot.
Returns
-------
Snapshot
"""
name = get_date_time()
if name in self._snapshots:
# sleep for one second in order to increment name
sleep(1)
name = get_date_time()
out = zfs.create_snapshot(self._filesystem, name)
if out.returncode:
raise zfs.ZFSError(out.stderr)
self.refresh()
return self._snapshots[name]
def delete_snapshot(self, name):
""" Delete Snapshot object and ZFS snapshot.
Parameters
----------
name : str
"""
        out = zfs.destroy_snapshot(self._filesystem, name)
        if out.returncode:
            raise zfs.ZFSError(out.stderr)
        del self._snapshots[name]
def get_snapshots(self):
""" Get sorted list of snapshots.
Most recent snapshot is last.
Returns
-------
list(Snapshot)
Sorted list of snapshots. Most recent snapshot is last.
"""
return list(self._snapshots.values())
def get_snapshot_names(self):
""" Get sorted list of snapshot names.
Most recent snapshot name is last.
Returns
-------
list(str)
Sorted list of snapshot names. Most recent snapshot is last.
"""
return list(self._snapshots.keys())
def refresh(self):
""" Refresh SnapshotDB with latest snapshots. """
self._snapshots = {}
        for k, v in zfs.list_snapshots().items():
            # match on the exact filesystem name rather than a substring,
            # so that e.g. tank/data does not pick up snapshots of tank/data2
            filesystem, name = k.split('@')
            if filesystem == self._filesystem:
                referenced = int(v['REFER'])
                used = int(v['USED'])
                self._snapshots.update({
                    name: Snapshot(filesystem, name, referenced, used)
                })
class Snapshot:
""" Snapshot object. """
@property
def filesystem(self):
""" ZFS file system. """
return self._filesystem
@property
def key(self):
""" filesystem@backup_time identifier """
return f'{self._filesystem}@{self._name}'
@property
def name(self):
""" Snapshot name. """
return self._name
@property
def referenced(self):
""" Space referenced by snapshot in bytes. """
return self._referenced
@property
def used(self):
""" Space used by snapshot in bytes. """
return self._used
def __init__(self, filesystem, name, referenced, used):
""" Create Snapshot object.
Parameters
----------
filesystem : str
ZFS filesystem.
name : str
Snapshot name.
referenced : int
Space referenced by snapshot in bytes.
used : int
Space used by snapshot in bytes.
"""
self._filesystem = filesystem
self._name = name
self._referenced = referenced
self._used = used
def __eq__(self, other):
return all((self._filesystem == other._filesystem, # noqa
self._name == other._name, # noqa
self._referenced == other._referenced, # noqa
self._used == other._used # noqa
))
def __hash__(self):
return hash((self._filesystem,
self._name,
self._referenced,
self._used
)) | zfs-uploader | /zfs_uploader-0.9.0.tar.gz/zfs_uploader-0.9.0/zfs_uploader/snapshot_db.py | snapshot_db.py |
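A short, hypothetical usage sketch of the snapshot classes above (the filesystem name is a placeholder):

```python
from zfs_uploader.snapshot_db import SnapshotDB

db = SnapshotDB('tank/data')      # reads the existing ZFS snapshots
snap = db.create_snapshot()       # takes a new, timestamp-named ZFS snapshot
print(snap.key)                   # e.g. tank/data@20240101_120000
print(db.get_snapshot_names())    # sorted names, most recent last
```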
from datetime import datetime
from io import BytesIO
import json
from botocore.exceptions import ClientError # noqa
from zfs_uploader import BACKUP_DB_FILE, DATETIME_FORMAT
from zfs_uploader.utils import derive_s3_key
class BackupDB:
""" Backup DB object. """
@property
def filesystem(self):
""" ZFS filesystem. """
return self._filesystem
def __init__(self, bucket, filesystem, s3_prefix=None):
""" Create BackupDB object.
BackupDB is used for storing Backup objects. It does not upload
backups but serves as a database for backup records.
Parameters
----------
bucket : Bucket
S3 Bucket.
filesystem : str
ZFS filesystem.
s3_prefix: str, optional
The s3 prefix to prepend to the backup.db file.
"""
self._filesystem = filesystem
self._backups = {}
s3_key = derive_s3_key(BACKUP_DB_FILE, self.filesystem, s3_prefix)
self._s3_object = bucket.Object(s3_key)
# initialize from backup.db file if it exists
self.download()
def create_backup(self, backup_time, backup_type, s3_key,
dependency=None, backup_size=None):
""" Create backup object and upload `backup.db` file.
Parameters
----------
backup_time : str
Backup time in %Y%m%d_%H%M%S format.
backup_type : str
Supported backup types are `full` and `inc`.
s3_key : str
Backup S3 key.
dependency : str, optional
Backup time of dependency in %Y%m%d_%H%M%S format. Used for
storing the dependent full backup for an incremental backup.
backup_size : int, optional
Backup size in bytes.
"""
if backup_time in self._backups:
raise ValueError('Backup already exists.')
if dependency and dependency not in self._backups:
            raise ValueError('Dependency backup does not exist.')
self._backups.update({
backup_time: Backup(backup_time, backup_type, self._filesystem,
s3_key, dependency, backup_size)
})
self.upload()
def delete_backup(self, backup_time):
""" Delete backup and upload `backup.db`.
Parameters
----------
backup_time : str
Backup time in %Y%m%d_%H%M%S format.
"""
if _validate_backup_time(backup_time) is False:
raise ValueError('backup_time is wrong format')
del self._backups[backup_time]
self.upload()
def get_backup(self, backup_time):
""" Get backup using backup time.
Parameters
----------
backup_time : str
Backup time in %Y%m%d_%H%M%S format.
Returns
-------
Backup
"""
if _validate_backup_time(backup_time) is False:
raise ValueError('backup_time is wrong format')
try:
return self._backups[backup_time]
except KeyError:
raise KeyError('Backup does not exist.') from None
def get_backups(self, backup_type=None):
""" Get sorted list of backups.
Parameters
----------
backup_type : str, optional
Supported backup types are `full` and `inc`.
Returns
-------
list(Backup)
Sorted list of backups. Most recent backup is last.
"""
backup_times = sorted(self._backups)
if backup_type in ['full', 'inc']:
backups = []
for time in backup_times:
backup = self._backups[time]
if backup.backup_type == backup_type:
backups.append(backup)
elif backup_type is None:
backups = [self._backups[time] for time in backup_times]
else:
raise ValueError('backup_type must be `full` or `inc`')
return backups
def get_backup_times(self, backup_type=None):
""" Get sorted list of backup times.
Parameters
----------
backup_type : str, optional
Supported backup types are `full` and `inc`.
Returns
-------
list(str)
Sorted list of backup times. Most recent backup is last.
"""
if backup_type in ['full', 'inc']:
backup_times = []
for time in sorted(self._backups):
backup = self._backups[time]
if backup.backup_type == backup_type:
backup_times.append(time)
return backup_times
elif backup_type is None:
return sorted(self._backups)
else:
raise ValueError('backup_type must be `full` or `inc`')
def download(self):
""" Download backup.db file. """
try:
with BytesIO() as f:
self._s3_object.download_fileobj(f)
f.seek(0)
self._backups = json.load(f, object_hook=_json_object_hook)
except ClientError:
pass
def upload(self):
""" Upload backup.db file. """
with BytesIO() as f:
json_str = json.dumps(self._backups, default=_json_default)
f.write(json_str.encode('utf-8'))
f.seek(0)
self._s3_object.upload_fileobj(f)
class Backup:
""" Backup object. """
@property
def backup_time(self):
""" Backup time. """
return self._backup_time
@property
def backup_type(self):
""" Backup type. """
return self._backup_type
@property
def filesystem(self):
""" ZFS filesystem. """
return self._filesystem
@property
def snapshot_name(self):
""" ZFS snapshot name. """
return f'{self._filesystem}@{self._backup_time}'
@property
def s3_key(self):
""" S3 key. """
return self._s3_key
@property
def dependency(self):
""" Backup time of dependency. """
return self._dependency
@property
def backup_size(self):
""" Backup size in bytes. """
return self._backup_size
def __init__(self, backup_time, backup_type, filesystem, s3_key,
dependency=None, backup_size=None):
""" Create Backup object.
Parameters
----------
backup_time : str
Backup time in %Y%m%d_%H%M%S format.
backup_type : str
Supported backup types are `full` and `inc`.
filesystem : str
ZFS filesystem.
s3_key : str
Backup S3 key.
dependency : str, optional
Backup time of dependency in %Y%m%d_%H%M%S format. Used for
storing the dependent full backup for an incremental backup.
backup_size : int, optional
Backup size in bytes.
"""
if _validate_backup_time(backup_time):
self._backup_time = backup_time
else:
raise ValueError('backup_time is wrong format')
if backup_type in ['full', 'inc']:
self._backup_type = backup_type
else:
raise ValueError('backup_type must be `full` or `inc`')
self._filesystem = filesystem
self._s3_key = s3_key
        if dependency and not _validate_backup_time(dependency):
            raise ValueError('dependency is wrong format')
        # always set the attribute so __eq__, __hash__ and serialization
        # also work for backups without a dependency
        self._dependency = dependency
self._backup_size = backup_size
def __eq__(self, other):
return all((self._backup_time == other._backup_time, # noqa
self._backup_type == other._backup_type, # noqa
self._filesystem == other._filesystem, # noqa
self._s3_key == other._s3_key, # noqa
self._dependency == other._dependency, # noqa
self._backup_size == other._backup_size # noqa
))
def __hash__(self):
return hash((self._backup_time,
self._backup_type,
self._filesystem,
self._s3_key,
self._dependency,
self._backup_size
))
def _json_default(obj):
if isinstance(obj, Backup):
return {
'_type': 'Backup',
'backup_time': obj._backup_time, # noqa
'backup_type': obj._backup_type, # noqa
'filesystem': obj._filesystem, # noqa
's3_key': obj._s3_key, # noqa
'dependency': obj._dependency, # noqa
'backup_size': obj._backup_size # noqa
        }
    raise TypeError(f'Object of type {type(obj).__name__} '
                    'is not JSON serializable')
def _json_object_hook(dct):
obj_type = dct.get('_type')
if obj_type == 'Backup':
dct_copy = dct.copy()
del dct_copy['_type']
return Backup(**dct_copy)
else:
return dct
def _validate_backup_time(backup_time):
try:
datetime.strptime(backup_time, DATETIME_FORMAT)
except ValueError:
return False
return True | zfs-uploader | /zfs_uploader-0.9.0.tar.gz/zfs_uploader-0.9.0/zfs_uploader/backup_db.py | backup_db.py |
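Given `_json_default` and `_json_object_hook` above, the uploaded `backup.db` file is a JSON object keyed by backup time. Roughly (timestamps and S3 keys are hypothetical):

```json
{
  "20240101_020000": {"_type": "Backup", "backup_time": "20240101_020000",
                      "backup_type": "full", "filesystem": "tank/data",
                      "s3_key": "tank/data/20240101_020000.full",
                      "dependency": null, "backup_size": 1073741824},
  "20240102_020000": {"_type": "Backup", "backup_time": "20240102_020000",
                      "backup_type": "inc", "filesystem": "tank/data",
                      "s3_key": "tank/data/20240102_020000.inc",
                      "dependency": "20240101_020000", "backup_size": 52428800}
}
```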
from datetime import datetime
import logging
import time
import sys
import boto3
from boto3.s3.transfer import TransferConfig
from zfs_uploader.backup_db import BackupDB, DATETIME_FORMAT
from zfs_uploader.snapshot_db import SnapshotDB
from zfs_uploader.utils import derive_s3_key
from zfs_uploader.zfs import (destroy_filesystem, destroy_snapshot,
get_snapshot_send_size,
get_snapshot_send_size_inc,
open_snapshot_stream,
open_snapshot_stream_inc, rollback_filesystem,
ZFSError)
KB = 1024
MB = KB * KB
S3_MAX_CONCURRENCY = 20
class BackupError(Exception):
""" Baseclass for backup exceptions. """
class RestoreError(Exception):
""" Baseclass for restore exceptions. """
class ZFSjob:
""" ZFS backup job. """
@property
def bucket(self):
""" S3 bucket. """
return self._bucket
@property
def region(self):
""" S3 region. """
return self._region
@property
def endpoint(self):
""" S3 Endpoint. """
return self._endpoint
@property
def access_key(self):
""" S3 access key. """
return self._access_key
@property
def secret_key(self):
""" S3 secret key. """
return self._secret_key
@property
def filesystem(self):
""" ZFS filesystem. """
return self._filesystem
@property
def prefix(self):
""" Prefix to be prepended to the s3 key. """
return self._prefix
@property
def s3(self):
""" S3 resource. """
return self._s3
@property
def cron(self):
""" Cron schedule. """
return self._cron
@property
def max_snapshots(self):
""" Maximum number of snapshots. """
return self._max_snapshots
@property
def max_backups(self):
""" Maximum number of full and incremental backups. """
return self._max_backups
@property
def max_incremental_backups_per_full(self):
""" Maximum number of incremental backups per full backup. """
return self._max_incremental_backups_per_full
@property
def storage_class(self):
""" S3 storage class. """
return self._storage_class
@property
def max_multipart_parts(self):
""" Maximum number of parts to use in a multipart S3 upload. """
return self._max_multipart_parts
@property
def backup_db(self):
""" BackupDB """
return self._backup_db
@property
def snapshot_db(self):
""" SnapshotDB """
return self._snapshot_db
def __init__(self, bucket_name, access_key, secret_key, filesystem,
prefix=None, region=None, cron=None, max_snapshots=None,
max_backups=None, max_incremental_backups_per_full=None,
storage_class=None, endpoint=None, max_multipart_parts=None):
""" Create ZFSjob object.
Parameters
----------
bucket_name : str
S3 bucket name.
access_key : str
S3 access key.
secret_key : str
S3 secret key.
filesystem : str
ZFS filesystem.
prefix : str, optional
The prefix added to the s3 key for backups.
region : str, default: us-east-1
S3 region.
endpoint : str, optional
S3 endpoint for alternative services
cron : str, optional
Cron schedule. Example: `* 0 * * *`
max_snapshots : int, optional
Maximum number of snapshots.
max_backups : int, optional
Maximum number of full and incremental backups.
max_incremental_backups_per_full : int, optional
Maximum number of incremental backups per full backup.
storage_class : str, default: STANDARD
S3 storage class.
max_multipart_parts : int, default: 10000
Maximum number of parts to use in a multipart S3 upload.
"""
self._bucket_name = bucket_name
self._region = region or 'us-east-1'
self._access_key = access_key
self._secret_key = secret_key
self._filesystem = filesystem
self._prefix = prefix
self._endpoint = endpoint
self._s3 = boto3.resource(service_name='s3',
region_name=self._region,
aws_access_key_id=self._access_key,
aws_secret_access_key=self._secret_key,
endpoint_url=endpoint)
self._bucket = self._s3.Bucket(self._bucket_name)
self._backup_db = BackupDB(self._bucket, self._filesystem,
self._prefix)
self._snapshot_db = SnapshotDB(self._filesystem)
self._cron = cron
self._max_snapshots = max_snapshots
self._max_backups = max_backups
self._max_incremental_backups_per_full = max_incremental_backups_per_full # noqa
self._storage_class = storage_class or 'STANDARD'
self._max_multipart_parts = max_multipart_parts or 10000
self._logger = logging.getLogger(__name__)
        if max_snapshots is not None and max_snapshots < 0:
            self._logger.error(f'filesystem={self._filesystem} '
                               'msg="max_snapshots must be greater than or '
                               'equal to 0."')
            sys.exit(1)
        if max_backups is not None and max_backups < 1:
            self._logger.error(f'filesystem={self._filesystem} '
                               'msg="max_backups must be greater '
                               'than or equal to 1."')
            sys.exit(1)
        if (max_incremental_backups_per_full is not None
                and max_incremental_backups_per_full < 0):
            self._logger.error(f'filesystem={self._filesystem} '
                               'msg="max_incremental_backups_per_full must be '
                               'greater than or equal to 0."')
            sys.exit(1)
def start(self):
""" Start ZFS backup job. """
self._logger.info(f'filesystem={self._filesystem} msg="Starting job."')
backups_inc = self._backup_db.get_backups(backup_type='inc')
backups_full = self._backup_db.get_backups(backup_type='full')
# find most recent full backup
backup = backups_full[-1] if backups_full else None
# if no full backup exists
if backup is None:
self._backup_full()
# if we don't want incremental backups
elif self._max_incremental_backups_per_full == 0:
self._backup_full()
# if we want incremental backups and multiple full backups
elif self._max_incremental_backups_per_full:
backup_time = backup.backup_time
            dependants = sum(1 for b in backups_inc
                             if b.dependency == backup_time)
            if dependants >= self._max_incremental_backups_per_full:
self._backup_full()
else:
self._backup_incremental(backup_time)
# if we want incremental backups and not multiple full backups
else:
self._backup_incremental(backup.backup_time)
        if self._max_snapshots is not None:
            self._limit_snapshots()
        if self._max_backups is not None:
            self._limit_backups()
self._logger.info(f'filesystem={self._filesystem} msg="Finished job."')
def restore(self, backup_time=None, filesystem=None):
""" Restore from backup.
Defaults to most recent backup if backup_time is not specified.
WARNING: If restoring to a file system that already exists, snapshots
and data that were written after the backup will be destroyed. The
file system will also be destroyed if there are no snapshots at any
point during the restore process.
Parameters
----------
backup_time : str, optional
Backup time in %Y%m%d_%H%M%S format.
filesystem : str, optional
File system to restore to. Defaults to the file system that the
backup was taken from.
"""
self._snapshot_db.refresh()
snapshots = self._snapshot_db.get_snapshot_names()
if backup_time:
backup = self._backup_db.get_backup(backup_time)
else:
backups = self._backup_db.get_backups()
            if not backups:
raise RestoreError('No backups exist.')
else:
backup = backups[-1]
backup_time = backup.backup_time
backup_type = backup.backup_type
s3_key = backup.s3_key
# Since we can't use the `-F` option with `zfs receive` for encrypted
# filesystems we have to handle removing filesystems, snapshots, and
# data written after the most recent snapshot ourselves.
if filesystem is None:
if snapshots:
# Destroy any snapshots that occurred after the backup
backup_datetime = datetime.strptime(backup_time,
DATETIME_FORMAT)
for snapshot in snapshots:
snapshot_datetime = datetime.strptime(snapshot,
DATETIME_FORMAT)
if snapshot_datetime > backup_datetime:
self._logger.info(f'filesystem={self.filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
f'msg="Destroying {snapshot} since '
'it occurred after the backup."')
destroy_snapshot(backup.filesystem, snapshot)
self._snapshot_db.refresh()
snapshots = self._snapshot_db.get_snapshot_names()
# Rollback to most recent snapshot or destroy filesystem if
# there are no snapshots.
if snapshots:
self._logger.info(f'filesystem={self.filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Rolling filesystem back to '
f'{snapshots[-1]}"')
out = rollback_filesystem(backup.filesystem, snapshots[-1])
if out.returncode:
raise ZFSError(out.stderr)
self._snapshot_db.refresh()
snapshots = self._snapshot_db.get_snapshot_names()
else:
self._logger.info(f'filesystem={self.filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Destroying filesystem since there '
'are no snapshots."')
destroy_filesystem(backup.filesystem)
else:
self._logger.info(f'filesystem={self.filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Destroying filesystem since there are '
'no snapshots."')
destroy_filesystem(backup.filesystem)
if backup_type == 'full':
if backup_time in snapshots and filesystem is None:
self._logger.info(f'filesystem={self.filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Snapshot already exists."')
else:
self._restore_snapshot(backup, filesystem)
elif backup_type == 'inc':
# restore full backup first
backup_full = self._backup_db.get_backup(backup.dependency)
if backup_full.backup_time in snapshots and filesystem is None:
self._logger.info(f'filesystem={self.filesystem} '
f'snapshot_name={backup_full.backup_time} '
f's3_key={backup_full.s3_key} '
'msg="Snapshot already exists."')
else:
self._restore_snapshot(backup_full, filesystem)
if backup_time in snapshots and filesystem is None:
self._logger.info(f'filesystem={self.filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Snapshot already exists."')
else:
self._restore_snapshot(backup, filesystem)
def _backup_full(self):
""" Create snapshot and upload full backup. """
snapshot = self._snapshot_db.create_snapshot()
backup_time = snapshot.name
filesystem = snapshot.filesystem
send_size = int(get_snapshot_send_size(filesystem, backup_time))
transfer_config = _get_transfer_config(send_size,
self._max_multipart_parts)
s3_key = derive_s3_key(f'{backup_time}.full', filesystem,
self.prefix)
self._logger.info(f'filesystem={filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Starting full backup."')
with open_snapshot_stream(filesystem, backup_time, 'r') as f:
transfer_callback = TransferCallback(self._logger, send_size,
filesystem, backup_time,
s3_key)
self._bucket.upload_fileobj(f.stdout,
s3_key,
Callback=transfer_callback.callback,
Config=transfer_config,
ExtraArgs={
'StorageClass': self._storage_class
})
stderr = f.stderr.read().decode('utf-8')
if f.returncode:
raise ZFSError(stderr)
backup_size = self._check_backup(s3_key)
self._backup_db.create_backup(backup_time, 'full', s3_key,
dependency=None, backup_size=backup_size)
self._logger.info(f'filesystem={filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Finished full backup."')
def _backup_incremental(self, backup_time_full):
""" Create snapshot and upload incremental backup.
Parameters
----------
backup_time_full : str
Backup time in %Y%m%d_%H%M%S format.
"""
snapshot = self._snapshot_db.create_snapshot()
backup_time = snapshot.name
filesystem = snapshot.filesystem
send_size = int(get_snapshot_send_size_inc(filesystem,
backup_time_full,
backup_time))
transfer_config = _get_transfer_config(send_size,
self._max_multipart_parts)
s3_key = derive_s3_key(f'{backup_time}.inc', filesystem,
self.prefix)
self._logger.info(f'filesystem={filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Starting incremental backup."')
with open_snapshot_stream_inc(
filesystem, backup_time_full, backup_time) as f:
transfer_callback = TransferCallback(self._logger, send_size,
filesystem, backup_time,
s3_key)
self._bucket.upload_fileobj(
f.stdout,
s3_key,
Callback=transfer_callback.callback,
Config=transfer_config,
ExtraArgs={
'StorageClass': self._storage_class
})
stderr = f.stderr.read().decode('utf-8')
if f.returncode:
raise ZFSError(stderr)
backup_size = self._check_backup(s3_key)
self._backup_db.create_backup(backup_time, 'inc', s3_key,
backup_time_full, backup_size)
self._logger.info(f'filesystem={filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Finished incremental backup."')
def _restore_snapshot(self, backup, filesystem=None):
""" Restore snapshot from backup.
Parameters
----------
backup : Backup
filesystem : str, optional
File system to restore to. Defaults to the file system that the
backup was taken from.
"""
backup_time = backup.backup_time
backup_size = backup.backup_size
filesystem = filesystem or backup.filesystem
s3_key = backup.s3_key
transfer_config = TransferConfig(max_concurrency=S3_MAX_CONCURRENCY)
self._logger.info(f'filesystem={filesystem} '
f'snapshot_name={backup_time} '
f's3_key={s3_key} '
'msg="Restoring snapshot."')
backup_object = self._s3.Object(self._bucket_name, s3_key)
with open_snapshot_stream(filesystem, backup_time, 'w') as f:
transfer_callback = TransferCallback(self._logger, backup_size,
filesystem, backup_time,
s3_key)
try:
backup_object.download_fileobj(
f.stdin,
Callback=transfer_callback.callback,
Config=transfer_config)
except BrokenPipeError:
pass
stderr = f.stderr.read().decode('utf-8')
if f.returncode:
raise ZFSError(stderr)
self._snapshot_db.refresh()
def _limit_snapshots(self):
""" Limit number of snapshots.
We only remove snapshots that were used for incremental backups.
Keeping snapshots that were used for full backups allow us to
restore without having to download the full backup.
"""
backup_times_full = self._backup_db.get_backup_times('full')
results = self._snapshot_db.get_snapshots()
if len(results) > self._max_snapshots:
self._logger.info(f'filesystem={self._filesystem} '
'msg="Snapshot limit achieved."')
while len(results) > self._max_snapshots:
snapshot = results.pop(0)
backup_time = snapshot.name
if backup_time not in backup_times_full:
self._logger.info(f'filesystem={self._filesystem} '
f'snapshot_name={snapshot.name} '
'msg="Deleting snapshot."')
self._snapshot_db.delete_snapshot(snapshot.name)
def _check_backup(self, s3_key):
""" Check if S3 object exists and returns object size.
Parameters
----------
s3_key : str
Returns
-------
int
"""
# load() will fail if object does not exist
backup_object = self._s3.Object(self._bucket_name, s3_key)
backup_object.load()
if backup_object.content_length == 0:
raise BackupError('Backup upload failed.')
return backup_object.content_length
def _delete_backup(self, backup):
""" Delete backup.
Parameters
----------
backup : Backup
"""
backup_time = backup.backup_time
s3_key = backup.s3_key
self._logger.info(f's3_key={s3_key} '
'msg="Deleting backup."')
backup_object = self._s3.Object(self._bucket_name, s3_key)
backup_object.delete()
self._backup_db.delete_backup(backup_time)
def _limit_backups(self):
""" Limit number of incremental and full backups.
Only backups with no dependants are removed.
"""
backups = self._backup_db.get_backups()
if len(backups) > self._max_backups:
self._logger.info(f'filesystem={self._filesystem} '
'msg="Backup limit achieved."')
        count = 0
        while len(backups) > self._max_backups and count < len(backups):
            backup = backups[count]
            backup_time = backup.backup_time
            backup_type = backup.backup_type
            s3_key = backup.s3_key
            if backup_type == "inc":
                self._delete_backup(backup)
                backups.pop(count)
            elif backup_type == "full":
                dependants = any(b.dependency == backup_time
                                 for b in backups)
                if dependants:
                    self._logger.info(f's3_key={s3_key} '
                                      'msg="Backup has dependants. Not '
                                      'deleting."')
                    # only advance past backups we keep; pop() already
                    # shifts the next candidate into the current index
                    count += 1
                else:
                    self._delete_backup(backup)
                    backups.pop(count)
class TransferCallback:
def __init__(self, logger, file_size, filesystem, backup_time, s3_key):
self._logger = logger
self._file_size = file_size
self._filesystem = filesystem
self._backup_time = backup_time
self._s3_key = s3_key
self._transfer_0 = 0
self._transfer_buffer = 0
self._time_0 = time.time()
self._time_start = time.time()
def callback(self, transfer):
time_1 = time.time()
time_diff = time_1 - self._time_0
time_elapsed = time_1 - self._time_start
self._transfer_buffer += transfer
if time_diff > 5:
transfer_1 = self._transfer_0 + self._transfer_buffer
progress = transfer_1 / self._file_size
speed = self._transfer_buffer / time_diff
self._logger.info(
f'filesystem={self._filesystem} '
f'snapshot_name={self._backup_time} '
f's3_key={self._s3_key} '
f'progress={round(progress * 100)}% '
f'speed="{round(speed / MB)} MBps" '
f'transferred="{round(transfer_1 / MB)}/'
f'{round(self._file_size / MB)} MB" '
f'time_elapsed={round(time_elapsed / 60)}m'
)
self._transfer_0 = transfer_1
self._transfer_buffer = 0
self._time_0 = time_1
def _get_transfer_config(send_size, max_multipart_parts):
""" Get transfer config. """
# should never get close to the max part number
chunk_size = send_size // (max_multipart_parts - 100)
# only set chunk size if greater than default value
chunk_size = chunk_size if chunk_size > 8 * MB else 8 * MB
return TransferConfig(max_concurrency=S3_MAX_CONCURRENCY,
multipart_chunksize=chunk_size) | zfs-uploader | /zfs_uploader-0.9.0.tar.gz/zfs_uploader-0.9.0/zfs_uploader/job.py | job.py |
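A minimal, hypothetical invocation of the job class above (bucket, credentials and filesystem are placeholders):

```python
from zfs_uploader.job import ZFSjob

job = ZFSjob('my-backup-bucket', 'MY_ACCESS_KEY', 'MY_SECRET_KEY',
             'tank/data', max_snapshots=7,
             max_incremental_backups_per_full=6)
job.start()     # snapshots the filesystem, uploads a full or incremental
                # backup, then applies the retention limits
job.restore()   # restores the filesystem from the most recent backup
```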
import subprocess
SUBPROCESS_KWARGS = dict(stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='utf-8')
class ZFSError(Exception):
""" Baseclass for ZFS exceptions. """
def list_snapshots():
""" List snapshots. """
cmd = ['zfs', 'list', '-p', '-t', 'snapshot']
out = subprocess.run(cmd, **SUBPROCESS_KWARGS)
lines = out.stdout.splitlines()
snapshots = {}
if lines:
header = lines[0].split()
for data in lines[1:]:
name = data.split()[0]
snapshots.update(
{name: {k: v for k, v in zip(header[1:], data.split()[1:])}}
)
return snapshots
def create_snapshot(filesystem, snapshot_name):
""" Create filesystem snapshot. """
cmd = ['zfs', 'snapshot', f'{filesystem}@{snapshot_name}']
return subprocess.run(cmd, **SUBPROCESS_KWARGS)
def create_filesystem(filesystem):
""" Create filesystem. """
cmd = ['zfs', 'create', filesystem]
return subprocess.run(cmd, **SUBPROCESS_KWARGS)
def destroy_snapshot(filesystem, snapshot_name):
""" Destroy filesystem snapshot. """
cmd = ['zfs', 'destroy', f'{filesystem}@{snapshot_name}']
return subprocess.run(cmd, **SUBPROCESS_KWARGS)
def destroy_filesystem(filesystem):
""" Destroy filesystem and filesystem snapshots. """
cmd = ['zfs', 'destroy', '-r', filesystem]
return subprocess.run(cmd, **SUBPROCESS_KWARGS)
def mount_filesystem(filesystem):
""" Mount filesystem. """
cmd = ['zfs', 'mount', filesystem]
return subprocess.run(cmd, **SUBPROCESS_KWARGS)
def rollback_filesystem(filesystem, snapshot_name):
""" Rollback filesystem. """
cmd = ['zfs', 'rollback', '-r', f'{filesystem}@{snapshot_name}']
return subprocess.run(cmd, **SUBPROCESS_KWARGS)
def get_snapshot_send_size(filesystem, snapshot_name):
cmd = ['zfs', 'send', '--raw', '--parsable', '--dryrun',
f'{filesystem}@{snapshot_name}']
out = subprocess.run(cmd, **SUBPROCESS_KWARGS)
return out.stdout.splitlines()[1].split()[1]
def get_snapshot_send_size_inc(filesystem, snapshot_name_1, snapshot_name_2):
cmd = ['zfs', 'send', '--raw', '--parsable', '--dryrun', '-i',
f'{filesystem}@{snapshot_name_1}',
f'{filesystem}@{snapshot_name_2}']
out = subprocess.run(cmd, **SUBPROCESS_KWARGS)
return out.stdout.splitlines()[1].split()[1]
def open_snapshot_stream(filesystem, snapshot_name, mode):
""" Open snapshot stream. """
if mode == 'r':
cmd = ['zfs', 'send', '--raw', f'{filesystem}@{snapshot_name}']
return subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
elif mode == 'w':
cmd = ['zfs', 'receive', f'{filesystem}@{snapshot_name}']
return subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
raise ValueError('Mode must be r or w')
def open_snapshot_stream_inc(filesystem, snapshot_name_1, snapshot_name_2):
""" Open incremental snapshot read stream. """
cmd = ['zfs', 'send', '--raw', '-i', f'{filesystem}@{snapshot_name_1}',
f'{filesystem}@{snapshot_name_2}']
return subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def load_key(filesystem, keylocation):
""" Load encryption key. """
cmd = ['zfs', 'load-key', '-L', keylocation, filesystem]
return subprocess.run(cmd, **SUBPROCESS_KWARGS) | zfs-uploader | /zfs_uploader-0.9.0.tar.gz/zfs_uploader-0.9.0/zfs_uploader/zfs.py | zfs.py |
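A sketch of how these subprocess wrappers compose (filesystem and snapshot names are placeholders):

```python
from zfs_uploader import zfs

out = zfs.create_snapshot('tank/data', 'snap1')
if out.returncode:
    raise zfs.ZFSError(out.stderr)

# open_snapshot_stream returns a Popen whose stdout carries the raw
# `zfs send` stream, which callers consume in chunks
with zfs.open_snapshot_stream('tank/data', 'snap1', 'r') as proc:
    chunk = proc.stdout.read(1024 * 1024)
```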
ZFS Utils for OS X
==============================================================================
Introduction
------------------------------------------------------------------------------
.. image:: https://travis-ci.org/WoLpH/zfs-utils-osx.png?branch=master
:alt: Test Status
:target: https://travis-ci.org/WoLpH/zfs-utils-osx
.. image:: https://landscape.io/github/WoLpH/django-statsd/master/landscape.png
:target: https://landscape.io/github/WoLpH/django-statsd/master
:alt: Code Health
.. image:: https://requires.io/github/WoLpH/zfs-utils-osx/requirements.png?branch=master
:target: https://requires.io/github/WoLpH/zfs-utils-osx/requirements/?branch=master
:alt: Requirements Status
A simple script to create and manage virtual ZFS images on OS X without
requiring repartitioning.
Install
------------------------------------------------------------------------------
To install simply run `pip install zfs-utils-osx`.
Example usage
------------------------------------------------------------------------------
To create a pool (does a dry-run with the `-n` flag):
zfs.py zpool -n <pool_name>
Usage:
usage: zfs.py [-h] {zpool} ...
optional arguments:
-h, --help show this help message and exit
Subcommands:
Please specify one of the following subcommands
{zpool,load}
zpool zpool creation
load load (import) the zpools
Zpool create usage:
usage: zfs.py zpool [-h] [-c COUNT] [-s SIZE]
[-t {raidz1,raidz2,raidz3,mirror,raidz}] [-n]
[-m MOUNTPOINT] [-o] [-p PATTERN]
pool_name
positional arguments:
pool_name The name of the pool to create
optional arguments:
-h, --help show this help message and exit
-c COUNT, --count COUNT
The amount of images to use (default: 3)
-s SIZE, --size SIZE The usable size of the zpool in GiB (default: 10GiB)
-t {raidz1,raidz2,raidz3,mirror,raidz}, --type {raidz1,raidz2,raidz3,mirror,raidz}
The zpool type to use (default: raidz)
-n, --no-op, --dry-run
Show what will be done but don't execute
-m MOUNTPOINT, --mountpoint MOUNTPOINT
Where should the disk be mounted (default:
~/%(pool_name)s)
-o, --overwrite Overwrite old images if they exist
-p PATTERN, --pattern PATTERN
File name pattern to store the images (default:
%(pool_name)s_%(i)02d)
Zpool import usage:
usage: zfs.py load [-h] [-d IMAGE_DIRECTORY] [-p PREFIX] [--postfix POSTFIX]
[-n]
pool_names [pool_names ...]
positional arguments:
pool_names The name of the pool to import
optional arguments:
-h, --help show this help message and exit
-d IMAGE_DIRECTORY, --image-directory IMAGE_DIRECTORY
File name pattern to store the images (default:
~/zfs/)
-p PREFIX, --prefix PREFIX
File name prefix for the images (default:
%(pool_name)s_)
--postfix POSTFIX File name postfix for the images (default: )
-n, --no-op, --dry-run
Show what will be done but don't execute
| zfs-utils-osx | /zfs-utils-osx-1.1.0.tar.gz/zfs-utils-osx-1.1.0/README.rst | README.rst |
import sys
import subprocess
import textwrap
import decimal
from . import constants
from . import utils
from . import argparse_utils
def zpool_command(args):
context = vars(args)
effective_image_count = constants.ZPOOL_TYPES[args.type](args.count)
context['image_size'] = args.size / effective_image_count
context['physical_size'] = context['image_size'] * args.count
context['effective_size'] = context['image_size'] * effective_image_count
context['prefix'] %= context
context['postfix'] %= context
context['i'] = 0
context['name'] = constants.IMAGE_NAME % context
context['extra_args'] = ''
    print(textwrap.fill(constants.ZPOOL_CREATE_MESSAGE % context))
devices = []
for i in range(args.count):
context['i'] = i
context['name'] = constants.IMAGE_NAME % context
try:
if args.overwrite:
arg = '-ov'
else:
arg = ''
utils.execute(context, constants.ZPOOL_CREATE_IMAGE_COMMAND, arg)
except subprocess.CalledProcessError:
            print('Unable to create a new image')
sys.exit(1)
try:
context['name'] += '.sparseimage'
device = utils.execute(context,
constants.ZPOOL_ATTACH_IMAGE_COMMAND)
if device:
devices.append(device.strip())
except subprocess.CalledProcessError:
            print('Unable to attach image')
sys.exit(1)
if devices:
context['devices'] = ' '.join(devices)
context['mountpoint'] %= context
utils.execute(context, constants.ZPOOL_CREATE_COMMAND)
def get_parser(subparsers):
zpool = subparsers.add_parser('zpool', help='zpool creation')
zpool.add_argument(
'-c', '--count', default=3,
type=lambda s: argparse_utils.greater_than(s, int, 1),
help='The amount of images to use (default: %(default)s)')
zpool.add_argument(
'-s', '--size', default=10,
type=lambda s: argparse_utils.greater_than(s, decimal.Decimal, 0),
help='The usable size of the zpool in GiB (default: %(default)sGiB)')
zpool.add_argument(
'-t', '--type', choices=constants.ZPOOL_TYPES, default='raidz',
help='The zpool type to use (default: %(default)s)')
zpool.add_argument(
'-n', '--no-op', '--dry-run', action='store_true',
        help="Show what will be done but don't execute")
zpool.add_argument(
'-m', '--mountpoint', default='~/%(pool_name)s',
        help='Where should the disk be mounted (default: %(default)s)')
zpool.add_argument(
'-o', '--overwrite', action='store_true',
help='Overwrite old images if they exist')
zpool.add_argument('pool_name', help='The name of the pool to create')
zpool.add_argument(
'-p', '--prefix', default='%(pool_name)s_',
help='File name prefix for the images (default: %(default)s)')
zpool.add_argument(
'--postfix', default='',
help='File name postfix for the images (default: %(default)s)')
zpool.set_defaults(func=zpool_command) | zfs-utils-osx | /zfs-utils-osx-1.1.0.tar.gz/zfs-utils-osx-1.1.0/zfs_utils_osx/zpool.py | zpool.py |
import re
import os
import sys
import subprocess
import textwrap
import decimal
from . import constants
from . import utils
from . import argparse_utils
def load_command(args):
patterns = []
for name in args.pool_names:
context = vars(args)
context['pool_name'] = name
context['prefix'] %= context
context['postfix'] %= context
patterns.append(constants.IMAGE_NAME_RE % context)
pattern = re.compile('(%s)' % '|'.join(patterns))
full_path = os.path.abspath(os.path.expanduser(args.image_directory))
for path, dirs, files in os.walk(full_path):
for file_ in files:
match = pattern.match(file_)
if not match:
continue
            try:
                context['name'] = os.path.join(path, file_)
                utils.execute(context, constants.ZPOOL_ATTACH_IMAGE_COMMAND)
            except subprocess.CalledProcessError:
                print('Unable to attach image')
                sys.exit(1)
for name in args.pool_names:
context = vars(args)
context['pool_name'] = name
        try:
            utils.execute(context, constants.ZPOOL_IMPORT_COMMAND)
        except subprocess.CalledProcessError:
            print('Unable to import zpool')
            sys.exit(1)
def get_parser(subparsers):
load = subparsers.add_parser('load', help='load (import) the zpools')
load.add_argument('pool_names', help='The name of the pool to import',
nargs='+')
load.add_argument(
'-d', '--image-directory', default='~/zfs/',
help='File name pattern to store the images (default: %(default)s)')
load.add_argument(
'-p', '--prefix', default='%(pool_name)s_',
help='File name prefix for the images (default: %(default)s)')
load.add_argument(
'--postfix', default='',
help='File name postfix for the images (default: %(default)s)')
load.add_argument(
'-n', '--no-op', '--dry-run', action='store_true',
        help="Show what will be done but don't execute")
load.set_defaults(func=load_command) | zfs-utils-osx | /zfs-utils-osx-1.1.0.tar.gz/zfs-utils-osx-1.1.0/zfs_utils_osx/load.py | load.py |
<!-- # zfs3backup [](https://travis-ci.org/PressLabs/zfs3backup)
-->
This was forked from https://github.com/Presslabs/z3 which appears to be a dead project.
# Welcome to zfs3backup
zfs3backup is a ZFS to S3 backup tool. This is basically plumbing around `zfs send` and `zfs receive`
so you should have at least a basic understanding of what those commands do.
## Usage
`zfs3backup status` will show you the current state, what snapshots you have on S3 and on the local
zfs dataset.
`zfs3backup backup` performs full or incremental backups of your dataset.
`zfs3backup restore` restores your dataset to a certain snapshot.
See `zfs3backup SUBCOMMAND --help` for more info.
### Installing
`pip install zfs3backup`
zfs3backup is tested on python 2.7.
#### Optional dependencies
```
# Install pv to get some progress indication while uploading.
apt-get install pv
# Install pigz to provide the pigz compressors.
apt-get install pigz
```
### Configuring
Most options can be configured as command line flags, environment variables or in a config file,
in that order of precedence.
The config file is read from `/etc/zfs3backup_backup/zfs3backup.conf` if it exists; some defaults are provided by the tool.
`BUCKET`, `S3_KEY_ID` and `S3_SECRET` can't be provided on the command line.
For a list of all options see `zfs3backup/sample.conf`.
You'll usually want zfs3backup to only back up certain snapshots (hourly/daily/weekly).
To do that you can specify a `SNAPSHOT_PREFIX` (defaults to `zfs-auto-snap:daily`).
Defaults for `SNAPSHOT_PREFIX` and `COMPRESSOR` can be set per filesystem like so:
```
[fs:tank/spam]
SNAPSHOT_PREFIX=delicious-daily-spam
COMPRESSOR=pigz4
[fs:tank/ham]
SNAPSHOT_PREFIX=weekly-non-spam
```
### Dataset Size, Concurrency and Memory Usage
Since the data is streamed from `zfs send` it gets read into memory in chunks.
zfs3backup estimates a good chunk size for you: no smaller than 5MB and large enough
to produce at most 9999 chunks. These are S3 limitations for multipart uploads.
Here are some example chunk sizes for different datasets:
* 50 GiB: 5 MiB
* 500 GiB: 53 MiB
* 1 TiB: 110 MiB
* 2 TiB: 220 MiB
Multiply that by `CONCURRENCY` to know how much memory your upload will use.
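The estimate can be reproduced with a few lines (a sketch, not the tool's actual code):

```python
MiB = 1024 * 1024
MIN_CHUNK = 5 * MiB    # S3's minimum multipart part size
MAX_PARTS = 9999       # S3's multipart part count limit

def estimate_chunk_size(dataset_bytes):
    return max(MIN_CHUNK, dataset_bytes // MAX_PARTS + 1)

# worst-case upload memory is roughly chunk size times CONCURRENCY
print(estimate_chunk_size(2 * 1024**4) // MiB)  # ~210 MiB for a 2 TiB dataset
```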
### Usage Examples
#### Status
```
# show global options
zfs3backup --help
# show status of backups for default dataset
zfs3backup status
# show status for other dataset; only snapshots named daily-spam-*
zfs3backup --dataset tank/spam --snapshot-prefix daily-spam- status
```
#### Backup
```
# show backup options
zfs3backup backup --help
# inspect the commands that an incremental backup of the latest snapshot
# would run, using the pigz4 compressor
zfs3backup backup --compressor pigz4 --dry-run
# actually perform the incremental backup
zfs3backup backup --compressor pigz4
# inspect the commands for a full backup of a specific snapshot
zfs3backup backup --full --snapshot the-part-after-the-at-sign --dry-run
# actually perform the full backup
zfs3backup backup --full --snapshot the-part-after-the-at-sign
```
#### Restore
```
# see restore options
zfs3backup restore --help
# inspect the commands that restoring a dataset to a certain snapshot would run
zfs3backup restore the-part-after-the-at-sign --dry-run
# actually restore the dataset to that snapshot
zfs3backup restore the-part-after-the-at-sign
# force rollback of the filesystem (zfs recv -F)
zfs3backup restore the-part-after-the-at-sign --force
```
### Other Commands
Other command line tools are provided.
`pput` reads a stream from standard in and uploads the data to S3.
`zfs3backup_ssh_sync` is a convenience tool that lets you push zfs snapshots to another host.
If you need replication you should check out zrep. This exists because we've already
got zrep between 2 nodes and needed a way to push backups to a 3rd machine.
`zfs3backup_get` is called by `zfs3backup restore` to download a backup.
## Development Overview
### Running the tests
The test suite uses pytest.
Some of the tests upload data to S3, so you need to setup the following environment:
```
export S3_KEY_ID=""
export S3_SECRET=""
export BUCKET="mytestbucket"
```
To skip tests that use S3:
```
py.test --capture=no --tb=native _tests/ -k "not with_s3"
```
### The Data
Snapshots are obtained using `zfs send`, optionally piped through a compressor (pigz by default),
and finally piped to `pput`.
Incremental snapshots are always handled individually, so if you have multiple snapshots to send
since the last time you've performed a backup they get exported as individual snapshots
(multiple calls to `zfs send -i dataset@snapA dataset@snapB`).
Your snapshots end up as individual keys in an s3 bucket, with a configurable prefix (`S3_PREFIX`).
S3 key metadata is used to identify if a snapshot is full (`isfull="true"`) or incremental.
The parent of an incremental snapshot is identified with the `parent` attribute.
S3 and ZFS snapshots are matched by name.
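For illustration, the metadata on two snapshot keys might look like this (snapshot names are placeholders; the actual key layout depends on `S3_PREFIX`):

```
tank/spam@daily-spam-1  ->  isfull="true"
tank/spam@daily-spam-2  ->  parent="daily-spam-1"
```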
### Health checks
The S3 health checks are very rudimentary: if a snapshot is incremental, check
that the parent exists and is healthy. Full backups are always assumed healthy.
If backup/restore encounter unhealthy snapshots they abort execution.
### pput
pput is a simple tool with one job, read data from stdin and upload it to S3.
It's usually invoked by zfs3backup.
Consistency is important, it's better to fail hard when something goes wrong
than silently upload inconsistent or partial data.
There are few anticipated errors (if a part fails to upload, retry MAX_RETRY times).
Any other problem is unanticipated, so just let the tool crash.
TL;DR Fail early, fail hard.
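In code, the retry policy amounts to something like this (a self-contained sketch, not pput's actual implementation; `upload_part` and `TransientUploadError` are hypothetical):

```python
MAX_RETRY = 3

class TransientUploadError(Exception):
    """Hypothetical stand-in for an anticipated, retryable failure."""

def upload_part(part):
    """Hypothetical part upload that may fail transiently."""

for attempt in range(MAX_RETRY):
    try:
        upload_part(b'chunk')
        break
    except TransientUploadError:
        continue
else:
    # out of retries: fail hard rather than risk partial data
    raise RuntimeError('part upload failed after %d attempts' % MAX_RETRY)
```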
| zfs3backup | /zfs3backup-0.2.2.tar.gz/zfs3backup-0.2.2/README.md | README.md |
Copyright 2021, Thomas Spielauer
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain this list of conditions
and the following disclaimer.
* Redistributions in binary form must reproduce this list of conditions
and the following disclaimer in the documentation and/or other materials
provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| zfsexporter-tspspi | /zfsexporter-tspspi-0.0.2.tar.gz/zfsexporter-tspspi-0.0.2/LICENSE.md | LICENSE.md |
# Simple Python ZFS exporter for Prometheus
This is a simple ZFS exporter for the [Prometheus time series database
and monitoring system](https://prometheus.io/) written in Python. It uses
the [prometheus-client](https://github.com/prometheus/client_python) Python
package to do the main work of running the webservice and managing the gauges.
It's just a wrapper that periodically calls the ```zfs list``` and ```zpool status```
commands to gather information about the filesystems and the pools which is then
provided on the specified TCP port where it's collected by Prometheus at the
specified scrape interval. Note that this exporter gathers the filesystem
properties at its own configurable interval rather than at the query time of
the time series database itself.
Since this exporter scrapes the output of the CLI tools it may break with
any software update and might only work with particular versions of those
tools. It has been tested on:
* FreeBSD 11.2
* FreeBSD 12.2
## Exported metrics
* For each ZFS filesystem (```filesystem``` used as label):
* Used bytes (```zfs_used```)
* Available bytes (```zfs_avail```)
* Referred bytes (```zfs_referred```)
* For each pool (```pool``` used as label):
   * Resilvered percentage (```zpool_resilvered_pct```)
   * Resilvered bytes (```zpool_resilvered_bytes```)
   * Scrub scanned bytes (```zpool_scrub_scanned```)
   * Scrub datarate (```zpool_scrub_rate```)
   * Scrub scanned percentage (```zpool_scrub_scanned_pct```)
* For each ```vdev``` (```vdev``` used as label):
   * Read errors (```zpool_errorread```)
   * Write errors (```zpool_errorwrite```)
   * Checksum errors (```zpool_errorchecksum```)
* Operations read (```zpool_opread```)
* Operations write (```zpool_opwrite```)
* Bandwidth read (```zpool_bwread```)
* Bandwidth write (```zpool_bwwrite```)
* For each non-terminal ```vdev``` (```vdev``` used as label):
* Allocated capacity (```zpool_capacityallocated```)
* Free capacity (```zpool_capacityfree```)
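With the metric and label names above, typical Prometheus queries might look like this (pool and filesystem names are placeholders):

```
# free space of a single filesystem
zfs_avail{filesystem="tank/data"}
# scrub progress of a pool
zpool_scrub_scanned_pct{pool="tank"}
# any vdev reporting read, write or checksum errors
(zpool_errorread + zpool_errorwrite + zpool_errorchecksum) > 0
```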
## Installation
The package can either be installed from PyPI
```
pip install zfsexporter-tspspi
```
or form a package downloaded directly from the ```tar.gz``` or ```whl``` from
the [releases](https://github.com/tspspi/gammacli/releases):
```
pip install zfsexporter-tspspi.tar.gz
```
## Usage
```
usage: zfsexporter [-h] [-f] [--uid UID] [--gid GID] [--chroot CHROOT] [--pidfile PIDFILE] [--loglevel LOGLEVEL] [--logfile LOGFILE] [--port PORT] [--interval INTERVAL]
ZFS exporter daemon
optional arguments:
-h, --help show this help message and exit
-f, --foreground Do not daemonize - stay in foreground and dump debug information to the terminal
--uid UID User ID to impersonate when launching as root
--gid GID Group ID to impersonate when launching as root
--chroot CHROOT Chroot directory that should be switched into
--pidfile PIDFILE PID file to keep only one daemon instance running
--loglevel LOGLEVEL Loglevel to use (debug, info, warning, error, critical). Default: error
--logfile LOGFILE Logfile that should be used as target for log messages
--port PORT Port to listen on
--interval INTERVAL Interval in seconds in which data is gathered
```
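A matching scrape job in `prometheus.yml` might look like this (the hostname is a placeholder; 9249 is the exporter's default port):

```yaml
scrape_configs:
  - job_name: zfs
    scrape_interval: 60s
    static_configs:
      - targets: ['zfs-host.example.com:9249']
```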
| zfsexporter-tspspi | /zfsexporter-tspspi-0.0.2.tar.gz/zfsexporter-tspspi-0.0.2/README.md | README.md |
import argparse
import sys
import logging
import signal, grp, os
from pwd import getpwnam
from daemonize import Daemonize
from prometheus_client import start_http_server, Gauge
import subprocess
import time
class ZFSExporterDaemon:
def __init__(self, args, logger):
self.args = args
self.logger = logger
self.terminate = False
self.rereadConfig = True
self.metrics = {
'zfsUsed' : Gauge(
"zfs_used", "Used bytes", labelnames = [ 'filesystem' ]
),
'zfsAvail' : Gauge(
"zfs_avail", "Available bytes", labelnames = [ 'filesystem' ]
),
'zfsReferred' : Gauge(
"zfs_referred", "Referred bytes", labelnames = [ 'filesystem' ]
),
'zpoolCapacityAlloc' : Gauge(
"zpool_capacityallocated", "Allocated capacity", labelnames = [ 'vdev' ]
),
'zpoolCapacityFree' : Gauge(
"zpool_capacityfree", "Available (free) capacity", labelnames = [ 'vdev' ]
),
'zpoolOperationsRead' : Gauge(
"zpool_opread", "Operations read", labelnames = [ 'vdev' ]
),
'zpoolOperationsWrite' : Gauge(
"zpool_opwrite", "Operations write", labelnames = ['vdev']
),
'zpoolBandwidthRead' : Gauge(
"zpool_bwread", "Bandwidth read", labelnames = [ 'vdev' ]
),
'zpoolBandwidthWrite' : Gauge(
"zpool_bwwrite", "Bandwidth write", labelnames = [ 'vdev' ]
),
'zpoolErrorRead' : Gauge(
"zpool_errorread", "Read errors", labelnames = [ 'vdev' ]
),
'zpoolErrorWrite' : Gauge(
"zpool_errorwrite", "Write errors", labelnames = [ 'vdev' ]
),
'zpoolErrorChecksum' : Gauge(
"zpool_errorchecksum", "Checksum errors", labelnames = [ 'vdev' ]
),
'zpoolResilvered' : Gauge(
"zpool_resilvered_pct", "Percentage of resilvering done", labelnames = [ 'pool' ]
),
'zpoolResilveredByte' : Gauge(
"zpool_resilvered_bytes", "Bytes resilvered", labelnames = [ 'pool' ]
),
'zpoolScrubScanned' : Gauge(
"zpool_scrub_scanned", "Bytes scanned during scrub", labelnames = [ 'pool' ]
),
'zpoolScrubDatarate' : Gauge(
"zpool_scrub_rate", "Datarate of scrub", labelnames = [ 'pool' ]
),
'zpoolScrubScannedPct' : Gauge(
"zpool_scrub_scanned_pct", "Percentage currently scanned", labelnames = [ 'pool' ]
)
}
def SuffixNotationToBytes(self, inp):
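        # Note: suffixes are converted with decimal (SI) multipliers here,
        # while the ZFS tools print 1024-based suffixes, so the resulting
        # byte counts are approximations.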
if inp[-1] == 'K':
return float(inp[:-1]) * 1e3
if inp[-1] == 'M':
return float(inp[:-1]) * 1e6
if inp[-1] == 'G':
return float(inp[:-1]) * 1e9
if inp[-1] == 'T':
return float(inp[:-1]) * 1e12
else:
return float(inp)
def parseZPOOLIostat(self, metrics):
p = subprocess.Popen("zpool iostat -v", stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
status = p.wait()
output = output.decode("utf-8").split("\n")
for i in range(len(output)):
output[i] = output[i].strip()
knownVdevs = []
for i in range(3, len(output)):
if output[i].startswith("----"):
continue
line = output[i]
line = line.split()
            if len(line) == 7:
                # one data row per vdev; member devices may report '-' in
                # some columns
                vdevname = line[0]
                knownVdevs.append(vdevname)
                # reset per-row values so the log line below cannot raise a
                # NameError or report stale numbers when a column is '-'
                capacityAlloc = capacityFree = None
                opread = opwrite = bwread = bwwrite = None
if line[1] != '-':
capacityAlloc = self.SuffixNotationToBytes(line[1])
self.metrics['zpoolCapacityAlloc'].labels(vdevname).set(capacityAlloc)
if line[2] != '-':
capacityFree = self.SuffixNotationToBytes(line[2])
self.metrics['zpoolCapacityFree'].labels(vdevname).set(capacityFree)
if line[3] != '-':
opread = self.SuffixNotationToBytes(line[3])
self.metrics['zpoolOperationsRead'].labels(vdevname).set(opread)
if line[4] != '-':
opwrite = self.SuffixNotationToBytes(line[4])
self.metrics['zpoolOperationsWrite'].labels(vdevname).set(opwrite)
if line[5] != '-':
bwread = self.SuffixNotationToBytes(line[5])
self.metrics['zpoolBandwidthRead'].labels(vdevname).set(bwread)
if line[6] != '-':
bwwrite = self.SuffixNotationToBytes(line[6])
self.metrics['zpoolBandwidthWrite'].labels(vdevname).set(bwwrite)
self.logger.info("[ZPOOL-IOSTAT] {}: {} allocated, {} free, {} op.read, {} op.write, {} bw.read, {} bw.write".format(
vdevname,
capacityAlloc,
capacityFree,
opread,
opwrite,
bwread,
bwwrite
))
p = subprocess.Popen("zpool status", stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
status = p.wait()
output = output.decode("utf-8").split("\n")
bHeaderDone = False
currentPool = None
currentResilverPct = 0
currentResilverBytes = 0
currentScrubScanned = 0
currentScrubRate = 0
currentScrubPct = 0
currentScrubTotal = 0
for i in range(len(output)):
output[i] = output[i].strip()
if output[i].startswith("pool: "):
if currentPool:
# Publish the previouses pool scrub and resilver values
self.metrics['zpoolResilvered'].labels(currentPool).set(currentResilverPct)
self.metrics['zpoolResilveredByte'].labels(currentPool).set(currentResilverBytes)
self.metrics['zpoolScrubScanned'].labels(currentPool).set(currentScrubScanned)
self.metrics['zpoolScrubDatarate'].labels(currentPool).set(currentScrubRate)
self.metrics['zpoolScrubScannedPct'].labels(currentPool).set(currentScrubPct)
self.logger.info("[ZPOOL-STATUS] {} resilvered {}% ({} bytes)".format(currentPool, currentResilverPct, currentResilverBytes))
self.logger.info("[ZPOOL-STATUS] {} scrubed {}% ({} bytes) at {} bytes/sec".format(currentPool, currentScrubPct, currentScrubScanned, currentScrubRate))
currentPool = None
currentPool = output[i]
currentPool = currentPool.split("pool: ")[1]
currentPool = currentPool.strip()
if "scanned out of" in output[i]:
parts = output[i].split("scanned out of")
currentScrubScanned = self.SuffixNotationToBytes(parts[0].strip())
parts = parts[1].split(" at ")
currentScrubTotal = self.SuffixNotationToBytes(parts[0].strip())
currentScrubPct = currentScrubScanned / currentScrubTotal * 100.0
parts = parts[1].split("/")
currentScrubRate = self.SuffixNotationToBytes(parts[0].strip())
if " resilvered, " in output[i]:
parts = output[i].split(" resilvered, ")
currentResilverBytes = self.SuffixNotationToBytes(parts[0].strip())
parts = parts[1].split("%")
currentResilverPct = float(parts[0].strip())
if not bHeaderDone:
if not output[i].startswith("NAME"):
continue
bHeaderDone = True
continue
line = output[i].split()
if len(line) != 5:
continue
if line[0] == "errors:":
break
vdevname = line[0]
state = line[1]
readerror = self.SuffixNotationToBytes(line[2])
writeerror = self.SuffixNotationToBytes(line[3])
chksumerror = self.SuffixNotationToBytes(line[4])
self.metrics['zpoolErrorRead'].labels(vdevname).set(readerror)
self.metrics['zpoolErrorWrite'].labels(vdevname).set(writeerror)
self.metrics['zpoolErrorChecksum'].labels(vdevname).set(chksumerror)
self.logger.info("[ZPOOL-STATUS] {} ({}): {} read errors, {} write errors, {} checksum errors".format(vdevname, state, readerror, writeerror, chksumerror))
if currentPool:
# Publish the previouses pool scrub and resilver values
self.metrics['zpoolResilvered'].labels(currentPool).set(currentResilverPct)
self.metrics['zpoolResilveredByte'].labels(currentPool).set(currentResilverBytes)
self.metrics['zpoolScrubScanned'].labels(currentPool).set(currentScrubScanned)
self.metrics['zpoolScrubDatarate'].labels(currentPool).set(currentScrubRate)
self.metrics['zpoolScrubScannedPct'].labels(currentPool).set(currentScrubPct)
self.logger.info("[ZPOOL-STATUS] {} resilvered {}% ({} bytes)".format(currentPool, currentResilverPct, currentResilverBytes))
self.logger.info("[ZPOOL-STATUS] {} scrubed {}% ({} bytes) at {} bytes/sec".format(currentPool, currentScrubPct, currentScrubScanned, currentScrubRate))
currentPool = None
def parseZFSList(self, metrics):
p = subprocess.Popen("zfs list", stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
status = p.wait()
output = output.decode("utf-8").split("\n")
for i in range(len(output)):
output[i] = output[i].strip()
for i in range(1, len(output)-1):
line = output[i]
line = line.split()
fsName = line[0]
usedBytes = self.SuffixNotationToBytes(line[1])
availBytes = self.SuffixNotationToBytes(line[2])
referredBytes = self.SuffixNotationToBytes(line[3])
mountpoint = line[4]
metrics['zfsUsed'].labels(fsName).set(usedBytes)
metrics['zfsAvail'].labels(fsName).set(availBytes)
metrics['zfsReferred'].labels(fsName).set(referredBytes)
self.logger.info("[ZFS-FS] {}: {} used, {} avail, {} referred".format(fsName, usedBytes, availBytes, referredBytes))
def signalSigHup(self, *args):
self.rereadConfig = True
def signalTerm(self, *args):
self.terminate = True
def __enter__(self):
return self
def __exit__(self, type, value, tb):
pass
def run(self):
signal.signal(signal.SIGHUP, self.signalSigHup)
signal.signal(signal.SIGTERM, self.signalTerm)
signal.signal(signal.SIGINT, self.signalTerm)
self.logger.info("Service running")
start_http_server(self.args.port)
while True:
time.sleep(self.args.interval)
self.parseZFSList(self.metrics)
self.parseZPOOLIostat(self.metrics)
if self.terminate:
break
self.logger.info("Shutting down due to user request")
def mainDaemon():
parg = parseArguments()
args = parg['args']
logger = parg['logger']
logger.debug("Daemon starting ...")
with ZFSExporterDaemon(args, logger) as exporterDaemon:
exporterDaemon.run()
def parseArguments():
ap = argparse.ArgumentParser(description = 'ZFS exporter daemon')
ap.add_argument('-f', '--foreground', action='store_true', help="Do not daemonize - stay in foreground and dump debug information to the terminal")
ap.add_argument('--uid', type=str, required=False, default=None, help="User ID to impersonate when launching as root")
ap.add_argument('--gid', type=str, required=False, default=None, help="Group ID to impersonate when launching as root")
ap.add_argument('--chroot', type=str, required=False, default=None, help="Chroot directory that should be switched into")
ap.add_argument('--pidfile', type=str, required=False, default="/var/run/zfsexporter.pid", help="PID file to keep only one daemon instance running")
ap.add_argument('--loglevel', type=str, required=False, default="error", help="Loglevel to use (debug, info, warning, error, critical). Default: error")
ap.add_argument('--logfile', type=str, required=False, default="/var/log/zfsexporter.log", help="Logfile that should be used as target for log messages")
ap.add_argument('--port', type=int, required=False, default=9249, help="Port to listen on")
ap.add_argument('--interval', type=int, required=False, default=30, help="Interval in seconds in which data is gathered")
args = ap.parse_args()
loglvls = {
"DEBUG" : logging.DEBUG,
"INFO" : logging.INFO,
"WARNING" : logging.WARNING,
"ERROR" : logging.ERROR,
"CRITICAL" : logging.CRITICAL
}
    if args.loglevel.upper() not in loglvls:
print("Unknown log level {}".format(args.loglevel.upper()))
sys.exit(1)
logger = logging.getLogger()
logger.setLevel(loglvls[args.loglevel.upper()])
if args.logfile:
fileHandleLog = logging.FileHandler(args.logfile)
logger.addHandler(fileHandleLog)
return { 'args' : args, 'logger' : logger }
def mainStartup():
parg = parseArguments()
args = parg['args']
logger = parg['logger']
daemonPidfile = args.pidfile
daemonUid = None
daemonGid = None
daemonChroot = "/"
if args.uid:
try:
args.uid = int(args.uid)
except ValueError:
try:
args.uid = getpwnam(args.uid).pw_uid
except KeyError:
logger.critical("Unknown user {}".format(args.uid))
print("Unknown user {}".format(args.uid))
sys.exit(1)
daemonUid = args.uid
if args.gid:
try:
args.gid = int(args.gid)
except ValueError:
try:
args.gid = grp.getgrnam(args.gid)[2]
except KeyError:
logger.critical("Unknown group {}".format(args.gid))
print("Unknown group {}".format(args.gid))
sys.exit(1)
daemonGid = args.gid
if args.chroot:
if not os.path.isdir(args.chroot):
logger.critical("Non existing chroot directors {}".format(args.chroot))
print("Non existing chroot directors {}".format(args.chroot))
sys.exit(1)
daemonChroot = args.chroot
if args.foreground:
logger.debug("Launching in foreground")
with ZFSExporterDaemon(args, logger) as zfsDaemon:
zfsDaemon.run()
else:
logger.debug("Daemonizing ...")
daemon = Daemonize(
app="ZFS exporter",
action=mainDaemon,
pid=daemonPidfile,
user=daemonUid,
group=daemonGid,
chdir=daemonChroot
)
daemon.start()
if __name__ == "__main__":
mainStartup() | zfsexporter-tspspi | /zfsexporter-tspspi-0.0.2.tar.gz/zfsexporter-tspspi-0.0.2/src/zfsexporter/zfsexporter.py | zfsexporter.py |
# zfslib
ZFS Libraries for Python
Python library for reading from ZFS Pools. Capable of reading, Pools, Datasets, Snapshots and Diffs for use by other Python Tools.
This tool presently targets Python v3.7+
## Installation
Install this plugin using `pip`:
$ python3 -m pip install zfslib
## Usage
See examples folder
## Sample code
```
import zfslib as zfs
# Read ZFS information from local computer
# For remote computer access, use certificate based ssh authentication
# see `examples/ex_remote.py`
conn = zfs.Connection(host='localhost')
# Load poolset.
# zfs properties can be queried here with: zfs_prop=['prop1','prop2',...]
# zpool properties can be queried here with: zpool_props=['prop1','prop2',...]
# Default properties: name, creation
# If get_mounts=True, mountpoint and mounted are also retrieved automatically
# unlocking some functionality
# To see all available properties use: % zfs list -o (-or-) % zpool list -o
poolset = conn.load_poolset()
# Load a pool by name
pool = poolset.get_pool('dpool')
# Get properties from ZFSItem's (Pool|DataSet|Snapshot)
# <ZFSItem>.get_property('<property>')
# -also-
# <ZFSItem>.path -> str: Full path for item
# <ZFSItem>.name -> str: Name of item
# <ZFSItem>.creation -> datetime
# <dataset>.mountpoint -> str
# <dataset|snapshot>.pool -> Pool
# <snapshot>.dataset -> DataSet
# <dataset>.dspath -> str: Dataset path excluding pool name
# <ZFSItem>.parent -> ZfsItem
# <pool|dataset>.children -> list(of ZfsItem)
# . Pools only contain DataSets
# . Datasets can contain DataSets and/or Snapshots
# Load dataset
ds = pool.get_dataset('vcmain')
# Load snapshots by with name of autosnap* that fall between
# the dates of 2020-12-20 and 2020-12-24
snapshots = ds.find_snapshots({
'name': 'autosnap*', 'date_from': '2020-12-20', 'date_to': '2020-12-24'
})
# Get all the changes file modification diffs for files that end with .py and .js
# excluding those in __pycache__ between the first and second snapshots
diffs = ds.get_diffs(
snapshots[0], snapshots[1]
,file_type='F', chg_type='M'
,include=['*.py', '*.js']
,ignore=['*_pycache_*']
)
# Load snapshots by with name of autosnap* in the last 12 hours
snapshots = ds.find_snapshots({'name': 'autosnap*', 'tdelta': '12H'})
# Get Path to a file in the Snapshot folder (under mountpoint/.zfs/snapshots):
find_path = '<path_to_some_local_file_in_ZFS>'
(exists, snap_path) = snapshots[0].resolve_snap_path(find_path)
if exists:
print('snap_path: {}'.format(snap_path))
else: # file did not exist at time of snapshot creation
print('File not found in snapshot: {}'.format(find_path))
```
## Some Key Features
### `<Dataset>.find_snapshots(dict)`
```
# find_snapshots(dict) - Query all snapshots in Dataset
# Options:
# - name: Snapshot name (wildcard supported)
# - contains: Path to resource (wildcard supported)
# - dt_from: datetime to start
# - tdelta: timedelta -or- string of nC where: n is an integer > 0 and C is one of y,m,d,H,M,S. Eg 5H = 5 Hours
# - dt_to: datetime to stop
# - index: (bool) - Return list(tuple(of int, snapshot, dataset)) where int is the index in current snaphot listing for dataset
# Notes:
# - Date searching is any combination of:
# (dt_from --> dt_to) | (dt_from --> dt_from + tdelta) | (dt_to - tdelta --> dt_to) | (dt_from --> now)
```
### `<Dataset>.get_property(str)`
```
# get_property(str) - Return zfs item or zpool property
# - use zfs_props or zpool_props to grab non-defaulted properties
```
### `<Dataset>.get_diffs()`
```
# get_diffs() - Gets Diffs in snapshot or between snapshots (if snap_to is specified)
# If snap_to is not specified, diff is to working copy
# snap_from - Left side of diff
# snap_to - Right side of diff. If not specified, diff is to current working version
# include - list of glob expressions to include (eg ['*_pycache_*'])
# exclude - list of glob expressions to exclude (eg ['*_pycache_*'])
# file_type - Filter on the following
# - B Block device
# - C Character device
# - / Directory
# - > Door
# - | Named pipe
# - @ Symbolic link
# - P Event port
# - = Socket
# - F Regular file
# chg_type - Filter on the following:
# - - The path has been removed
# - + The path has been created
# - M The path has been modified
# - R The path has been renamed
```
### `<Snapshot>.snap_path`
```
# Returns the path to read only zfs_snapshot directory (<ds_mount>/.zfs/snapshots/<snapshot>)
```
### `<Snapshot>.resolve_snap_path(path)`
```
# Resolves the path to file/dir within the zfs_snapshot dir
# Returns: tuple(of bool, str) where:
# - bool = True if item is found
# - str = Path to item if found else path to zfs_snapshot dir
```
### `<Diff>.snap_path_left`
```
# Path to resource on left side of diff in zfs_snapshot dir
```
### `<Diff>.snap_path_right`
```
# Path to resource on right side of diff in .zfs_snapshot dir or working copy
```
See `test.py` for more sample code
Credits: This code is based heavily on [zfs-tools by Rudd-O](https://github.com/Rudd-O/zfs-tools). | zfslib | /zfslib-0.11.0.tar.gz/zfslib-0.11.0/README.md | README.md |
ZFunds-Services
-----------------
Sahi. Asaan
## Development Setup
### System Dependency
* Python 3.10.2
* poetry
### Step
1) Clone the repo
2) cd zfunds-services
3) poetry install
4) poetry shell
Start developing
## Package zfunds-services
python version must be 3.10.2 or higher
### Build
python setup.py build
### Distribute
python setup.py sdist
### Upload
twine upload dist/*
### Python Dependency
* pymongo
* dynamodb
* s3
### Use
It wil load environment variable automatically, so all you need to do is make sure these environment variables are present.
It will also autoload .env ( example .env.dist , rename it to .env) file before running, so you can also put these variables in your .env file.
Needed Environment variables are
```
# Application
APP_NAME=redisconnection
LOG_LEVEL=DEBUG
ENVIRONMENT=staging
REGION=ind
#
```
from redisconnection import redis_connection
rc = redis_connection.RedisConnection()
conn = rc.connection
```
| zfunds-services | /zfunds-services-0.0.4.tar.gz/zfunds-services-0.0.4/README.md | README.md |
from dynamodb import logger, Config
import boto3
from botocore.config import Config as BConfig
class DynamoDB:
def __init__(self):
logger.debug('[DynamoDB]: Initiating DynamoDB Connection Class')
self._connection_parameter = None
self._resource = None
self._config = BConfig(retries={'max_attempts': 3, 'mode': 'standard'})
def set_connection_parameter(self, **kwargs):
self._connection_parameter = {
'region': Config.AWS['region'] if not kwargs.get('region') else kwargs.get('region'),
"s3_key_id": Config.AWS['s3_key_id'] if not kwargs.get('s3_key_id') else kwargs.get('s3_key_id'),
"s3_key_secret": Config.AWS['s3_key_secret'] if not kwargs.get('s3_key_secret')
else kwargs.get('s3_key_secret'),
"dynamo_table": Config.AWS['dynamo_table'] if not kwargs.get('dynamo_table')
else kwargs.get('dynamo_table')
}
def get_connection_parameter(self):
return self._connection_parameter
@property
def connection(self):
if self._resource is None:
self.connect()
return self._resource
def connect(self):
if self._connection_parameter is None:
self.set_connection_parameter()
try:
logger.debug('[DynamoDB]: Creating DynamoDB connection')
dynamodb_resource = boto3.resource('dynamodb', region_name=self._connection_parameter['region'],
aws_access_key_id=self._connection_parameter['s3_key_id'],
aws_secret_access_key=self._connection_parameter['s3_key_secret'],
config=self._config)
# For testing connection only, because boto3 only return resource class not exactly a connection
table_name = self._connection_parameter.get('dynamo_table')
if table_name:
table = dynamodb_resource.Table(table_name)
if table.table_status == 'ACTIVE': # this throws exception as expected
self._resource = dynamodb_resource
logger.info(f'[DynamoDB]: Connection Successful. Connection={self._resource}')
else:
raise Exception('Unable to connect to table=connection_backend')
except Exception as e:
self._resource = None
logger.error(f'[DynamoDB]: connection issue, conn={self._resource}', exc_info=True)
raise Exception(f'[DynamoDB]: Connection Error with DynamoDB. Error={e}') | zfunds-services | /zfunds-services-0.0.4.tar.gz/zfunds-services-0.0.4/dynamodb/dynamo.py | dynamo.py |
# ZGA - prokaryotic genome assembly and annotation pipeline
[](https://pypi.python.org/pypi/zga)
[](https://anaconda.org/bioconda/zga/)
[](https://dx.doi.org/10.1101/2021.04.27.441618)
## Main Features
* Wide range of supported reads: Illumina, Oxford Nanopore, PacBio, BGI.
* Short read multi-threaded processing: QC, filtering, trimming, overlapped pairs merging.
* Assemblies from short reads, long reads or hybrid assembly using modern and powerful assemblers: [SPAdes](http://cab.spbu.ru/software/spades/), [Unicycler](https://github.com/rrwick/Unicycler/) or [Flye](https://github.com/fenderglass/Flye).
* Quality control of assembly: completeness and contamination assessment with [CheckM](https://github.com/Ecogenomics/CheckM) as well as PhiX detection.
* Fast annotation of bacterial and archeal genome assemblies with [DFAST](https://github.com/nigyta/dfast_core) .
* No High Performance Computing needed. The pipeline works on laptop or desktop.
## Installation
ZGA is written in Python and tested with Python 3.6 and Python 3.7.
### Install with conda
[](https://anaconda.org/bioconda/zga/)
The simplest way to install ZGA and all dependencies is **conda**:
1. You need to install conda, e.g. [**miniconda**](https://conda.io/en/latest/miniconda.html). Python 3.7 is preferred.
2. After installation You should add channels - the conda's software sources:
`conda config --add channels bioconda`
`conda config --add channels conda-forge`
3. At the end You should install ZGA to an existing active environment (Python 3.6 or 3.7):
`conda install zga`
or create a fresh environment and activate it:
`conda create -n zga zga`
`conda activate zga`
If You have troubles with bioconda channel try to use my personal channel https://anaconda.org/laxeye/zga `conda install -c laxeye zga`
### Install from PyPI
[](https://pypi.python.org/pypi/zga)
Run `pip install zga`. Biopython is the only one dependency installed from PyPI. All other dependencies You should install manually or using **conda** as mentioned above. CheckM is available on **PyPi**, but it's easier to install it using **conda**.
### Get source from Github
You can get ZGA by cloning from the repository with `git clone https://github.com/laxeye/zga.git` or by downloading an archive. After downloading enter the directory `cd zga` and run `python3 setup.py install`.
Don't forget to install dependecies (see bellow).
### Installing dependencies
ZGA uses several software and libraries including:
* [fastp](https://github.com/OpenGene/fastp)
* [BBmap](https://sourceforge.net/projects/bbmap/)
* [NxTrim](https://github.com/sequencing/NxTrim)
* [mash](https://mash.readthedocs.io/en/latest/)
* [SPAdes](http://cab.spbu.ru/software/spades/) (>= 3.12 to support merged paired-end reads, >= 3.5.0 to support Nanopore reads)
* [Unicycler](https://github.com/rrwick/Unicycler/)
* [Flye](https://github.com/fenderglass/Flye) >= 2.6
* [minimap2](https://github.com/lh3/minimap2/)
* [racon](https://github.com/lbcb-sci/racon)
* [CheckM](https://github.com/Ecogenomics/CheckM) >= 1.1.0
* [BioPython](https://biopython.org/)
* [NCBI BLAST+](https://blast.ncbi.nlm.nih.gov/Blast.cgi)
* [DFAST](https://github.com/nigyta/dfast_core)
You may install all dependencies separately using **conda**. It's highly recommended to create a new conda environment:
`conda create -n zga "python>=3.6" fastp "spades>=3.12" unicycler checkm-genome dfast bbmap blast biopython"nxtrim "mash>=2" flye minimap2 racon "samtools>=1.9"`
and activate it
`conda activate zga`
Otherwise you may install dependencies to existing conda environment:
`conda install "python>=3.6" fastp "spades>=3.12" unicycler checkm-genome dfast bbmap blast biopython nxtrim "mash>=2" flye minimap2 racon "samtools>=1.9"`
Of course, it's possible to use *another ways* even compile all tools from source code. In this case you should check if binaries are in your '$PATH' variable.
#### DFAST database download
After installation DFAST downloads some basic databases. It's recommended to download more databases using *dfast_file_downloader.py* command-line script:
Run `dfast_file_downloader.py -h` to see available databases and options.
Default databases may be donloaded with `dfast_file_downloader.py --protein dfast --cdd Cog --hmm TIGR`
Soon auto-download of databases during installation with conda will be available.
If You want to use more databases You need to edit DFAST configuration file and provide it to ZGA with `--dfast-config` option.
### Operating systems requirements
ZGA was tested on Ubuntu 18.04, 19.10 and 20.04. Most probably any modern 64-bit Linux distribuition is suitable.
Your feedback on other OS is welcome!
## Usage
Run `zga -h` to get a help message.
### Pipeleine steps
ZGA includes several steps:
1. Read quality check ('readqc')
2. Read processing ('preprocessing')
3. Genome assembling ('assembling')
4. Genome polishing ('polishing')
5. Genome quality assessment ('check_genome')
6. Genome annotation ('annotation')
You may start from any step and finish at any step providing arguments `-s` or `--first-step` and `-l` or `--last-step` followed by step designation (in brackets in the list above).
E.g. if You like to perform read processing, genome assembling and genome polishing You should run
`zga --first-step preprocessing --last-step polishing ...`
### Input files
ZGA may use unprocessed or processed sequencing reads from different platforms as well as genome assemblies to perform assembly polishing, assembly quality assessment and assembly annotation. FASTQ format gzipped or not is required for sequencing reads. Paired-end reads shoul be provided in separate files, not interleaved. Sequencing reads should be provided as space separated list after corresponding argument:
`-1` or `--pe-1` for forward paired-end reads (Illumina, BGI)
`-2` or `--pe-2` for reverse paired-end reads
`-S` or `--single-end` for unpaired short reads
`--pe-merged` for merged overlapping paired-end reads (if You performed merging earlier)
`--mp-1` for first mate-pair reads, RF orientation is supposed
`--mp-2` for second mate-pair reads
`--pacbio` for PacBio single-end sequencing reads
`--nanopore` for Oxford Nanopore sequencing reads
When `bbduk.sh` (short read trimming tool) throws an exception ZGA tries to repair reads with `repair.sh` (from BBMap).
#### Examples
`zga -1 Raw.R1.fq.gz -2 Raw.R2.fq.gz` unprocessed paired-end reads
`zga -1 Unmerged_1.fq -2 Unmerged_2.fq --pe-merged Merged.fq` reads after processing (overlapping reads merging)
`zga -1 Lib1.R1.fq.gz Lib2.R1.fq -2 Lib1.R2.fq Lib2.R2.fq` combination of reads from two sequencing libraries
### Output
ZGA produces up to 4 sub-folders in output folder:
* **readQC** - results of reaq quality control with *fastp*,
* **reads** - processed reads,
* **assembly** - folder produced by genomic assembler,
* **annotation** - annotated genome.
Log-file *zga.log* is available in the output folder.
### Usage examples
Perform all steps: read qc, read trimming and merging, assembly, CheckM assesment with default (bacterial) marker set, DFAST annotation and use 4 CPU threads where possible:
`zga -1 R1.fastq.gz -2 R2.fastq.gz --bbmerge --threads 4 -o my_assembly`
Assemble with SPAdes using paired-end and nanopore reads of archaeal genome (CheckM will use archaeal markers) altering memory limit to 16 GB:
`zga -1 R1.fastq.gz -2 R2.fastq.gz --nanopore MiniION.fastq.gz -a spades --threads 4 --memory-limit 16 --domain archaea -o my_assembly`
*(New in 0.8 development releases)* Short read correction with SPAdes is a computationally expensive step, You may run read-correction with tadpole including
`--tadpole-correct` option which is much faster and needs less memory.
`zga --tadpole-correct -1 R1.fastq.gz -2 R2.fastq.gz --threads 4 -o my_assembly`
Assemble long reads with Flye skipping long read polishing and perfom short-read polishing with racon:
`zga -1 R1.fastq.gz -2 R2.fastq.gz --nanopore MiniION.fastq.gz -a flye --threads 4 --domain archaea -o my_assembly --flye-short-polish --flye-skip-long-polish`
Assemble from Nanopore reads using unicycler:
`zga -a unicycler --nanopore MiniION.fastq -o nanopore_assembly`
Perform assesment and annotation of genome assembly with e.g. *Pectobacterium* CheckM marker set:
`zga --first-step check_genome -g pectobacterium_sp.fasta --checkm_rank genus --checkm_taxon Pectobacterium -o my_output_dir`
Let CheckM to infer the right marker set:
`zga --first-step check_genome -g my_genome.fa --checkm_mode lineage -o my_output_dir`
## Known issues and limitations
ZGA is in the stage of active development.
Known issues and limitations:
* Unicycler can't use mate-pair reads or multiple libraries of same type.
Don't hesitate to report bugs or features!
## Cite
It's a great pleasure to know, that your software is useful. Please cite ZGA:
Korzhenkov A. 2021. ZGA: a flexible pipeline for read processing, de novo assembly and annotation of prokaryotic genomes. bioRxiv https://doi.org/10.1101/2021.04.27.441618
And of course tools it's using:
Chen, S., Zhou, Y., Chen, Y., & Gu, J. (2018). fastp: an ultra-fast all-in-one FASTQ preprocessor. Bioinformatics, 34(17), i884-i890. https://doi.org/10.1093/bioinformatics/bty560
Bushnell, B., Rood, J., & Singer, E. (2017). BBMerge–accurate paired shotgun read merging via overlap. PloS one, 12(10). https://doi.org/10.1371/journal.pone.0185056
Bankevich, A., Nurk, S., Antipov, D., Gurevich, A. A., Dvorkin, M., Kulikov, A. S., ... & Pyshkin, A. V. (2012). SPAdes: a new genome assembly algorithm and its applications to single-cell sequencing. Journal of computational biology, 19(5), 455-477. https://dx.doi.org/10.1089%2Fcmb.2012.0021
Wick, R. R., Judd, L. M., Gorrie, C. L., & Holt, K. E. (2017). Unicycler: resolving bacterial genome assemblies from short and long sequencing reads. PLoS computational biology, 13(6), e1005595. https://doi.org/10.1371/journal.pcbi.1005595
Vaser, R., Sović, I., Nagarajan, N., & Šikić, M. (2017). Fast and accurate de novo genome assembly from long uncorrected reads. Genome research, 27(5), 737-746. https://genome.cshlp.org/content/27/5/737.full
Li, H. (2018). Minimap2: pairwise alignment for nucleotide sequences. Bioinformatics, 34:3094-3100. https://dx.doi.org/10.1093/bioinformatics/bty191
Kolmogorov, M., Yuan, J., Lin, Y., & Pevzner, P. A. (2019). Assembly of long, error-prone reads using repeat graphs. Nature biotechnology, 37(5), 540-546. https://doi.org/10.1038/s41587-019-0072-8
Parks, D. H., Imelfort, M., Skennerton, C. T., Hugenholtz, P., & Tyson, G. W. (2015). CheckM: assessing the quality of microbial genomes recovered from isolates, single cells, and metagenomes. Genome research, 25(7), 1043-1055. https://dx.doi.org/10.1101%2Fgr.186072.114
Tanizawa, Y., Fujisawa, T., & Nakamura, Y. (2018). DFAST: a flexible prokaryotic genome annotation pipeline for faster genome publication. Bioinformatics, 34(6), 1037-1039. https://dx.doi.org/10.1093%2Fbioinformatics%2Fbtx713
Camacho, C., Coulouris, G., Avagyan, V. et al. (2009). BLAST+: architecture and applications. BMC Bioinformatics 10, 421. https://doi.org/10.1186/1471-2105-10-421
Cock, P. J., Antao, T., Chang, J. T., Chapman, B. A., Cox, C. J., Dalke, A., ... & De Hoon, M. J. (2009). Biopython: freely available Python tools for computational molecular biology and bioinformatics. Bioinformatics, 25(11), 1422-1423. https://doi.org/10.1093/bioinformatics/btp163
O’Connell, J., et al. (2015) NxTrim: optimized trimming of Illumina mate pair reads. Bioinformatics 31(12), 2035-2037. https://doi.org/10.1093/bioinformatics/btv057
Ondov, B.D., Treangen, T.J., Melsted, P. et al. Mash: fast genome and metagenome distance estimation using MinHash. Genome Biol 17, 132 (2016). https://doi.org/10.1186/s13059-016-0997-x
| zga | /zga-0.1a2.tar.gz/zga-0.1a2/README.md | README.md |
Provides Atom (RFC 4287) entry, subscription feed, and search feed documents
annotated with GeoRSS elements. Implements the Atom Publishing Protocol (RFC
5023).
AtomPub collections can be made of Zope containers using the
"atompub-collection" view. Atom entry representations of contained objects can
be made using the "atom-entry" view. By registering IFileFactory and IWriteFile
adapters, one can allow creation and edit of objects via the Atom Publishing
Protocol.
Major portions of this work were supported by a grant (to Pleiades_) from the
U.S. National Endowment for the Humanities (http://www.neh.gov).
.. _Pleiades: http://atlantides.org/trac/pleiades/wiki
| zgeo.atom | /zgeo.atom-0.4.1.tar.gz/zgeo.atom-0.4.1/README.txt | README.txt |
from zope import component
from zope.interface import implements, Interface
from zope.app.publication.http import HTTPPublication
from zope.app.publication.requestpublicationfactories import HTTPFactory
from zope.publisher.http import HTTPRequest
from zope.publisher.interfaces import NotFound
from zope.publisher.interfaces.http import IHTTPCredentials
from zope.publisher.interfaces.http import IHTTPRequest
from zope.publisher.interfaces.http import IHTTPApplicationRequest
from zope.publisher.interfaces.http import IHTTPPublisher
from zope.app.container.interfaces import IReadContainer
from zope.filerepresentation.interfaces import IFileFactory, IWriteFile
from zope.filerepresentation.interfaces import IWriteDirectory
from zope.traversing.api import getParent
from zgeo.atom.browser import absoluteURL, LinkEntry
from zgeo.atom.interfaces import IAtomEntryNameFactory
class IAtomPubRequestFactory(Interface):
pass
class IAtomPubRequest(IHTTPRequest):
pass
class AtomPubRequest(HTTPRequest):
implements(IHTTPCredentials, IAtomPubRequest, IHTTPApplicationRequest)
class AtomPubPublication(HTTPPublication):
pass
class AtomPubPublicationFactory(HTTPFactory):
def __call__(self):
request_class = component.queryUtility(
IAtomPubRequestFactory, default=AtomPubRequest
)
return request_class, AtomPubPublication
class AtomPubTraverser(object):
def __init__(self, context, request):
self.context = context
self.request = request
def publishTraverse(self, request, name):
if name in ['atompub-collection', 'atom-entry']:
return self.context
elif IReadContainer.providedBy(self.context):
item = self.context.get(name)
if item is not None:
return item
# fall back to views
view = component.queryMultiAdapter((self.context, request), name=name)
if view is not None:
return view
# give up and return a 404 Not Found error page
raise NotFound(self.context, name, request)
class AtomCollectionPOST(object):
def __init__(self, context, request):
self.context = context
self.request = request
def POST(self):
"""Add a new item to the container."""
name = component.queryMultiAdapter(
(self.context, self.request),
IAtomEntryNameFactory,
).chooseName()
body = self.request.bodyStream
factory = component.getAdapter(
self.context, IFileFactory,
name='application/atom+xml;type=entry'
)
placemark = factory(name, 'application/atom+xml;type=entry', body)
self.context[name] = placemark
response = self.request.response
response.setStatus(201)
loc = "%s/atom-entry" % absoluteURL(self.context[name], self.request)
response.setHeader('Location', loc)
return LinkEntry(placemark, self.request)()
class AtomMemberPUT(object):
def __init__(self, context, request):
self.context = context
self.request = request
def PUT(self):
body = self.request.bodyStream
adapter = component.getAdapter(
self.context, IWriteFile, name='application/atom+xml;type=entry'
)
adapter.write(body.read())
self.request.response.setStatus(200)
return ''
class AtomMemberDELETE(object):
def __init__(self, context, request):
self.context = context
self.request = request
def DELETE(self):
parent = getParent(self.context)
adapter = component.queryAdapter(
parent, IWriteDirectory,
name='application/atom+xml;type=entry',
default=parent
)
adapter.__delitem__(self.context.__name__)
self.request.response.setStatus(200)
return '' | zgeo.atom | /zgeo.atom-0.4.1.tar.gz/zgeo.atom-0.4.1/zgeo/atom/publication.py | publication.py |
from zope.interface import Interface, Attribute
class IAtomMetadata(Interface):
"""See http://tools.ietf.org/html/rfc4287, section 4.2.6.
"""
id = Attribute("""atom:id""")
categories = Attribute("""List of categories""")
class IWritableAtomMetadata(Interface):
def setId():
"""Set and return a urn:uuid id."""
class IWriteAtomMetadata(IAtomMetadata, IWritableAtomMetadata):
"""Write atomid as well as read."""
class IAtomEntryNameFactory(Interface):
def chooseName():
"""Returns the new name within the collection."""
class ICategory(Interface):
"""See http://tools.ietf.org/html/rfc4287, section 4.2.2.
"""
term = Attribute("""See 4.2.2.1.""")
scheme = Attribute("""See 4.2.2.2.""")
label = Attribute("""See 4.2.2.3.""")
class ILink(Interface):
"""See http://tools.ietf.org/html/rfc4287.
"""
href = Attribute("""A URI""")
type = Attribute("""Content type of the linked resource""")
rel = Attribute("""Link relationship: 'alternate', 'self', etc.""")
class IAtomBase(Interface):
"""See http://tools.ietf.org/html/rfc4287.
"""
author = Attribute("""A mapping with name, URI, and email keys""")
id = Attribute("""A universally unique identifier""")
title = Attribute("""A human readable text""")
updated = Attribute("""A RFC 3339 date/time string""")
links = Attribute("""A dict of links""")
class IEntry(IAtomBase):
"""See http://tools.ietf.org/html/rfc4287.
"""
summary = Attribute("""A human readable text summary""")
updated = Attribute("""A RFC 3339 date/time string""")
# geographic elements
coords_georss = Attribute("""GML coordinate encoding of the location""")
hasLineString = Attribute("""Boolean, True if has a line location""")
hasPoint = Attribute("""Boolean, True if has a point location""")
hasPolygon = Attribute("""Boolean, True if has a polygon location""")
class IFeed(IAtomBase):
"""See http://tools.ietf.org/html/rfc4287.
"""
entries = Attribute("""An iterator over feed entries""")
class ISubscriptionFeed(IFeed):
"""See http://tools.ietf.org/html/rfc5005.
Must contain a 'previous-archive' link.
"""
class IArchiveFeed(IFeed):
"""See http://tools.ietf.org/html/rfc5005.
Must contain 'current', 'next-archive', and 'previous-archive' links.
"""
class IPagingFeed(IFeed):
"""See http://tools.ietf.org/html/rfc5005.
Must contain 'first', 'last', 'previous', and 'next' links.
"""
class ISearchFeed(IPagingFeed):
bounds = Attribute("""A (minx, miny, maxx, maxy) tuple""")
page = Attribute("""Integer page number""")
class IAtomPubPOSTable(Interface):
"""Marker for collections."""
class IAtomPubPUTable(Interface):
"""Marker for editable members."""
class IAtomPubDELETEable(Interface):
"""Marker for deleteable members."""
class IAtomPublishable(IAtomPubPUTable, IAtomPubDELETEable):
"""Marker.""" | zgeo.atom | /zgeo.atom-0.4.1.tar.gz/zgeo.atom-0.4.1/zgeo/atom/interfaces.py | interfaces.py |
from zope.traversing.browser.interfaces import IAbsoluteURL
from zope.dublincore.interfaces import ICMFDublinCore
try:
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
raise Exception, "Five's ViewPageTemplateFile doesn't work with named templating"
except:
from zope.app.pagetemplate import ViewPageTemplateFile
from zope.interface import implements
from zope.publisher.browser import BrowserPage
from zope.formlib.namedtemplate import NamedTemplate
from zope.formlib.namedtemplate import NamedTemplateImplementation
from zope.component import getMultiAdapter, queryAdapter
import zope.security.proxy
from zgeo.geographer.interfaces import IGeoreferenced
from zgeo.atom.interfaces import IAtomBase, IEntry, ILink
from zgeo.atom.interfaces import IFeed, ISubscriptionFeed, ISearchFeed
from zgeo.atom.interfaces import IWriteAtomMetadata, IAtomPublishable
from zgeo.atom.interfaces import IAtomPubPOSTable
from zgeo.atom.link import Link
import zope.datetime
# zgeo.spatialindex is required to use a bbox parameter with paging search
# feeds
try:
from zgeo.spatialindex.site import get_catalog
except ImportError:
def noner(arg):
return None
get_catalog = noner
def coords_to_georss(geom):
gtype = geom.type
if gtype == 'Point':
coords = (geom.coordinates,)
elif gtype == 'Polygon':
coords = geom.coordinates[0]
else:
coords = geom.coordinates
tuples = ('%f %f' % (c[1], c[0]) for c in coords)
return ' '.join(tuples)
def rfc3339(date):
ts = zope.datetime.time(date)
return zope.datetime.iso8601_date(ts)
# Convert ISO or RFC 3339 datetime strings to RFC 3339
# Zope's ZDCAnnotatableAdapter gives RFC 3339. Lose the seconds precision
#if str(date).find('T') == 10:
# s = date.split('.')[0]
# Plone's AT content types give ISO
#else:
# t = time.strptime(date, '%Y-%m-%d %H:%M:%S')
# s = time.strftime('%Y-%m-%dT%H:%M:%S', t)
#tz = '%03d:00' % -int(time.timezone/3600)
#return s + tz
def absoluteURL(ob, request):
return getMultiAdapter((ob, request), IAbsoluteURL)()
class NullGeometry(object):
type = None
coordinates = None
class NullGeoItem(object):
id = None
properties = None
def __init__(self):
self.geometry = NullGeometry()
class AtomBase(BrowserPage):
"""Not to be instantiated.
"""
implements(IAtomBase)
@property
def id(self):
context = zope.security.proxy.removeSecurityProxy(self.context)
atom = IWriteAtomMetadata(context)
if atom.id is None:
return atom.setId()
return atom.id
@property
def title(self):
return self.dc.Title()
@property
def updated(self):
return rfc3339(self.dc.ModificationDate())
@property
def author(self):
return {
'name': self.dc.Creator()
,
'uri': '',
'email': ''
}
@property
def links(self):
"""Override this."""
raise NotImplementedError
class LinkEntry(AtomBase):
implements(IEntry)
__name__ = 'atom-entry'
template = NamedTemplate('template-atom-entry')
def __init__(self, context, request):
self.context = context
self.request = request
self.dc = zope.security.proxy.removeSecurityProxy(
ICMFDublinCore(self.context)
)
try:
self.geom = IGeoreferenced(self.context)
except:
self.geom = NullGeometry()
@property
def published(self):
return rfc3339(self.dc.CreationDate())
@property
def summary(self):
return self.dc.Description()
@property
def links(self):
items = {
'alternate': Link(
absoluteURL(self.context, self.request),
rel='alternate',
type='text/html')
}
if IAtomPublishable.providedBy(self.context):
items['edit'] = Link(
"%s/atom-entry" % absoluteURL(self.context, self.request),
rel='edit',
type='application/atom+xml;type=entry')
return items
@property
def hasPoint(self):
return int(self.geom.type == 'Point')
@property
def hasLineString(self):
return int(self.geom.type == 'LineString')
@property
def hasPolygon(self):
return int(self.geom.type == 'Polygon')
@property
def coords_georss(self):
return coords_to_georss(self.geom)
def __call__(self):
return self.template().encode('utf-8')
class FeedBase(AtomBase):
implements(IFeed)
def __init__(self, context, request):
self.context = context
self.request = request
self.dc = zope.security.proxy.removeSecurityProxy(
ICMFDublinCore(self.context)
)
@property
def links(self):
raise NotImplementedError
@property
def entries(self):
context = zope.security.proxy.removeSecurityProxy(self.context)
for item in context.values():
yield LinkEntry(item, self.request)
def collection_href(self):
if IAtomPubPOSTable.providedBy(self.context):
return '%s/atompub-collection' % absoluteURL(
self.context, self.request)
return None
class SubscriptionFeed(FeedBase):
implements(ISubscriptionFeed)
__name__ = 'atom-subscription-feed'
template = NamedTemplate('template-atom-subscription-feed')
@property
def links(self):
return {
'alternate': Link(
absoluteURL(self.context, self.request),
rel='alternate',
type='text/html'
),
'self': Link(
self.request.getURL(),
rel='self',
type='application/atom+xml'
),
'previous-archive': Link('None', rel='previous-archive')
}
def __call__(self):
return self.template().encode('utf-8')
class SearchFeed(FeedBase):
implements(ISearchFeed)
__name__ = 'atom-search-feed'
template = NamedTemplate('template-atom-search-feed')
page_size = 20
def __init__(self, context, request):
self.context = context
self.request = request
self.dc = ICMFDublinCore(self.context)
self.catalog = None
self.bounds = None
self.page = 1
self.num_results = 0
self.num_pages = 0
self.results = []
def parse_bbox(self, bbox=None):
if bbox is None:
b = self.request.form.get('bbox')
if b is None:
return None
else:
b = bbox
return tuple(float(x) for x in b.split(','))
def _first_link(self):
url = "%s/@@%s" % (
absoluteURL(self.context, self.request), self.__name__
)
if self.bounds:
url = "%s?bbox=%f,%f,%f,%f" % ((url,) + self.bounds)
return url
def _last_link(self):
url = "%s/@@%s?page=-1" % (
absoluteURL(self.context, self.request), self.__name__
)
if self.bounds:
url = "%s&bbox=%f,%f,%f,%f" % ((url,) + self.bounds)
return url
def _previous_link(self):
if self.page == 1:
return 'None'
url = "%s/@@%s?page=%d" % (
absoluteURL(self.context, self.request),
self.__name__,
self.page - 1
)
if self.bounds:
url = "%s&bbox=%f,%f,%f,%f" % ((url,) + self.bounds)
return url
def _next_link(self):
if self.page == -1 or self.page >= self.num_pages:
return 'None'
url = "%s/@@%s?page=%d" % (
absoluteURL(self.context, self.request),
self.__name__,
self.page + 1
)
if self.bounds:
url = "%s&bbox=%f,%f,%f,%f" % ((url,) + self.bounds)
return url
def update(self):
self.bounds = self.parse_bbox()
if self.bounds is not None:
self.catalog = get_catalog(self.context)
if self.catalog is None:
raise Exception, "Spatial search is not supported in the absence of a spatial catalog"
results = self.catalog.searchResults(bounds=self.bounds)
num_results = len(results)
self.results = results
else:
results = list(self.context.values())
num_results = len(results)
self.results = results
self.num_pages = num_results/self.page_size + num_results%self.page_size
self.num_results = num_results
page = int(self.request.form.get('page', 1))
if page > 1 and page > self.num_pages:
raise Exception, "Page number exceeds number of pages"
elif page < 0 and -page > self.num_pages:
raise Exception, "Page number exceeds number of pages"
else:
self.page = page
@property
def entries(self):
if self.page >= 0:
begin = (self.page-1) * self.page_size
end = begin + self.page_size
else:
begin = self.num_results + (self.page * self.page_size)
end = begin + self.page_size
if end > self.num_results: end = self.num_results
for result in list(self.results)[begin:end]:
yield LinkEntry(result, self.request)
@property
def links(self):
return {
'alternate': Link(
absoluteURL(self.context, self.request),
rel='alternate',
type='text/html'
),
'self': Link(
'%s/@@%s' % (absoluteURL(self.context, self.request), self.__name__),
rel='self',
type='application/atom+xml'
),
'first': Link(self._first_link(), rel='first'),
'last': Link(self._last_link(), rel='last'),
'previous': Link(self._previous_link(), rel='previous'),
'next': Link(self._next_link(), rel='next'),
}
def __call__(self):
self.update()
return self.template().encode('utf-8')
# Named template implementations
entry_template = NamedTemplateImplementation(
ViewPageTemplateFile('entry.pt')
)
subscription_feed_template = NamedTemplateImplementation(
ViewPageTemplateFile('subscription_feed.pt')
)
search_feed_template = NamedTemplateImplementation(
ViewPageTemplateFile('search_feed.pt')
) | zgeo.atom | /zgeo.atom-0.4.1.tar.gz/zgeo.atom-0.4.1/zgeo/atom/browser.py | browser.py |
zgeo.atom Package Readme
=========================
Test setting atom:id through the AtomMetadata annotator
>>> from zgeo.atom.interfaces import IWriteAtomMetadata
>>> atom = IWriteAtomMetadata(places)
>>> atom.id = 'urn:uuid:places'
>>> atom.id
'urn:uuid:places'
>>> atom = IWriteAtomMetadata(placemark)
>>> atom.id = 'urn:uuid:placemark'
>>> atom.id
'urn:uuid:placemark'
Objects that provide IGeoItem can be represented as Atom entries using this
package's link entry view.
Test the Atom link entry view of the placemark
>>> from zgeo.atom.browser import LinkEntry
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> view = LinkEntry(placemark, request)
>>> view
<zgeo.atom.browser.LinkEntry object at ...>
>>> view.id
'urn:uuid:placemark'
>>> view.title
'A'
>>> view.summary
"Place marked 'A'"
>>> view.published
'2007-12-07T19:00:00Z'
>>> view.updated
'2007-12-07T19:01:00Z'
>>> [(x.href, x.rel, x.type) for x in view.links.values()]
[('http://127.0.0.1/places/a', 'alternate', 'text/html')]
>>> view.hasPolygon
0
>>> view.hasLineString
0
>>> view.hasPoint
1
>>> view.coords_georss
'40.590000 -105.080000'
Test the feed view
>>> from zgeo.atom.browser import SubscriptionFeed
>>> view = SubscriptionFeed(places, request)
>>> view
<zgeo.atom.browser.SubscriptionFeed object at ...>
>>> view.title
'Places'
>>> view.updated
'2007-12-07T19:01:00Z'
>>> [(x.href, x.rel, x.type) for x in view.links.values()]
[('http://127.0.0.1', 'self', 'application/atom+xml'), ('http://127.0.0.1/places', 'alternate', 'text/html'), ('None', 'previous-archive', 'application/atom+xml')]
>>> view.entries
<generator object at ...>
>>> [e for e in view.entries][0]
<zgeo.atom.browser.LinkEntry object at ...>
Test the search view. Any container that is to serve as a spatial index must
provide both IAttributeAnnotatable and ISpatiallyIndexable. This is done in the test setup.
Test with an empty request
>>> from zgeo.atom.browser import SearchFeed
>>> view = SearchFeed(places, request)
>>> view
<zgeo.atom.browser.SearchFeed object at ...>
>>> view.update()
>>> view.bounds
>>> view.page
1
>>> view.num_pages
1
>>> list(view.entries)
[<zgeo.atom.browser.LinkEntry object at ...>]
>>> [(x.href, x.rel, x.type) for x in view.links.values()]
[('http://127.0.0.1/places/@@atom-search-feed?page=-1', 'last', 'application/atom+xml'), ('http://127.0.0.1/places/@@atom-search-feed', 'self', 'application/atom+xml'), ('http://127.0.0.1/places', 'alternate', 'text/html'), ('None', 'next', 'application/atom+xml'), ('http://127.0.0.1/places/@@atom-search-feed', 'first', 'application/atom+xml'), ('None', 'previous', 'application/atom+xml')]
This request uses a bounding box that misses our mock content and should have
no results.
>>> request = TestRequest(form={'bbox': '0,30,15,45'})
>>> view = SearchFeed(places, request)
>>> view
<zgeo.atom.browser.SearchFeed object at ...>
>>> view.update()
>>> view.bounds
(0.0, 30.0, 15.0, 45.0)
>>> view.page
1
>>> view.num_pages
0
>>> list(view.entries)
[]
This request uses a bounding box that intersects our mock content and should
have 1 result
>>> request = TestRequest(form={'bbox': '-115,30,-100,45'})
>>> view = SearchFeed(places, request)
>>> view
<zgeo.atom.browser.SearchFeed object at ...>
>>> view.update()
>>> view.bounds
(-115.0, 30.0, -100.0, 45.0)
>>> view.page
1
>>> view.num_results
1
>>> view.num_pages
1
>>> list(view.entries)
[<zgeo.atom.browser.LinkEntry object at ...>]
>>> entry = list(view.entries)[0]
>>> entry.id
'urn:uuid:placemark'
>>> entry.title
'A'
>>> entry.summary
"Place marked 'A'"
>>> entry.published
'2007-12-07T19:00:00Z'
>>> entry.updated
'2007-12-07T19:01:00Z'
>>> [(x.href, x.rel, x.type) for x in view.links.values()]
[('http://127.0.0.1/places/@@atom-search-feed?page=-1&bbox=-115.000000,30.000000,-100.000000,45.000000', 'last', 'application/atom+xml'), ('http://127.0.0.1/places/@@atom-search-feed', 'self', 'application/atom+xml'), ('http://127.0.0.1/places', 'alternate', 'text/html'), ('None', 'next', 'application/atom+xml'), ('http://127.0.0.1/places/@@atom-search-feed?bbox=-115.000000,30.000000,-100.000000,45.000000', 'first', 'application/atom+xml'), ('None', 'previous', 'application/atom+xml')]
Test the "next" document. Should fail since there's only one possible page.
>>> request = TestRequest(form={'bbox': '-115,30,-100,45', 'page': '2'})
>>> view = SearchFeed(places, request)
>>> view.update()
Traceback (most recent call last):
...
Exception: Page number exceeds number of pages
Test the "last" document. Should succeed.
>>> request = TestRequest(form={'bbox': '-115,30,-100,45', 'page': '-1'})
>>> view = SearchFeed(places, request)
>>> view.update()
>>> view.page
-1
>>> view.num_pages
1
>>> list(view.entries)
[<zgeo.atom.browser.LinkEntry object at ...>]
Backing up one previous the last should raise an exception
>>> request = TestRequest(form={'bbox': '-115,30,-100,45', 'page': '-2'})
>>> view = SearchFeed(places, request)
>>> view.update()
Traceback (most recent call last):
...
Exception: Page number exceeds number of pages
| zgeo.atom | /zgeo.atom-0.4.1.tar.gz/zgeo.atom-0.4.1/zgeo/atom/README.txt | README.txt |
from zope.component import adapts
from zope.interface import implements, alsoProvides
from zope.dublincore.interfaces import ICMFDublinCore
from zope.dublincore.interfaces import IWriteZopeDublinCore, IZopeDublinCore
from zgeo.geographer.interfaces import IGeoreferenceable, IGeoreferenced
from zope.filerepresentation.interfaces import IFileFactory, IWriteFile
from zope.app.folder import Folder
from zope.app.folder.interfaces import IFolder
from zgeo.atom import feedparser
from zgeo.atom.interfaces import IAtomPublishable
from zgeo.atom.publication import IAtomPubRequest
from zgeo.atom.interfaces import IAtomEntryNameFactory
from zgeo.geographer.interfaces import IWriteGeoreferenced
import datetime
import re
import uuid
class Placemark(object):
implements((IGeoreferenceable, IGeoreferenced, ICMFDublinCore))
def __init__(self, id, summary, long, lat):
self.id = id
self.summary = summary
self.type = 'Point'
self.coordinates = (long, lat)
def Title(self):
return self.__name__.capitalize()
def Description(self):
return self.summary
def CreationDate(self):
return '2007-12-07 12:00:00'
def ModificationDate(self):
return '2007-12-07 12:01:00'
class PlacesFolder(Folder):
implements(ICMFDublinCore)
def Title(self):
return self.__name__.capitalize()
def Description(self):
return 'Test Places'
def CreationDate(self):
return '2007-12-07 12:00:00'
def ModificationDate(self):
return '2007-12-07 12:01:00'
class AbsoluteURL(object):
def __init__(self, context, request):
self.context = context
def __call__(self):
return self.context.absolute_url()
class AtomFileFactory(object):
"""Adapts a generic content object"""
implements(IFileFactory)
adapts(IFolder)
def __init__(self, context):
self.context = context
def __call__(self, name, content_type, data):
feed = feedparser.parse(data)
entry = feed.entries[0]
title = entry.get('title', u'Untitled')
summary = entry.get('summary', u'Unsummarized')
where = entry.where
p = Folder()
dc = IWriteZopeDublinCore(p)
dc.title = title
dc.description = summary
dc.creator = u'Grok'
geo = IWriteGeoreferenced(p)
geo.setGeoInterface(where['type'], where['coordinates'])
now = datetime.datetime.now()
dc.created = now
p.__name__ = name
alsoProvides(p, IAtomPublishable)
return p
class AtomWriteFile(object):
implements(IWriteFile)
adapts(IFolder)
def __init__(self, context):
self.context = context
def write(self, data):
dc = IWriteZopeDublinCore(self.context)
geo = IWriteGeoreferenced(self.context)
feed = feedparser.parse(data)
entry = feed.entries[0]
title = entry.get('title')
if title is not None:
dc.title = title
summary = entry.get('summary')
if summary is not None:
dc.description = summary
where = entry.get('where')
if where is not None:
geo.setGeoInterface(where['type'], where['coordinates'])
class AtomEntryNamer(object):
implements(IAtomEntryNameFactory)
adapts(IFolder, IAtomPubRequest)
def __init__(self, context, request):
self.context = context
self.request = request
def get_unique_name(self, candidate):
# Make a unique name from candidate by suffixing
if candidate not in self.context:
return candidate
m = re.search(r"(.+)\.(\d+)$", candidate)
if not m:
x = "%s.1" % candidate
else:
x = "%s.%d" % (m.groups[0], int(m.groups[1])+1)
return self.get_unique_name(x)
def chooseName(self):
slug = self.request.getHeader('Slug')
if slug is not None:
name = slug.strip().lower()
name = self.get_unique_name(re.sub('\W+', '-', name))
else:
name = str(uuid.uuid1())
return name | zgeo.atom | /zgeo.atom-0.4.1.tar.gz/zgeo.atom-0.4.1/zgeo/atom/example.py | example.py |
__version__ = "4.2-pre-" + "$Revision: 262 $"[11:14] + "-svn"
__license__ = """Copyright (c) 2002-2007, Mark Pilgrim, All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE."""
__author__ = "Mark Pilgrim <http://diveintomark.org/>"
__contributors__ = ["Jason Diamond <http://injektilo.org/>",
"John Beimler <http://john.beimler.org/>",
"Fazal Majid <http://www.majid.info/mylos/weblog/>",
"Aaron Swartz <http://aaronsw.com/>",
"Kevin Marks <http://epeus.blogspot.com/>",
"Sam Ruby <http://intertwingly.net/>"]
_debug = 0
# HTTP "User-Agent" header to send to servers when downloading feeds.
# If you are embedding feedparser in a larger application, you should
# change this to your application name and URL.
USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
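# Both headers are plain module-level attributes, so an embedding application
# can rebind them before calling parse(); a minimal sketch (the application
# name and URLs are hypothetical, and the module is imported here under the
# bare name 'feedparser' rather than its zgeo.atom.feedparser home):
#
#   import feedparser
#   feedparser.USER_AGENT = 'MyAtomClient/1.0 +http://example.com/'
#   feedparser.ACCEPT_HEADER = 'application/atom+xml, application/xml;q=0.5'
#   d = feedparser.parse('http://example.com/feed')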
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
# of pre-installed parsers until it finds one that supports everything we need.
PREFERRED_XML_PARSERS = ["drv_libxml2"]
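# Note that make_parser() is invoked once, at import time (below), so this
# list only takes effect if edited here or patched before the first import.
# A sketch prepending the standard expat driver (assuming it is installed):
#
#   PREFERRED_XML_PARSERS = ["drv_libxml2", "xml.sax.expatreader"]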
# If you want feedparser to automatically run HTML markup through HTML Tidy, set
# this to 1. Requires mxTidy <http://www.egenix.com/files/python/mxTidy.html>
# or utidylib <http://utidylib.berlios.de/>.
TIDY_MARKUP = 0
# List of Python interfaces for HTML Tidy, in order of preference. Only useful
# if TIDY_MARKUP = 1
PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"]
# If you want feedparser to automatically resolve all relative URIs, set this
# to 1.
RESOLVE_RELATIVE_URIS = 1
# If you want feedparser to automatically sanitize all potentially unsafe
# HTML content, set this to 1.
SANITIZE_HTML = 1
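# With the defaults above, typical usage of this module boils down to the
# (hedged) sketch below; parse(), defined later in this file, accepts a URL,
# a filename, a stream, or a raw string:
#
#   d = parse('http://example.com/atom.xml')
#   d.feed.title       # attribute access courtesy of FeedParserDict
#   d.entries[0].link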
# ---------- required modules (should come with any Python distribution) ----------
import sgmllib, re, sys, copy, urlparse, time, rfc822, types, cgi, urllib, urllib2
try:
from cStringIO import StringIO as _StringIO
except:
from StringIO import StringIO as _StringIO
# ---------- optional modules (feedparser will work without these, but with reduced functionality) ----------
# gzip is included with most Python distributions, but may not be available if you compiled Python yourself
try:
import gzip
except:
gzip = None
try:
import zlib
except:
zlib = None
# If a real XML parser is available, feedparser will attempt to use it. feedparser has
# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the
# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some
# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing.
try:
import xml.sax
xml.sax.make_parser(PREFERRED_XML_PARSERS) # test for valid parsers
from xml.sax.saxutils import escape as _xmlescape
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
    def _xmlescape(data,entities={}):
        # fallback mirroring xml.sax.saxutils.escape; `entities` maps
        # additional characters to their replacement strings
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        for char, entity in entities.items():
            data = data.replace(char, entity)
        return data
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except:
base64 = binascii = None
# cjkcodecs and iconv_codec provide support for more character encodings.
# Both are available from http://cjkpython.i18n.org/
try:
import cjkcodecs.aliases
except:
pass
try:
import iconv_codec
except:
pass
# chardet library auto-detects character encodings
# Download from http://chardet.feedparser.org/
try:
import chardet
if _debug:
import chardet.constants
chardet.constants._debug = 1
except:
chardet = None
# reversible htmlentitydefs mappings for Python 2.2
try:
from htmlentitydefs import name2codepoint, codepoint2name
except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
# BeautifulSoup parser used for parsing microformats from embedded HTML content
# http://www.crummy.com/software/BeautifulSoup/. At the moment there appears to
# be a version incompatibility, so the import is replaced with 'None'. Restoring
# the try/import/except/None will re-enable the microformat (MF) tests.
BeautifulSoup = None
# ---------- don't touch these ----------
class ThingsNobodyCaresAboutButMe(Exception): pass
class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass
class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass
class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass
class UndeclaredNamespace(Exception): pass
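# The stock sgmllib regexes are too strict for real-world feeds; the three
# assignments below loosen them (tag names may contain '_' and ':', any '<!'
# run is treated as a special section, and hexadecimal character references
# like '&#x26;' are recognized). The conditional block that follows works
# around sgmllib versions whose endbracket regex mishandles '<' and '>'
# inside quoted attribute values.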
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
sgmllib.special = re.compile('<!')
sgmllib.charref = re.compile('&#(\d+|x[0-9a-fA-F]+);')
if sgmllib.endbracket.search(' <').start(0):
class EndBracketMatch:
endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
def search(self,string,index=0):
self.match = self.endbracket.match(string,index)
if self.match: return self
def start(self,n):
return self.match.end(n)
sgmllib.endbracket = EndBracketMatch()
SUPPORTED_VERSIONS = {'': 'unknown',
'rss090': 'RSS 0.90',
'rss091n': 'RSS 0.91 (Netscape)',
'rss091u': 'RSS 0.91 (Userland)',
'rss092': 'RSS 0.92',
'rss093': 'RSS 0.93',
'rss094': 'RSS 0.94',
'rss20': 'RSS 2.0',
'rss10': 'RSS 1.0',
'rss': 'RSS (unknown version)',
'atom01': 'Atom 0.1',
'atom02': 'Atom 0.2',
'atom03': 'Atom 0.3',
'atom10': 'Atom 1.0',
'atom': 'Atom (unknown version)',
'cdf': 'CDF',
'hotrss': 'Hot RSS'
}
try:
UserDict = dict
except NameError:
# Python 2.1 does not have dict
from UserDict import UserDict
def dict(aList):
rc = {}
for k, v in aList:
rc[k] = v
return rc
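# FeedParserDict is the result type for feeds and entries: a dictionary that
# also supports attribute access (d.title == d['title']) and transparently
# aliases legacy RSS key names to their Atom-flavored equivalents via the
# `keymap` table below.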
class FeedParserDict(UserDict):
keymap = {'channel': 'feed',
'items': 'entries',
'guid': 'id',
'date': 'updated',
'date_parsed': 'updated_parsed',
'description': ['subtitle', 'summary'],
'url': ['href'],
'modified': 'updated',
'modified_parsed': 'updated_parsed',
'issued': 'published',
'issued_parsed': 'published_parsed',
'copyright': 'rights',
'copyright_detail': 'rights_detail',
'tagline': 'subtitle',
'tagline_detail': 'subtitle_detail'}
def __getitem__(self, key):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'enclosures':
norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
if key == 'license':
for link in UserDict.__getitem__(self, 'links'):
if link['rel']=='license' and link.has_key('href'):
return link['href']
if key == 'categories':
return [(tag['scheme'], tag['term']) for tag in UserDict.__getitem__(self, 'tags')]
realkey = self.keymap.get(key, key)
if type(realkey) == types.ListType:
for k in realkey:
if UserDict.has_key(self, k):
return UserDict.__getitem__(self, k)
if UserDict.has_key(self, key):
return UserDict.__getitem__(self, key)
return UserDict.__getitem__(self, realkey)
def __setitem__(self, key, value):
for k in self.keymap.keys():
if key == k:
key = self.keymap[k]
if type(key) == types.ListType:
key = key[0]
return UserDict.__setitem__(self, key, value)
def get(self, key, default=None):
if self.has_key(key):
return self[key]
else:
return default
def setdefault(self, key, value):
if not self.has_key(key):
self[key] = value
return self[key]
def has_key(self, key):
try:
return hasattr(self, key) or UserDict.has_key(self, key)
except AttributeError:
return False
def __getattr__(self, key):
try:
return self.__dict__[key]
except KeyError:
pass
try:
assert not key.startswith('_')
return self.__getitem__(key)
except:
raise AttributeError, "object has no attribute '%s'" % key
def __setattr__(self, key, value):
if key.startswith('_') or key == 'data':
self.__dict__[key] = value
else:
return self.__setitem__(key, value)
def __contains__(self, key):
return self.has_key(key)
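# A quick illustration of the aliasing above (a sketch, not executed here):
#
#   d = FeedParserDict()
#   d['url'] = 'http://example.com/'   # stored under the canonical key 'href'
#   d.href                             # -> 'http://example.com/'
#   d['modified']                      # -> whatever was stored as 'updated'
#
# zopeCompatibilityHack() below swaps FeedParserDict for a plain dict, for old
# Zope versions whose security machinery cannot cope with dict subclasses.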
def zopeCompatibilityHack():
global FeedParserDict
del FeedParserDict
def FeedParserDict(aDict=None):
rc = {}
if aDict:
rc.update(aDict)
return rc
_ebcdic_to_ascii_map = None
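# _ebcdic_to_ascii translates an EBCDIC byte string to ASCII, building its
# 256-byte translation table lazily on first use; the parser falls back on it
# when a feed turns out to be served in an EBCDIC code page.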
def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
)
import string
_ebcdic_to_ascii_map = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(_ebcdic_to_ascii_map)
_cp1252 = {
unichr(128): unichr(8364), # euro sign
unichr(130): unichr(8218), # single low-9 quotation mark
unichr(131): unichr( 402), # latin small letter f with hook
unichr(132): unichr(8222), # double low-9 quotation mark
unichr(133): unichr(8230), # horizontal ellipsis
unichr(134): unichr(8224), # dagger
unichr(135): unichr(8225), # double dagger
unichr(136): unichr( 710), # modifier letter circumflex accent
unichr(137): unichr(8240), # per mille sign
unichr(138): unichr( 352), # latin capital letter s with caron
unichr(139): unichr(8249), # single left-pointing angle quotation mark
unichr(140): unichr( 338), # latin capital ligature oe
unichr(142): unichr( 381), # latin capital letter z with caron
unichr(145): unichr(8216), # left single quotation mark
unichr(146): unichr(8217), # right single quotation mark
unichr(147): unichr(8220), # left double quotation mark
unichr(148): unichr(8221), # right double quotation mark
unichr(149): unichr(8226), # bullet
unichr(150): unichr(8211), # en dash
unichr(151): unichr(8212), # em dash
unichr(152): unichr( 732), # small tilde
unichr(153): unichr(8482), # trade mark sign
unichr(154): unichr( 353), # latin small letter s with caron
unichr(155): unichr(8250), # single right-pointing angle quotation mark
unichr(156): unichr( 339), # latin small ligature oe
unichr(158): unichr( 382), # latin small letter z with caron
unichr(159): unichr( 376)} # latin capital letter y with diaeresis
_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)')
def _urljoin(base, uri):
uri = _urifixer.sub(r'\1\3', uri)
try:
return urlparse.urljoin(base, uri)
except:
uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)])
return urlparse.urljoin(base, uri)
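# Example (illustrative): _urifixer strips stray slashes after the scheme
# before the join is attempted:
#
#   >>> _urifixer.sub(r'\1\3', 'http:////example.com/feed')
#   'http://example.com/feed'
#   >>> _urljoin('http://example.com/feeds/rss.xml', 'entry/1')
#   'http://example.com/feeds/entry/1'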
class _FeedParserMixin:
namespaces = {'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://web.resource.org/cc/': 'cc',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/':'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf'
}
_matchnamespaces = {}
can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo']
can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description']
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self, baseuri=None, baselang=None, encoding='utf-8'):
if _debug: sys.stderr.write('initializing FeedParser\n')
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.encoding = encoding # character encoding
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
# georss
self.ingeometry = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.baseuri = baseuri or ''
self.lang = baselang or None
self.svgOK = 0
if baselang:
self.feeddata['language'] = baselang.replace('_','-')
def unknown_starttag(self, tag, attrs):
if _debug: sys.stderr.write('start %s with %s\n' % (tag, attrs))
# normalize attrs
attrs = [(k.lower(), v) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
# Note: probably shouldn't simply recreate localname here, but
# our namespace handling isn't actually 100% correct in cases where
# the feed redefines the default namespace (which is actually
# the usual case for inline content, thanks Sam), so here we
# cheat and just reconstruct the element based on localname
# because that compensates for the bugs in our namespace handling.
# This will horribly munge inline content with non-empty qnames,
# but nobody actually does that, so I'm not fixing it.
if tag.find(':') <> -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg': self.svgOK = 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in ill-formed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
return self.push(prefix + suffix, 1)
def unknown_endtag(self, tag):
if _debug: sys.stderr.write('end %s\n' % tag)
# match namespaces
if tag.find(':') <> -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg': self.svgOK = 0
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
def handle_charref(self, ref):
# called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack: return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = unichr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
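# Example (illustrative): references that encode markup-significant
# characters are preserved verbatim, everything else becomes UTF-8 text:
#   ref '60'  -> appends '&#60;' (kept, since it encodes '<')
#   ref '160' -> appends '\xc2\xa0' (U+00A0 no-break space as UTF-8)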
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack: return
if _debug: sys.stderr.write('entering handle_entityref with %s\n' % ref)
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities.keys():
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try: name2codepoint[ref]
except KeyError: text = '&%s;' % ref
else: text = unichr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack: return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if _debug: sys.stderr.write('entering parse_declaration\n')
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1: k = len(self.rawdata)
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
return k+1
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version:
self.version = 'rss090'
if loweruri == 'http://purl.org/rss/1.0/' and not self.version:
self.version = 'rss10'
if loweruri == 'http://www.w3.org/2005/atom' and not self.version:
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if self._matchnamespaces.has_key(loweruri):
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
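# Example (illustrative): for a feed element such as
#   <feed xmlns="http://www.w3.org/2005/Atom"
#         xmlns:dc="http://purl.org/dc/elements/1.1/">
# trackNamespace(None, ...) sets self.version to 'atom10', and
# trackNamespace('dc', ...) maps the prefix through _matchnamespaces so a
# later <dc:creator> is dispatched to _start_dc_creator.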
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack: return
if self.elementstack[-1][0] != element: return
element, expectingText, pieces = self.elementstack.pop()
if self.version == 'atom10' and self.contentparams.get('type','text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0: break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText: return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = base64.decodestring(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
if self.lookslikehtml(output):
self.contentparams['type']='text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# parse microformats
# (must do this before sanitizing because some microformats
# rely on elements that we sanitize)
if is_htmlish and element in ['content', 'description', 'summary']:
mfresults = _parseMicroformats(output, self.baseuri, self.encoding)
if mfresults:
for tag in mfresults.get('tags', []):
self._addTag(tag['term'], tag['scheme'], tag['label'])
for enclosure in mfresults.get('enclosures', []):
self._start_enclosure(enclosure)
for xfn in mfresults.get('xfn', []):
self._addXFN(xfn['relationships'], xfn['href'], xfn['name'])
vcard = mfresults.get('vcard')
if vcard:
self._getContext()['vcard'] = vcard
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and type(output) != type(u''):
try:
output = unicode(output, self.encoding)
except:
pass
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding=='utf-8' and type(output) == type(u''):
try:
output = unicode(output.encode('iso-8859-1'), 'utf-8')
except:
pass
# map win-1252 extensions to the proper code points
if type(output) == type(u''):
output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output])
# categories/tags/keywords/whatever are handled in _end_category
if element == 'category':
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
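# Example (illustrative): the enclosing-<div> logic above turns
#   pieces = ['<div>', 'Hello ', '<b>', 'world', '</b>', '</div>']
# into 'Hello <b>world</b>', but leaves
#   pieces = ['<div>', 'foo', '</div>', '<div>', 'bar', '</div>']
# intact, because the first '</div>' closes back to depth 0 before the end.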
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang: self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
def lookslikehtml(self, str):
if self.version.startswith('atom'): return
if self.contentparams.get('type','text/html') != 'text/plain': return
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>',str) or re.search("&#?\w+;",str)): return
# all tags must be in a restricted subset of valid HTML tags
if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements,
re.findall(r'</?(\w+)',str)): return
# all entities must have been defined as valid HTML entities
from htmlentitydefs import entitydefs
if filter(lambda e: e not in entitydefs.keys(),
re.findall(r'&(\w+);',str)): return
return 1
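# Example (illustrative, assuming a non-Atom feed whose contentparams type
# is 'text/plain'):
#   lookslikehtml('a <b>bold</b> claim')   -> 1    (close tag, allowed element)
#   lookslikehtml('for all x, x<y holds')  -> None (no close tag or entity)
#   lookslikehtml('<script>hi()</script>') -> None (element not acceptable)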
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos <> -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
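# Example (illustrative): RSS variants disagree on the attribute name, so
#   _itsAnHrefDamnIt({'url': 'http://example.com/a.mp3', 'type': 'audio/mpeg'})
# returns {'href': 'http://example.com/a.mp3', 'type': 'audio/mpeg'};
# 'url' and 'uri' are both folded into 'href'.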
def _save(self, key, value):
context = self._getContext()
context.setdefault(key, value)
def _start_rss(self, attrsD):
versionmap = {'0.91': 'rss091u',
'0.92': 'rss092',
'0.93': 'rss093',
'0.94': 'rss094'}
if not self.version:
attr_version = attrsD.get('version', '')
version = versionmap.get(attr_version)
if version:
self.version = version
elif attr_version.startswith('2.'):
self.version = 'rss20'
else:
self.version = 'rss'
def _start_dlhottitles(self, attrsD):
self.version = 'hotrss'
def _start_channel(self, attrsD):
self.infeed = 1
self._cdf_common(attrsD)
_start_feedinfo = _start_channel
def _cdf_common(self, attrsD):
if attrsD.has_key('lastmod'):
self._start_modified({})
self.elementstack[-1][-1] = attrsD['lastmod']
self._end_modified()
if attrsD.has_key('href'):
self._start_link({})
self.elementstack[-1][-1] = attrsD['href']
self._end_link()
def _start_feed(self, attrsD):
self.infeed = 1
versionmap = {'0.1': 'atom01',
'0.2': 'atom02',
'0.3': 'atom03'}
if not self.version:
attr_version = attrsD.get('version')
version = versionmap.get(attr_version)
if version:
self.version = version
else:
self.version = 'atom'
def _end_channel(self):
self.infeed = 0
_end_feed = _end_channel
def _start_image(self, attrsD):
context = self._getContext()
context.setdefault('image', FeedParserDict())
self.inimage = 1
self.push('image', 0)
def _end_image(self):
self.pop('image')
self.inimage = 0
def _start_textinput(self, attrsD):
context = self._getContext()
context.setdefault('textinput', FeedParserDict())
self.intextinput = 1
self.push('textinput', 0)
_start_textInput = _start_textinput
def _end_textinput(self):
self.pop('textinput')
self.intextinput = 0
_end_textInput = _end_textinput
def _start_author(self, attrsD):
self.inauthor = 1
self.push('author', 1)
_start_managingeditor = _start_author
_start_dc_author = _start_author
_start_dc_creator = _start_author
_start_itunes_author = _start_author
def _end_author(self):
self.pop('author')
self.inauthor = 0
self._sync_author_detail()
_end_managingeditor = _end_author
_end_dc_author = _end_author
_end_dc_creator = _end_author
_end_itunes_author = _end_author
def _start_itunes_owner(self, attrsD):
self.inpublisher = 1
self.push('publisher', 0)
def _end_itunes_owner(self):
self.pop('publisher')
self.inpublisher = 0
self._sync_author_detail('publisher')
def _start_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('contributor', 0)
def _end_contributor(self):
self.pop('contributor')
self.incontributor = 0
def _start_dc_contributor(self, attrsD):
self.incontributor = 1
context = self._getContext()
context.setdefault('contributors', [])
context['contributors'].append(FeedParserDict())
self.push('name', 0)
def _end_dc_contributor(self):
self._end_name()
self.incontributor = 0
def _start_name(self, attrsD):
self.push('name', 0)
_start_itunes_name = _start_name
def _end_name(self):
value = self.pop('name')
if self.inpublisher:
self._save_author('name', value, 'publisher')
elif self.inauthor:
self._save_author('name', value)
elif self.incontributor:
self._save_contributor('name', value)
elif self.intextinput:
context = self._getContext()
context['name'] = value
_end_itunes_name = _end_name
def _start_width(self, attrsD):
self.push('width', 0)
def _end_width(self):
value = self.pop('width')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['width'] = value
def _start_height(self, attrsD):
self.push('height', 0)
def _end_height(self):
value = self.pop('height')
try:
value = int(value)
except:
value = 0
if self.inimage:
context = self._getContext()
context['height'] = value
def _start_url(self, attrsD):
self.push('href', 1)
_start_homepage = _start_url
_start_uri = _start_url
def _end_url(self):
value = self.pop('href')
if self.inauthor:
self._save_author('href', value)
elif self.incontributor:
self._save_contributor('href', value)
_end_homepage = _end_url
_end_uri = _end_url
def _start_email(self, attrsD):
self.push('email', 0)
_start_itunes_email = _start_email
def _end_email(self):
value = self.pop('email')
if self.inpublisher:
self._save_author('email', value, 'publisher')
elif self.inauthor:
self._save_author('email', value)
elif self.incontributor:
self._save_contributor('email', value)
_end_itunes_email = _end_email
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%s_detail' % key)
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author: return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('&lt;&gt;', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, FeedParserDict())
if author:
context['%s_detail' % key]['name'] = author
if email:
context['%s_detail' % key]['email'] = email
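# Example (illustrative): for an RSS-style author value
#   'jane@example.com (Jane Doe)'
# the regex branch above extracts email 'jane@example.com' and name
# 'Jane Doe' into author_detail; for Atom-style detail dicts the first
# branch builds the combined string 'Jane Doe (jane@example.com)'.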
def _start_subtitle(self, attrsD):
self.pushContent('subtitle', attrsD, 'text/plain', 1)
_start_tagline = _start_subtitle
_start_itunes_subtitle = _start_subtitle
def _end_subtitle(self):
self.popContent('subtitle')
_end_tagline = _end_subtitle
_end_itunes_subtitle = _end_subtitle
def _start_rights(self, attrsD):
self.pushContent('rights', attrsD, 'text/plain', 1)
_start_dc_rights = _start_rights
_start_copyright = _start_rights
def _end_rights(self):
self.popContent('rights')
_end_dc_rights = _end_rights
_end_copyright = _end_rights
def _start_item(self, attrsD):
self.entries.append(FeedParserDict())
self.push('item', 0)
self.inentry = 1
self.guidislink = 0
id = self._getAttribute(attrsD, 'rdf:about')
if id:
context = self._getContext()
context['id'] = id
self._cdf_common(attrsD)
_start_entry = _start_item
_start_product = _start_item
def _end_item(self):
self.pop('item')
self.inentry = 0
_end_entry = _end_item
def _start_dc_language(self, attrsD):
self.push('language', 1)
_start_language = _start_dc_language
def _end_dc_language(self):
self.lang = self.pop('language')
_end_language = _end_dc_language
def _start_dc_publisher(self, attrsD):
self.push('publisher', 1)
_start_webmaster = _start_dc_publisher
def _end_dc_publisher(self):
self.pop('publisher')
self._sync_author_detail('publisher')
_end_webmaster = _end_dc_publisher
def _start_published(self, attrsD):
self.push('published', 1)
_start_dcterms_issued = _start_published
_start_issued = _start_published
def _end_published(self):
value = self.pop('published')
self._save('published_parsed', _parse_date(value))
_end_dcterms_issued = _end_published
_end_issued = _end_published
def _start_updated(self, attrsD):
self.push('updated', 1)
_start_modified = _start_updated
_start_dcterms_modified = _start_updated
_start_pubdate = _start_updated
_start_dc_date = _start_updated
def _end_updated(self):
value = self.pop('updated')
parsed_value = _parse_date(value)
self._save('updated_parsed', parsed_value)
_end_modified = _end_updated
_end_dcterms_modified = _end_updated
_end_pubdate = _end_updated
_end_dc_date = _end_updated
def _start_created(self, attrsD):
self.push('created', 1)
_start_dcterms_created = _start_created
def _end_created(self):
value = self.pop('created')
self._save('created_parsed', _parse_date(value))
_end_dcterms_created = _end_created
def _start_expirationdate(self, attrsD):
self.push('expired', 1)
def _end_expirationdate(self):
self._save('expired_parsed', _parse_date(self.pop('expired')))
# geospatial location, or "where", from georss.org
def _start_georssgeom(self, attrsD):
self.push('geometry', 0)
_start_georss_point = _start_georssgeom
_start_georss_line = _start_georssgeom
_start_georss_polygon = _start_georssgeom
_start_georss_box = _start_georssgeom
def _save_where(self, geometry):
context = self._getContext()
context.setdefault('where', {})
context['where'] = FeedParserDict(geometry)
def _end_georss_point(self):
geometry = _parse_georss_point(self.pop('geometry'))
self._save_where(geometry)
def _end_georss_line(self):
geometry = _parse_georss_line(self.pop('geometry'))
self._save_where(geometry)
def _end_georss_polygon(self):
this = self.pop('geometry')
geometry = _parse_georss_polygon(this)
self._save_where(geometry)
def _end_georss_box(self):
geometry = _parse_georss_box(self.pop('geometry'))
self._save_where(geometry)
def _start_where(self, attrsD):
self.push('where', 0)
_start_georss_where = _start_where
def _start_gml_point(self, attrsD):
self.ingeometry = 'point'
self.push('geometry', 0)
def _start_gml_linestring(self, attrsD):
self.ingeometry = 'linestring'
self.push('geometry', 0)
def _start_gml_polygon(self, attrsD):
self.push('geometry', 0)
def _start_gml_exterior(self, attrsD):
self.push('geometry', 0)
def _start_gml_linearring(self, attrsD):
self.ingeometry = 'polygon'
self.push('geometry', 0)
def _start_gml_pos(self, attrsD):
self.push('pos', 0)
def _end_gml_pos(self):
this = self.pop('pos')
geometry = _parse_georss_point(this)
self._save_where(geometry)
def _start_gml_poslist(self, attrsD):
self.push('pos', 0)
def _end_gml_poslist(self):
geometry = _parse_poslist(self.pop('pos'), self.ingeometry)
self._save_where(geometry)
def _end_geom(self):
self.ingeometry = 0
self.pop('geometry')
_end_gml_point = _end_geom
_end_gml_linestring = _end_geom
_end_gml_linearring = _end_geom
_end_gml_exterior = _end_geom
_end_gml_polygon = _end_geom
def _end_where(self):
self.pop('where')
_end_georss_where = _end_where
# end geospatial
def _start_cc_license(self, attrsD):
context = self._getContext()
value = self._getAttribute(attrsD, 'rdf:resource')
attrsD = FeedParserDict()
attrsD['rel']='license'
if value: attrsD['href']=value
context.setdefault('links', []).append(attrsD)
def _start_creativecommons_license(self, attrsD):
self.push('license', 1)
_start_creativeCommons_license = _start_creativecommons_license
def _end_creativecommons_license(self):
value = self.pop('license')
context = self._getContext()
attrsD = FeedParserDict()
attrsD['rel']='license'
if value: attrsD['href']=value
context.setdefault('links', []).append(attrsD)
del context['license']
_end_creativeCommons_license = _end_creativecommons_license
def _addXFN(self, relationships, href, name):
context = self._getContext()
xfn = context.setdefault('xfn', [])
value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name})
if value not in xfn:
xfn.append(value)
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label): return
value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label})
if value not in tags:
tags.append(value)
def _start_category(self, attrsD):
if _debug: sys.stderr.write('entering _start_category with %s\n' % repr(attrsD))
term = attrsD.get('term')
scheme = attrsD.get('scheme', attrsD.get('domain'))
label = attrsD.get('label')
self._addTag(term, scheme, label)
self.push('category', 1)
_start_dc_subject = _start_category
_start_keywords = _start_category
def _end_itunes_keywords(self):
for term in self.pop('itunes_keywords').split():
self._addTag(term, 'http://www.itunes.com/', None)
def _start_itunes_category(self, attrsD):
self._addTag(attrsD.get('text'), 'http://www.itunes.com/', None)
self.push('category', 1)
def _end_category(self):
value = self.pop('category')
if not value: return
context = self._getContext()
tags = context['tags']
if value and len(tags) and not tags[-1]['term']:
tags[-1]['term'] = value
else:
self._addTag(value, None, None)
_end_dc_subject = _end_category
_end_keywords = _end_category
_end_itunes_category = _end_category
def _start_cloud(self, attrsD):
self._getContext()['cloud'] = FeedParserDict(attrsD)
def _start_link(self, attrsD):
attrsD.setdefault('rel', 'alternate')
if attrsD['rel'] == 'self':
attrsD.setdefault('type', 'application/atom+xml')
else:
attrsD.setdefault('type', 'text/html')
context = self._getContext()
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
if attrsD.get('rel')=='enclosure' and not context.get('id'):
context['id'] = attrsD.get('href')
expectingText = self.infeed or self.inentry or self.insource
context.setdefault('links', [])
context['links'].append(FeedParserDict(attrsD))
if attrsD.has_key('href'):
expectingText = 0
if (attrsD.get('rel') == 'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types):
context['link'] = attrsD['href']
else:
self.push('link', expectingText)
_start_producturl = _start_link
def _end_link(self):
value = self.pop('link')
context = self._getContext()
_end_producturl = _end_link
def _start_guid(self, attrsD):
self.guidislink = (attrsD.get('ispermalink', 'true') == 'true')
self.push('id', 1)
def _end_guid(self):
value = self.pop('id')
self._save('guidislink', self.guidislink and not self._getContext().has_key('link'))
if self.guidislink:
# guid acts as link, but only if 'ispermalink' is not present or is 'true',
# and only if the item doesn't already have a link element
self._save('link', value)
def _start_title(self, attrsD):
if self.svgOK: return self.unknown_starttag('title', attrsD.items())
self.pushContent('title', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
_start_dc_title = _start_title
_start_media_title = _start_title
def _end_title(self):
if self.svgOK: return
value = self.popContent('title')
if not value: return
context = self._getContext()
_end_dc_title = _end_title
_end_media_title = _end_title
def _start_description(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self.pushContent('description', attrsD, 'text/html', self.infeed or self.inentry or self.insource)
_start_dc_description = _start_description
def _start_abstract(self, attrsD):
self.pushContent('description', attrsD, 'text/plain', self.infeed or self.inentry or self.insource)
def _end_description(self):
if self._summaryKey == 'content':
self._end_content()
else:
value = self.popContent('description')
self._summaryKey = None
_end_abstract = _end_description
_end_dc_description = _end_description
def _start_info(self, attrsD):
self.pushContent('info', attrsD, 'text/plain', 1)
_start_feedburner_browserfriendly = _start_info
def _end_info(self):
self.popContent('info')
_end_feedburner_browserfriendly = _end_info
def _start_generator(self, attrsD):
if attrsD:
attrsD = self._itsAnHrefDamnIt(attrsD)
if attrsD.has_key('href'):
attrsD['href'] = self.resolveURI(attrsD['href'])
self._getContext()['generator_detail'] = FeedParserDict(attrsD)
self.push('generator', 1)
def _end_generator(self):
value = self.pop('generator')
context = self._getContext()
if context.has_key('generator_detail'):
context['generator_detail']['name'] = value
def _start_admin_generatoragent(self, attrsD):
self.push('generator', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('generator')
self._getContext()['generator_detail'] = FeedParserDict({'href': value})
def _start_admin_errorreportsto(self, attrsD):
self.push('errorreportsto', 1)
value = self._getAttribute(attrsD, 'rdf:resource')
if value:
self.elementstack[-1][2].append(value)
self.pop('errorreportsto')
def _start_summary(self, attrsD):
context = self._getContext()
if context.has_key('summary'):
self._summaryKey = 'content'
self._start_content(attrsD)
else:
self._summaryKey = 'summary'
self.pushContent(self._summaryKey, attrsD, 'text/plain', 1)
_start_itunes_summary = _start_summary
def _end_summary(self):
if self._summaryKey == 'content':
self._end_content()
else:
self.popContent(self._summaryKey or 'summary')
self._summaryKey = None
_end_itunes_summary = _end_summary
def _start_enclosure(self, attrsD):
attrsD = self._itsAnHrefDamnIt(attrsD)
context = self._getContext()
attrsD['rel']='enclosure'
context.setdefault('links', []).append(FeedParserDict(attrsD))
href = attrsD.get('href')
if href and not context.get('id'):
context['id'] = href
def _start_source(self, attrsD):
self.insource = 1
def _end_source(self):
self.insource = 0
self._getContext()['source'] = copy.deepcopy(self.sourcedata)
self.sourcedata.clear()
def _start_content(self, attrsD):
self.pushContent('content', attrsD, 'text/plain', 1)
src = attrsD.get('src')
if src:
self.contentparams['src'] = src
self.push('content', 1)
def _start_prodlink(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
def _start_body(self, attrsD):
self.pushContent('content', attrsD, 'application/xhtml+xml', 1)
_start_xhtml_body = _start_body
def _start_content_encoded(self, attrsD):
self.pushContent('content', attrsD, 'text/html', 1)
_start_fullitem = _start_content_encoded
def _end_content(self):
copyToDescription = self.mapContentType(self.contentparams.get('type')) in (['text/plain'] + self.html_types)
value = self.popContent('content')
if copyToDescription:
self._save('description', value)
_end_body = _end_content
_end_xhtml_body = _end_content
_end_content_encoded = _end_content
_end_fullitem = _end_content
_end_prodlink = _end_content
def _start_itunes_image(self, attrsD):
self.push('itunes_image', 0)
self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')})
_start_itunes_link = _start_itunes_image
def _end_itunes_block(self):
value = self.pop('itunes_block', 0)
self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0
def _end_itunes_explicit(self):
value = self.pop('itunes_explicit', 0)
self._getContext()['itunes_explicit'] = (value == 'yes') and 1 or 0
if _XML_AVAILABLE:
class _StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler):
def __init__(self, baseuri, baselang, encoding):
if _debug: sys.stderr.write('trying StrictFeedParser\n')
xml.sax.handler.ContentHandler.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
self.bozo = 0
self.exc = None
def startPrefixMapping(self, prefix, uri):
self.trackNamespace(prefix, uri)
def startElementNS(self, name, qname, attrs):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if lowernamespace.find('backend.userland.com/rss') <> -1:
# match any backend.userland.com namespace
namespace = 'http://backend.userland.com/rss'
lowernamespace = namespace
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = None
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix):
raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix
localname = str(localname).lower()
# qname implementation is horribly broken in Python 2.1 (it
# doesn't report any), and slightly broken in Python 2.2 (it
# doesn't report the xml: namespace). So we match up namespaces
# with a known list first, and then possibly override them with
# the qnames the SAX parser gives us (if indeed it gives us any
# at all). Thanks to MatejC for helping me test this and
# tirelessly telling me that it didn't work yet.
attrsD = {}
if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrsD['xmlns']=namespace
if localname=='svg' and namespace=='http://www.w3.org/2000/svg':
attrsD['xmlns']=namespace
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
if _debug: sys.stderr.write('startElementNS: qname = %s, namespace = %s, givenprefix = %s, prefix = %s, attrs = %s, localname = %s\n' % (qname, namespace, givenprefix, prefix, attrs.items(), localname))
for (namespace, attrlocalname), attrvalue in attrs._attrs.items():
lowernamespace = (namespace or '').lower()
prefix = self._matchnamespaces.get(lowernamespace, '')
if prefix:
attrlocalname = prefix + ':' + attrlocalname
attrsD[str(attrlocalname).lower()] = attrvalue
for qname in attrs.getQNames():
attrsD[str(qname).lower()] = attrs.getValueByQName(qname)
self.unknown_starttag(localname, attrsD.items())
def characters(self, text):
self.handle_data(text)
def endElementNS(self, name, qname):
namespace, localname = name
lowernamespace = str(namespace or '').lower()
if qname and qname.find(':') > 0:
givenprefix = qname.split(':')[0]
else:
givenprefix = ''
prefix = self._matchnamespaces.get(lowernamespace, givenprefix)
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
for name,value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
localname = str(localname).lower()
self.unknown_endtag(localname)
def error(self, exc):
self.bozo = 1
self.exc = exc
def fatalError(self, exc):
self.error(exc)
raise exc
class _BaseHTMLProcessor(sgmllib.SGMLParser):
special = re.compile('''[<>'"]''')
bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)")
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self, encoding, type):
self.encoding = encoding
self.type = type
if _debug: sys.stderr.write('entering BaseHTMLProcessor, encoding=%s\n' % self.encoding)
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def _shorttag_replace(self, match):
tag = match.group(1)
if tag in self.elements_no_end_tag:
return '<' + tag + ' />'
else:
return '<' + tag + '></' + tag + '>'
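# Example (illustrative):
#   '<br/>' -> '<br />'   (void element, kept self-closing)
#   '<p/>'  -> '<p></p>'  (non-void element, expanded to an open/close pair)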
def parse_starttag(self,i):
j=sgmllib.SGMLParser.parse_starttag(self, i)
if self.type == 'application/xhtml+xml':
if j>2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
def feed(self, data):
data = re.compile(r'<!((?!DOCTYPE|--|\[))', re.IGNORECASE).sub(r'&lt;!\1', data)
#data = re.sub(r'<(\S+?)\s*?/>', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace
data = re.sub(r'<([^<\s]+?)\s*/>', self._shorttag_replace, data)
data = data.replace('&#39;', "'")
data = data.replace('&#34;', '"')
if self.encoding and type(data) == type(u''):
data = data.encode(self.encoding)
sgmllib.SGMLParser.feed(self, data)
sgmllib.SGMLParser.close(self)
def normalize_attrs(self, attrs):
if not attrs: return attrs
# utility method to be called by descendants
attrs = dict([(k.lower(), v) for k, v in attrs]).items()
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
attrs.sort()
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class='screen'>, tag='pre', attrs=[('class', 'screen')]
if _debug: sys.stderr.write('_BaseHTMLProcessor, unknown_starttag, tag=%s\n' % tag)
uattrs = []
strattrs=''
if attrs:
for key, value in attrs:
value=value.replace('&gt;','>').replace('&lt;','<').replace('&quot;','"')
value = self.bare_ampersand.sub("&amp;", value)
# thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
if type(value) != type(u''):
try:
value = unicode(value, self.encoding)
except:
value = unicode(value, 'iso-8859-1')
uattrs.append((unicode(key, self.encoding), value))
strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
if self.encoding:
try:
strattrs=strattrs.encode(self.encoding)
except:
pass
if tag in self.elements_no_end_tag:
self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
else:
self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be 'pre'
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
# called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
value = unichr(int(ref[1:],16))
else:
value = unichr(int(ref))
if value in _cp1252.keys():
self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:])
else:
self.pieces.append('&#%(ref)s;' % locals())
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '&copy;', ref will be 'copy'
# Reconstruct the original entity reference.
if name2codepoint.has_key(ref):
self.pieces.append('&%(ref)s;' % locals())
else:
self.pieces.append('&%(ref)s' % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
if _debug: sys.stderr.write('_BaseHTMLProcessor, handle_data, text=%s\n' % text)
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append('<!--%(text)s-->' % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append('<?%(text)s>' % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append('<!%(text)s>' % locals())
_new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = self._new_declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.handle_data(rawdata)
# self.updatepos(declstartpos, i)
return None, -1
def convert_charref(self, name):
return '&#%s;' % name
def convert_entityref(self, name):
return '&%s;' % name
def output(self):
'''Return processed HTML as a single string'''
return ''.join([str(p) for p in self.pieces])
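# Usage sketch (illustrative, assuming utf-8 input):
#   p = _BaseHTMLProcessor('utf-8', 'text/html')
#   p.feed('<p class="x">a &amp; b<br/></p>')
#   p.output()  # -> '<p class="x">a &amp; b<br /></p>'
# The base processor aims to be lossless; subclasses override the handlers
# to rewrite attributes or drop elements, and output() reassembles the rest.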
class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
def __init__(self, baseuri, baselang, encoding, entities):
sgmllib.SGMLParser.__init__(self)
_FeedParserMixin.__init__(self, baseuri, baselang, encoding)
_BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml')
self.entities=entities
def decodeEntities(self, element, data):
data = data.replace('&#60;', '&lt;')
data = data.replace('&#x3c;', '&lt;')
data = data.replace('&#x3C;', '&lt;')
data = data.replace('&#62;', '&gt;')
data = data.replace('&#x3e;', '&gt;')
data = data.replace('&#x3E;', '&gt;')
data = data.replace('&#38;', '&amp;')
data = data.replace('&#x26;', '&amp;')
data = data.replace('&#34;', '&quot;')
data = data.replace('&#x22;', '&quot;')
data = data.replace('&#39;', '&apos;')
data = data.replace('&#x27;', '&apos;')
if self.contentparams.has_key('type') and not self.contentparams.get('type', 'xml').endswith('xml'):
data = data.replace('&lt;', '<')
data = data.replace('&gt;', '>')
data = data.replace('&amp;', '&')
data = data.replace('&quot;', '"')
data = data.replace('&apos;', "'")
return data
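# Example (illustrative): numeric references to markup-significant
# characters are normalized to named entities first, so inside XML content
#   'x &#60; y &#38;&#38; y &#62; z' -> 'x &lt; y &amp;&amp; y &gt; z'
# and only non-XML content types are then fully unescaped to literal text.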
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
class _MicroformatsParser:
STRING = 1
DATE = 2
URI = 3
NODE = 4
EMAIL = 5
known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
self.baseuri = baseuri
self.encoding = encoding
if type(data) == type(u''):
data = data.encode(encoding)
self.tags = []
self.enclosures = []
self.xfn = []
self.vcard = None
def vcardEscape(self, s):
if type(s) in (type(''), type(u'')):
s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n')
return s
def vcardFold(self, s):
s = re.sub(';+$', '', s)
sFolded = ''
iMax = 75
sPrefix = ''
while len(s) > iMax:
sFolded += sPrefix + s[:iMax] + '\n'
s = s[iMax:]
sPrefix = ' '
iMax = 74
sFolded += sPrefix + s
return sFolded
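# Example (illustrative): per RFC 2426, lines are folded at 75 octets with
# a single leading space on each continuation, so a 200-character NOTE
# property becomes three physical lines of 75, 75 (' ' + 74), and 52
# (' ' + 51) characters.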
def normalize(self, s):
return re.sub(r'\s+', ' ', s).strip()
def unique(self, aList):
results = []
for element in aList:
if element not in results:
results.append(element)
return results
def toISO8601(self, dt):
return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt)
def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0):
all = lambda x: 1
sProperty = sProperty.lower()
bFound = 0
bNormalize = 1
propertyMatch = re.compile(r'\b%s\b' % sProperty)
if bAllowMultiple and (iPropertyType != self.NODE):
snapResults = []
containers = elmRoot(['ul', 'ol'], propertyMatch)
for container in containers:
snapResults.extend(container('li'))
bFound = (len(snapResults) != 0)
if not bFound:
snapResults = elmRoot(all, propertyMatch)
bFound = (len(snapResults) != 0)
if (not bFound) and (sProperty == 'value'):
snapResults = elmRoot('pre')
bFound = (len(snapResults) != 0)
bNormalize = not bFound
if not bFound:
snapResults = [elmRoot]
bFound = (len(snapResults) != 0)
arFilter = []
if sProperty == 'vcard':
snapFilter = elmRoot(all, propertyMatch)
for node in snapFilter:
if node.findParent(all, propertyMatch):
arFilter.append(node)
arResults = []
for node in snapResults:
if node not in arFilter:
arResults.append(node)
bFound = (len(arResults) != 0)
if not bFound:
if bAllowMultiple: return []
elif iPropertyType == self.STRING: return ''
elif iPropertyType == self.DATE: return BeautifulSoup.Null
elif iPropertyType == self.URI: return ''
elif iPropertyType == self.NODE: return BeautifulSoup.Null
else: return BeautifulSoup.Null
arValues = []
for elmResult in arResults:
sValue = BeautifulSoup.Null
if iPropertyType == self.NODE:
if bAllowMultiple:
arValues.append(elmResult)
continue
else:
return elmResult
sNodeName = elmResult.name.lower()
if (iPropertyType == self.EMAIL) and (sNodeName == 'a'):
sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0]
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'abbr'):
sValue = elmResult.get('title')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (iPropertyType == self.URI):
if sNodeName == 'a': sValue = elmResult.get('href')
elif sNodeName == 'img': sValue = elmResult.get('src')
elif sNodeName == 'object': sValue = elmResult.get('data')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if (not sValue) and (sNodeName == 'img'):
sValue = elmResult.get('alt')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue:
sValue = elmResult.renderContents()
sValue = re.sub(r'<\S[^>]*>', '', sValue)
sValue = sValue.replace('\r\n', '\n')
sValue = sValue.replace('\r', '\n')
if sValue:
sValue = bNormalize and self.normalize(sValue) or sValue.strip()
if not sValue: continue
if iPropertyType == self.DATE:
sValue = _parse_date_iso8601(sValue)
if bAllowMultiple:
arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue)
else:
return bAutoEscape and self.vcardEscape(sValue) or sValue
return arValues
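# Example (illustrative): for markup like
#   <span class="tel"><span class="type">home</span>
#     <span class="value">+1-555-0100</span></span>
# getPropertyValue(elmTel, 'value', self.EMAIL) returns '+1-555-0100' and
# getPropertyValue(elmTel, 'type', self.STRING, 1, 1) returns ['home'];
# iPropertyType controls how each matched node is serialized, and the
# bAllowMultiple flag switches between first-match and list-of-all results.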
def findVCards(self, elmRoot, bAgentParsing=0):
sVCards = ''
if not bAgentParsing:
arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1)
else:
arCards = [elmRoot]
for elmCard in arCards:
arLines = []
def processSingleString(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue))
return sValue or ''
def processSingleURI(sProperty):
sValue = self.getPropertyValue(elmCard, sProperty, self.URI)
if sValue:
sContentType = ''
sEncoding = ''
sValueKey = ''
if sValue.startswith('data:'):
sEncoding = ';ENCODING=b'
sContentType = sValue.split(';')[0].split('/').pop()
sValue = sValue.split(',', 1).pop()
else:
elmValue = self.getPropertyValue(elmCard, sProperty)
if elmValue:
if sProperty != 'url':
sValueKey = ';VALUE=uri'
sContentType = elmValue.get('type', '').strip().split('/').pop().strip()
sContentType = sContentType.upper()
if sContentType == 'OCTET-STREAM':
sContentType = ''
if sContentType:
sContentType = ';TYPE=' + sContentType.upper()
arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue))
def processTypeValue(sProperty, arDefaultType, arForceType=None):
arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1)
for elmResult in arResults:
arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1)
if arForceType:
arType = self.unique(arForceType + arType)
if not arType:
arType = arDefaultType
sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0)
if sValue:
arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue))
# AGENT
# must do this before all other properties because it is destructive
# (removes nested class="vcard" nodes so they don't interfere with
# this vcard's other properties)
arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1)
for elmAgent in arAgent:
if re.compile(r'\bvcard\b').search(elmAgent.get('class')):
sAgentValue = self.findVCards(elmAgent, 1) + '\n'
sAgentValue = sAgentValue.replace('\n', '\\n')
sAgentValue = sAgentValue.replace(';', '\\;')
if sAgentValue:
arLines.append(self.vcardFold('AGENT:' + sAgentValue))
elmAgent['class'] = ''
elmAgent.contents = BeautifulSoup.Null
else:
sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1)
if sAgentValue:
arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue))
# FN (full name)
sFN = processSingleString('fn')
# N (name)
elmName = self.getPropertyValue(elmCard, 'n')
if elmName:
sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1)
sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1)
arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1)
arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1)
arHonorificSuffixes = self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1)
arLines.append(self.vcardFold('N:' + sFamilyName + ';' +
sGivenName + ';' +
','.join(arAdditionalNames) + ';' +
','.join(arHonorificPrefixes) + ';' +
','.join(arHonorificSuffixes)))
elif sFN:
# implied "N" optimization
# http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization
arNames = self.normalize(sFN).split()
if len(arNames) == 2:
bFamilyNameFirst = (arNames[0].endswith(',') or
len(arNames[1]) == 1 or
((len(arNames[1]) == 2) and (arNames[1].endswith('.'))))
if bFamilyNameFirst:
arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1]))
else:
arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0]))
# SORT-STRING
sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1)
if sSortString:
arLines.append(self.vcardFold('SORT-STRING:' + sSortString))
# NICKNAME
arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1)
if arNickname:
arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname)))
# PHOTO
processSingleURI('photo')
# BDAY
dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE)
if dtBday:
arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday)))
# ADR (address)
arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1)
for elmAdr in arAdr:
arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1)
if not arType:
arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1
sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1)
sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1)
sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1)
sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 1)
sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1)
sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1)
sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1)
arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' +
sPostOfficeBox + ';' +
sExtendedAddress + ';' +
sStreetAddress + ';' +
sLocality + ';' +
sRegion + ';' +
sPostalCode + ';' +
sCountryName))
# LABEL
processTypeValue('label', ['intl','postal','parcel','work'])
# TEL (phone number)
processTypeValue('tel', ['voice'])
# EMAIL
processTypeValue('email', ['internet'], ['internet'])
# MAILER
processSingleString('mailer')
# TZ (timezone)
processSingleString('tz')
# GEO (geographical information)
elmGeo = self.getPropertyValue(elmCard, 'geo')
if elmGeo:
sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1)
sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1)
arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude))
# TITLE
processSingleString('title')
# ROLE
processSingleString('role')
# LOGO
processSingleURI('logo')
# ORG (organization)
elmOrg = self.getPropertyValue(elmCard, 'org')
if elmOrg:
sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1)
if not sOrganizationName:
# implied "organization-name" optimization
# http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization
sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1)
if sOrganizationName:
arLines.append(self.vcardFold('ORG:' + sOrganizationName))
else:
arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1)
arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit)))
# CATEGORY
arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1)
if arCategory:
arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory)))
# NOTE
processSingleString('note')
# REV
processSingleString('rev')
# SOUND
processSingleURI('sound')
# UID
processSingleString('uid')
# URL
processSingleURI('url')
# CLASS
processSingleString('class')
# KEY
processSingleURI('key')
if arLines:
arLines = ['BEGIN:vCard','VERSION:3.0'] + arLines + ['END:vCard']
sVCards += '\n'.join(arLines) + '\n'
return sVCards.strip()
def isProbablyDownloadable(self, elm):
attrsD = elm.attrMap
if not attrsD.has_key('href'): return 0
linktype = attrsD.get('type', '').strip()
if linktype.startswith('audio/') or \
linktype.startswith('video/') or \
(linktype.startswith('application/') and not linktype.endswith('xml')):
return 1
path = urlparse.urlparse(attrsD['href'])[2]
if path.find('.') == -1: return 0
fileext = path.split('.').pop().lower()
return fileext in self.known_binary_extensions
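# illustrative examples of the heuristic above (not executed):
# <a type="audio/mpeg" href="..."> -> 1 (audio/* media type)
# <a type="application/atom+xml" href="..."> -> 0 (application/*xml excluded)
# <a href="http://example.org/show.mp3"> -> 1, assuming '.mp3' is listed
# in known_binary_extensions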
def findTags(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile(r'\btag\b')}):
href = elm.get('href')
if not href: continue
urlscheme, domain, path, params, query, fragment = \
urlparse.urlparse(_urljoin(self.baseuri, href))
segments = path.split('/')
tag = segments.pop()
if not tag:
tag = segments.pop()
tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', ''))
if not tagscheme.endswith('/'):
tagscheme += '/'
self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''}))
def findEnclosures(self):
all = lambda x: 1
enclosure_match = re.compile(r'\benclosure\b')
for elm in self.document(all, {'href': re.compile(r'.+')}):
if not enclosure_match.search(elm.get('rel', '')) and not self.isProbablyDownloadable(elm): continue
if elm.attrMap not in self.enclosures:
self.enclosures.append(elm.attrMap)
if elm.string and not elm.get('title'):
self.enclosures[-1]['title'] = elm.string
def findXFN(self):
all = lambda x: 1
for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}):
rels = elm.get('rel', '').split()
xfn_rels = []
for rel in rels:
if rel in self.known_xfn_relationships:
xfn_rels.append(rel)
if xfn_rels:
self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string})
def _parseMicroformats(htmlSource, baseURI, encoding):
if not BeautifulSoup: return
if _debug: sys.stderr.write('entering _parseMicroformats\n')
p = _MicroformatsParser(htmlSource, baseURI, encoding)
p.vcard = p.findVCards(p.document)
p.findTags()
p.findEnclosures()
p.findXFN()
return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard}
class _RelativeURIResolver(_BaseHTMLProcessor):
relative_uris = [('a', 'href'),
('applet', 'codebase'),
('area', 'href'),
('blockquote', 'cite'),
('body', 'background'),
('del', 'cite'),
('form', 'action'),
('frame', 'longdesc'),
('frame', 'src'),
('iframe', 'longdesc'),
('iframe', 'src'),
('head', 'profile'),
('img', 'longdesc'),
('img', 'src'),
('img', 'usemap'),
('input', 'src'),
('input', 'usemap'),
('ins', 'cite'),
('link', 'href'),
('object', 'classid'),
('object', 'codebase'),
('object', 'data'),
('object', 'usemap'),
('q', 'cite'),
('script', 'src')]
def __init__(self, baseuri, encoding, type):
_BaseHTMLProcessor.__init__(self, encoding, type)
self.baseuri = baseuri
def resolveURI(self, uri):
return _urljoin(self.baseuri, uri.strip())
def unknown_starttag(self, tag, attrs):
attrs = self.normalize_attrs(attrs)
attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def _resolveRelativeURIs(htmlSource, baseURI, encoding, type):
if _debug: sys.stderr.write('entering _resolveRelativeURIs\n')
p = _RelativeURIResolver(baseURI, encoding, type)
p.feed(htmlSource)
return p.output()
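# illustrative (not executed): with base URI 'http://example.org/feed/',
# _resolveRelativeURIs('<a href="../about">about</a>',
#                      'http://example.org/feed/', 'utf-8', 'text/html')
# rewrites the href to 'http://example.org/about'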
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'article',
'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', 'canvas',
'caption', 'center', 'cite', 'code', 'col', 'colgroup', 'command',
'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', 'dialog', 'dir',
'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', 'figure', 'footer',
'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i',
'img', 'input', 'ins', 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map',
'menu', 'meter', 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup',
'option', 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select',
'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', 'sub',
'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead',
'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autoplay', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols',
'colspan', 'compact', 'contenteditable', 'coords', 'data', 'datafld',
'datapagesize', 'datasrc', 'datetime', 'default', 'delay', 'dir',
'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for',
'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus',
'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode',
'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc',
'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max',
'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref',
'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size',
'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max',
'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows',
'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src',
'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template',
'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign',
'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap',
'xml:lang']
unacceptable_elements_with_end_tag = ['script', 'applet']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
# survey of common keywords found in feeds
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile(r'^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
r'\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
# svgtiny - foreignObject + linearGradient + radialGradient + stop
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'circle', 'defs', 'desc', 'ellipse', 'font-face',
'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', 'image',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath',
'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop',
'svg', 'switch', 'text', 'title', 'tspan', 'use']
# svgtiny + class + opacity + offset + xmlns + xmlns:xlink
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx',
'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-rule',
'font-family', 'font-size', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', 'glyph-name',
'gradientUnits', 'hanging', 'height', 'horiz-adv-x', 'horiz-origin-x',
'id', 'ideographic', 'k', 'keyPoints', 'keySplines', 'keyTimes',
'lang', 'mathematical', 'marker-end', 'marker-mid', 'marker-start',
'markerHeight', 'markerUnits', 'markerWidth', 'max', 'min', 'name',
'offset', 'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount', 'repeatDur',
'requiredExtensions', 'requiredFeatures', 'restart', 'rotate', 'rx',
'ry', 'slope', 'stemh', 'stemv', 'stop-color', 'stop-opacity',
'strikethrough-position', 'strikethrough-thickness', 'stroke',
'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole', 'xlink:href',
'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', 'xml:base',
'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', 'y2',
'zoomAndPan']
svg_attr_map = None
svg_elem_map = None
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
self.mathmlOK = 0
self.svgOK = 0
def unknown_starttag(self, tag, attrs):
acceptable_attributes = self.acceptable_attributes
keymap = {}
if not tag in self.acceptable_elements or self.svgOK:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
# not otherwise acceptable, perhaps it is MathML or SVG?
if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs:
self.mathmlOK = 1
if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs:
self.svgOK = 1
# choose acceptable attributes based on tag class, else bail
if self.mathmlOK and tag in self.mathml_elements:
acceptable_attributes = self.mathml_attributes
elif self.svgOK and tag in self.svg_elements:
# for most vocabularies, lowercasing is a good idea. Many
# svg elements, however, are camel case
if not self.svg_attr_map:
lower=[attr.lower() for attr in self.svg_attributes]
mix=[a for a in self.svg_attributes if a not in lower]
self.svg_attributes = lower
self.svg_attr_map = dict([(a.lower(),a) for a in mix])
lower=[attr.lower() for attr in self.svg_elements]
mix=[a for a in self.svg_elements if a not in lower]
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
tag = self.svg_elem_map.get(tag,tag)
keymap = self.svg_attr_map
else:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
if filter(lambda (n,v): n.startswith('xlink:'),attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
key=keymap.get(key,key)
clean_attrs.append((key,value))
elif key=='style':
clean_value = self.sanitize_style(value)
if clean_value: clean_attrs.append((key,clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math': self.mathmlOK = 0
elif self.svgOK and tag in self.svg_elements:
tag = self.svg_elem_map.get(tag,tag)
if tag == 'svg': self.svgOK = 0
else:
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
def sanitize_style(self, style):
# disallow urls
style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style)
# gauntlet
if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return ''
if not re.match("^(\s*[-\w]+\s*:\s*[^:;]*(;|$))*$", style): return ''
clean = []
for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value: continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
elif prop.split('-')[0].lower() in ['background','border','margin','padding']:
for keyword in value.split():
if not keyword in self.acceptable_css_keywords and \
not self.valid_css_values.match(keyword):
break
else:
clean.append(prop + ': ' + value + ';')
elif self.svgOK and prop.lower() in self.acceptable_svg_properties:
clean.append(prop + ': ' + value + ';')
return ' '.join(clean)
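# illustrative behavior of sanitize_style (not executed):
# 'color: red; behavior: url(evil.htc)' -> 'color: red;'
# ('behavior' is not an acceptable CSS property, and url(...) values are
# stripped before the property whitelist is consulted)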
def _sanitizeHTML(htmlSource, encoding, type):
p = _HTMLSanitizer(encoding, type)
p.feed(htmlSource)
data = p.output()
if TIDY_MARKUP:
# loop through list of preferred Tidy interfaces looking for one that's installed,
# then set up a common _tidy function to wrap the interface-specific API.
_tidy = None
for tidy_interface in PREFERRED_TIDY_INTERFACES:
try:
if tidy_interface == "uTidy":
from tidy import parseString as _utidy
def _tidy(data, **kwargs):
return str(_utidy(data, **kwargs))
break
elif tidy_interface == "mxTidy":
from mx.Tidy import Tidy as _mxtidy
def _tidy(data, **kwargs):
nerrors, nwarnings, data, errordata = _mxtidy.tidy(data, **kwargs)
return data
break
except:
pass
if _tidy:
utf8 = type(data) == type(u'')
if utf8:
data = data.encode('utf-8')
data = _tidy(data, output_xhtml=1, numeric_entities=1, wrap=0, char_encoding="utf8")
if utf8:
data = unicode(data, 'utf-8')
if data.count('<body'):
data = data.split('<body', 1)[1]
if data.count('>'):
data = data.split('>', 1)[1]
if data.count('</body'):
data = data.split('</body', 1)[0]
data = data.strip().replace('\r\n', '\n')
return data
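# illustrative (not executed, assuming TIDY_MARKUP is left at its default):
# _sanitizeHTML('<p onclick="evil()">hi<script>evil()</script></p>', 'utf-8', 'text/html')
# returns '<p>hi</p>' -- the onclick attribute is not whitelisted, and
# everything inside <script> is suppressed via unacceptablestack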
class _FeedURLHandler(urllib2.HTTPDigestAuthHandler, urllib2.HTTPRedirectHandler, urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
if ((code / 100) == 3) and (code != 304):
return self.http_error_302(req, fp, code, msg, headers)
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
return infourl
def http_error_302(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
def http_error_301(self, req, fp, code, msg, headers):
if headers.dict.has_key('location'):
infourl = urllib2.HTTPRedirectHandler.http_error_301(self, req, fp, code, msg, headers)
else:
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
if not hasattr(infourl, 'status'):
infourl.status = code
return infourl
http_error_300 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def http_error_401(self, req, fp, code, msg, headers):
# Check if
# - server requires digest auth, AND
# - we tried (unsuccessfully) with basic auth, AND
# - we're using Python 2.3.3 or later (digest auth is irreparably broken in earlier versions)
# If all conditions hold, parse authentication information
# out of the Authorization header we sent the first time
# (for the username and password) and the WWW-Authenticate
# header the server sent back (for the realm) and retry
# the request with the appropriate digest auth headers instead.
# This evil genius hack has been brought to you by Aaron Swartz.
host = urlparse.urlparse(req.get_full_url())[1]
try:
assert sys.version_info >= (2, 3, 3) # tuple compare; string-comparing sys.version breaks for 2.10+
assert base64 is not None
user, passw = base64.decodestring(req.headers['Authorization'].split(' ')[1]).split(':')
realm = re.findall('realm="([^"]*)"', headers['WWW-Authenticate'])[0]
self.add_password(realm, host, user, passw)
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
self.reset_retry_count()
return retry
except:
return self.http_error_default(req, fp, code, msg, headers)
def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers):
"""URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the modified argument is supplied, it can be a tuple of 9 integers
(as returned by gmtime() in the standard Python time module) or a date
string in any format supported by feedparser. Regardless, it MUST
be in GMT (Greenwich Mean Time). It will be reformatted into an
RFC 1123-compliant date and used as the value of an If-Modified-Since
request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
If the referrer argument is supplied, it will be used as the value of a
Referer[sic] request header.
If handlers is supplied, it is a list of handlers used to build a
urllib2 opener.
"""
if hasattr(url_file_stream_or_string, 'read'):
return url_file_stream_or_string
if url_file_stream_or_string == '-':
return sys.stdin
if urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp'):
if not agent:
agent = USER_AGENT
# test for inline user:password for basic auth
auth = None
if base64:
urltype, rest = urllib.splittype(url_file_stream_or_string)
realhost, rest = urllib.splithost(rest)
if realhost:
user_passwd, realhost = urllib.splituser(realhost)
if user_passwd:
url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest)
auth = base64.encodestring(user_passwd).strip()
# iri support
try:
if isinstance(url_file_stream_or_string,unicode):
url_file_stream_or_string = url_file_stream_or_string.encode('idna')
else:
url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna')
except:
pass
# try to open with urllib2 (to use optional headers)
request = urllib2.Request(url_file_stream_or_string)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if type(modified) == type(''):
modified = _parse_date(modified)
if modified:
# format into an RFC 1123-compliant timestamp. We can't use
# time.strftime() since the %a and %b directives can be affected
# by the current locale, but RFC 2616 states that dates must be
# in English.
short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5]))
if referrer:
request.add_header('Referer', referrer)
if gzip and zlib:
request.add_header('Accept-encoding', 'gzip, deflate')
elif gzip:
request.add_header('Accept-encoding', 'gzip')
elif zlib:
request.add_header('Accept-encoding', 'deflate')
else:
request.add_header('Accept-encoding', '')
if auth:
request.add_header('Authorization', 'Basic %s' % auth)
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
request.add_header('A-IM', 'feed') # RFC 3229 support
opener = urllib2.build_opener(*([_FeedURLHandler()] + handlers))
opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent
try:
return opener.open(request)
finally:
opener.close() # JohnD
# try to open with native open function (if url_file_stream_or_string is a filename)
try:
return open(url_file_stream_or_string)
except:
pass
# treat url_file_stream_or_string as string
return _StringIO(str(url_file_stream_or_string))
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
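# illustrative (not executed): a custom handler can be registered like the
# built-in ones below; it takes the raw date string and returns a 9-tuple
# in GMT, or None to let the next handler try. the name and format here
# are hypothetical:
# def _parse_date_epoch(dateString):
#     '''Parse a Unix timestamp like "1094400000"'''
#     if not dateString.isdigit(): return None
#     return time.gmtime(int(dateString))
# registerDateHandler(_parse_date_epoch)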
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO',
'YY-?MM-?DD', 'YY-?OOO', 'YYYY',
'-YY-?MM', '-OOO', '-YY',
'--MM-?DD', '--MM',
'---DD',
'CC', '']
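# for example (illustrative): '20040105' matches the first template
# 'YYYY-?MM-?DD' as 2004-01-05, while '030104' only matches the later
# 'YY-?MM-?DD' template and is read as 2003-01-04 (current century),
# which is why the ordering above matters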
_iso8601_re = [
tmpl.replace(
'YYYY', r'(?P<year>\d{4})').replace(
'YY', r'(?P<year>\d\d)').replace(
'MM', r'(?P<month>[01]\d)').replace(
'DD', r'(?P<day>[0123]\d)').replace(
'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
'CC', r'(?P<century>\d\d$)')
+ r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
+ r'(:(?P<second>\d{2}(\.\d*)?))?'
+ r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
for tmpl in _iso8601_tmpl]
del tmpl
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
del regex
def _parse_date_iso8601(dateString):
'''Parse a variety of ISO-8601-compatible formats like 20040105'''
m = None
for _iso8601_match in _iso8601_matches:
m = _iso8601_match(dateString)
if m: break
if not m: return
if m.span() == (0, 0): return
params = m.groupdict()
ordinal = params.get('ordinal', 0)
if ordinal:
ordinal = int(ordinal)
else:
ordinal = 0
year = params.get('year', '--')
if not year or year == '--':
year = time.gmtime()[0]
elif len(year) == 2:
# ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
year = 100 * int(time.gmtime()[0] / 100) + int(year)
else:
year = int(year)
month = params.get('month', '-')
if not month or month == '-':
# ordinals are NOT normalized by mktime, we simulate them
# by setting month=1, day=ordinal
if ordinal:
month = 1
else:
month = time.gmtime()[1]
month = int(month)
day = params.get('day', 0)
if not day:
# see above
if ordinal:
day = ordinal
elif params.get('century', 0) or \
params.get('year', 0) or params.get('month', 0):
day = 1
else:
day = time.gmtime()[2]
else:
day = int(day)
# special case of the century - is the first year of the 21st century
# 2000 or 2001 ? The debate goes on...
if 'century' in params:
year = (int(params['century']) - 1) * 100 + 1
# in ISO 8601 most fields are optional
for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
if not params.get(field, None):
params[field] = 0
hour = int(params.get('hour', 0))
minute = int(params.get('minute', 0))
second = int(float(params.get('second', 0)))
# weekday is normalized by mktime(), we can ignore it
weekday = 0
daylight_savings_flag = -1
tm = [year, month, day, hour, minute, second, weekday,
ordinal, daylight_savings_flag]
# ISO 8601 time zone adjustments
tz = params.get('tz')
if tz and tz != 'Z':
if tz[0] == '-':
tm[3] += int(params.get('tzhour', 0))
tm[4] += int(params.get('tzmin', 0))
elif tz[0] == '+':
tm[3] -= int(params.get('tzhour', 0))
tm[4] -= int(params.get('tzmin', 0))
else:
return None
# Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
# which is guaranteed to normalize d/m/y/h/m/s.
# Many implementations have bugs, but we'll pretend they don't.
return time.localtime(time.mktime(tm))
registerDateHandler(_parse_date_iso8601)
# 8-bit date handling routines written by ytrewq1.
_korean_year = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day = u'\uc77c' # c0cf in euc-kr
_korean_am = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr
_korean_onblog_date_re = \
re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
(_korean_year, _korean_month, _korean_day))
_korean_nate_date_re = \
re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
(_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
'''Parse a string according to the OnBlog 8-bit date format'''
m = _korean_onblog_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('OnBlog date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)
def _parse_date_nate(dateString):
'''Parse a string according to the Nate 8-bit date format'''
m = _korean_nate_date_re.match(dateString)
if not m: return
hour = int(m.group(5))
ampm = m.group(4)
if (ampm == _korean_pm):
hour += 12
hour = str(hour)
if len(hour) == 1:
hour = '0' + hour
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('Nate date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)
_mssql_date_re = \
re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
'''Parse a string according to the MS SQL date format'''
m = _mssql_date_re.match(dateString)
if not m: return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
{'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
'zonediff': '+09:00'}
if _debug: sys.stderr.write('MS SQL date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)
# Unicode strings for Greek date strings
_greek_months = \
{ \
u'\u0399\u03b1\u03bd': u'Jan', # c9e1ed in iso-8859-7
u'\u03a6\u03b5\u03b2': u'Feb', # d6e5e2 in iso-8859-7
u'\u039c\u03ac\u03ce': u'Mar', # ccdcfe in iso-8859-7
u'\u039c\u03b1\u03ce': u'Mar', # cce1fe in iso-8859-7
u'\u0391\u03c0\u03c1': u'Apr', # c1f0f1 in iso-8859-7
u'\u039c\u03ac\u03b9': u'May', # ccdce9 in iso-8859-7
u'\u039c\u03b1\u03ca': u'May', # cce1fa in iso-8859-7
u'\u039c\u03b1\u03b9': u'May', # cce1e9 in iso-8859-7
u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
u'\u0399\u03bf\u03bd': u'Jun', # c9efed in iso-8859-7
u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
u'\u0399\u03bf\u03bb': u'Jul', # c9efeb in iso-8859-7
u'\u0391\u03cd\u03b3': u'Aug', # c1fde3 in iso-8859-7
u'\u0391\u03c5\u03b3': u'Aug', # c1f5e3 in iso-8859-7
u'\u03a3\u03b5\u03c0': u'Sep', # d3e5f0 in iso-8859-7
u'\u039f\u03ba\u03c4': u'Oct', # cfeaf4 in iso-8859-7
u'\u039d\u03bf\u03ad': u'Nov', # cdefdd in iso-8859-7
u'\u039d\u03bf\u03b5': u'Nov', # cdefe5 in iso-8859-7
u'\u0394\u03b5\u03ba': u'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m: return
try:
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
except:
return
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
if _debug: sys.stderr.write('Greek date parsed as: %s\n' % rfc822date)
return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)
# Unicode strings for Hungarian date strings
_hungarian_months = \
{ \
u'janu\u00e1r': u'01', # e1 in iso-8859-2
u'febru\u00e1r': u'02', # e1 in iso-8859-2
u'febru\u00e1ri': u'02', # e1 in iso-8859-2 (variant form)
u'm\u00e1rcius': u'03', # e1 in iso-8859-2
u'\u00e1prilis': u'04', # e1 in iso-8859-2
u'm\u00e1jus': u'05', # e1 in iso-8859-2
u'm\u00e1ujus': u'05', # e1 in iso-8859-2 (variant form)
u'j\u00fanius': u'06', # fa in iso-8859-2
u'j\u00falius': u'07', # fa in iso-8859-2
u'augusztus': u'08',
u'szeptember': u'09',
u'okt\u00f3ber': u'10', # f3 in iso-8859-2
u'november': u'11',
u'december': u'12',
}
_hungarian_date_format_re = \
re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')
def _parse_date_hungarian(dateString):
'''Parse a string according to a Hungarian 8-bit date format.'''
m = _hungarian_date_format_re.match(dateString)
if not m: return
try:
month = _hungarian_months[m.group(2)]
day = m.group(3)
if len(day) == 1:
day = '0' + day
hour = m.group(4)
if len(hour) == 1:
hour = '0' + hour
except:
return
w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
{'year': m.group(1), 'month': month, 'day': day,\
'hour': hour, 'minute': m.group(5),\
'zonediff': m.group(6)}
if _debug: sys.stderr.write('Hungarian date parsed as: %s\n' % w3dtfdate)
return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
def __extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
def __extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
def __extract_tzd(m):
'''Return the Time Zone Designator as an offset in seconds from UTC.'''
if not m:
return 0
tzd = m.group('tzd')
if not tzd:
return 0
if tzd == 'Z':
return 0
hours = int(m.group('tzdhours'))
minutes = m.group('tzdminutes')
if minutes:
minutes = int(minutes)
else:
minutes = 0
offset = (hours*60 + minutes) * 60
if tzd[0] == '+':
return -offset
return offset
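# note the sign flip above: a '+05:00' designator yields -18000 seconds,
# because the offset is later *added* to the mktime() result to get back
# to UTC (see the __extract_tzd(m) term in the expression below)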
__date_re = ('(?P<year>\d\d\d\d)'
'(?:(?P<dsep>-|)'
'(?:(?P<julian>\d\d\d)'
'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
__tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
__tzd_rx = re.compile(__tzd_re)
__time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+ __tzd_re)
__datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
__datetime_rx = re.compile(__datetime_re)
m = __datetime_rx.match(dateString)
if (m is None) or (m.group() != dateString): return
gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
if gmt[0] == 0: return
return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)
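# illustrative (not executed): _parse_date_w3dtf('2003-12-31T10:14:55Z')
# returns the 9-tuple for 2003-12-31 10:14:55 GMT, and
# '2003-12-31T05:14:55-05:00' yields the same instant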
def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
rfc822._timezones.update(_additional_timezones)
registerDateHandler(_parse_date_rfc822)
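# illustrative (not executed): _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')
# parses directly, while a date-only string like '01 Jan 2004' is padded
# with '00:00:00 GMT' above before being handed to rfc822.parsedate_tz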
def _parse_date_perforce(aDateString):
"""parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
# Fri, 2006/09/15 08:19:53 EDT
_my_date_pattern = re.compile( \
r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
dow, year, month, day, hour, minute, second, tz = \
_my_date_pattern.search(aDateString).groups()
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz)
tm = rfc822.parsedate_tz(dateString)
if tm:
return time.gmtime(rfc822.mktime_tz(tm))
registerDateHandler(_parse_date_perforce)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
if not date9tuple: continue
if len(date9tuple) != 9:
if _debug: sys.stderr.write('date handler function must return 9-tuple\n')
raise ValueError
map(int, date9tuple)
return date9tuple
except Exception, e:
if _debug: sys.stderr.write('%s raised %s\n' % (handler.__name__, repr(e)))
pass
return None
def _getCharacterEncoding(http_headers, xml_data):
'''Get the character encoding of the XML document
http_headers is a dictionary
xml_data is a raw string (not Unicode)
This is so much trickier than it sounds, it's not even funny.
According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type
is application/xml, application/*+xml,
application/xml-external-parsed-entity, or application/xml-dtd,
the encoding given in the charset parameter of the HTTP Content-Type
takes precedence over the encoding given in the XML prefix within the
document, and defaults to 'utf-8' if neither are specified. But, if
the HTTP Content-Type is text/xml, text/*+xml, or
text/xml-external-parsed-entity, the encoding given in the XML prefix
within the document is ALWAYS IGNORED and only the encoding given in
the charset parameter of the HTTP Content-Type header should be
respected, and it defaults to 'us-ascii' if not specified.
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
must be treated as us-ascii. (We now do this.) And also that it
must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
encoding given in the XML prefix of the document and default to
'iso-8859-1' as per the HTTP specification (RFC 2616).
Then, assuming we didn't find a character encoding in the HTTP headers
(and the HTTP Content-type allowed us to look in the body), we need
to sniff the first few bytes of the XML data and try to determine
whether the encoding is ASCII-compatible. Section F of the XML
specification shows the way here:
http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
If the sniffed encoding is not ASCII-compatible, we need to make it
ASCII compatible so that we can sniff further into the XML declaration
to find the encoding attribute, which will tell us the true encoding.
Of course, none of this guarantees that we will be able to parse the
feed in the declared character encoding (assuming it was declared
correctly, which many are not). CJKCodecs and iconv_codec help a lot;
you should definitely install them if you can.
http://cjkpython.i18n.org/
'''
def _parseHTTPContentType(content_type):
'''takes HTTP Content-Type header and returns (content type, charset)
If no charset is specified, returns (content type, '')
If no content type is specified, returns ('', '')
Both return parameters are guaranteed to be lowercase strings
'''
content_type = content_type or ''
content_type, params = cgi.parse_header(content_type)
return content_type, params.get('charset', '').replace("'", '')
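# e.g. (illustrative): _parseHTTPContentType('text/xml; charset=utf-8')
# returns ('text/xml', 'utf-8'), and _parseHTTPContentType(None)
# returns ('', '')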
sniffed_xml_encoding = ''
xml_encoding = ''
true_encoding = ''
http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type'))
# Must sniff for non-ASCII-compatible character encodings before
# searching for XML declaration. This heuristic is defined in
# section F of the XML specification:
# http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = _ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and (xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
# ASCII-compatible
pass
xml_encoding_match = re.compile('^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
except:
xml_encoding_match = None
if xml_encoding_match:
xml_encoding = xml_encoding_match.groups()[0].lower()
if sniffed_xml_encoding and (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', 'iso-10646-ucs-4', 'ucs-4', 'csucs4', 'utf-16', 'utf-32', 'utf_16', 'utf_32', 'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
acceptable_content_type = 0
application_content_types = ('application/xml', 'application/xml-dtd', 'application/xml-external-parsed-entity')
text_content_types = ('text/xml', 'text/xml-external-parsed-entity')
if (http_content_type in application_content_types) or \
(http_content_type.startswith('application/') and http_content_type.endswith('+xml')):
acceptable_content_type = 1
true_encoding = http_encoding or xml_encoding or 'utf-8'
elif (http_content_type in text_content_types) or \
(http_content_type.startswith('text/')) and http_content_type.endswith('+xml'):
acceptable_content_type = 1
true_encoding = http_encoding or 'us-ascii'
elif http_content_type.startswith('text/'):
true_encoding = http_encoding or 'us-ascii'
elif http_headers and (not http_headers.has_key('content-type')):
true_encoding = xml_encoding or 'iso-8859-1'
else:
true_encoding = xml_encoding or 'utf-8'
return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type
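# illustrative precedence (assumed header values, not executed): a feed
# served as 'Content-Type: application/atom+xml; charset=iso-8859-1' whose
# XML declaration says encoding='utf-8' gets true_encoding 'iso-8859-1'
# (HTTP wins); one served as bare 'text/xml' with no charset parameter
# gets 'us-ascii' and its XML declaration is ignored, as the docstring
# above spells out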
def _toUTF8(data, encoding):
'''Changes an XML data stream on the fly to specify a new encoding
data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already
encoding is a string recognized by encodings.aliases
'''
if _debug: sys.stderr.write('entering _toUTF8, trying encoding %s\n' % encoding)
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16be':
sys.stderr.write('trying utf-16be instead\n')
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') and (data[2:4] != '\x00\x00'):
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-16le':
sys.stderr.write('trying utf-16le instead\n')
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-8':
sys.stderr.write('trying utf-8 instead\n')
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32be':
sys.stderr.write('trying utf-32be instead\n')
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
if _debug:
sys.stderr.write('stripping BOM\n')
if encoding != 'utf-32le':
sys.stderr.write('trying utf-32le instead\n')
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
if _debug: sys.stderr.write('successfully converted %s data to unicode\n' % encoding)
declmatch = re.compile('^<\?xml[^>]*?>')
newdecl = '''<?xml version='1.0' encoding='utf-8'?>'''
if declmatch.search(newdata):
newdata = declmatch.sub(newdecl, newdata)
else:
newdata = newdecl + u'\n' + newdata
return newdata.encode('utf-8')
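# illustrative (not executed):
# _toUTF8('<?xml version="1.0"?><feed/>', 'windows-1252')
# decodes the bytes as windows-1252 and returns UTF-8 bytes whose
# declaration has been rewritten to <?xml version='1.0' encoding='utf-8'?>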
def _stripDoctype(data):
'''Strips DOCTYPE from XML document, returns (rss_version, stripped_data)
rss_version may be 'rss091n' or None
stripped_data is the same XML document, minus the DOCTYPE
'''
entity_pattern = re.compile(r'<!ENTITY([^>]*?)>', re.MULTILINE)
entity_results=entity_pattern.findall(data)
data = entity_pattern.sub('', data)
doctype_pattern = re.compile(r'<!DOCTYPE([^>]*?)>', re.MULTILINE)
doctype_results = doctype_pattern.findall(data)
doctype = doctype_results and doctype_results[0] or ''
if doctype.lower().count('netscape'):
version = 'rss091n'
else:
version = None
# only allow in 'safe' inline entity definitions
replacement=''
if len(doctype_results)==1 and entity_results:
safe_pattern=re.compile('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')
safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
if safe_entities:
replacement='<!DOCTYPE feed [\n <!ENTITY %s>\n]>' % '>\n <!ENTITY '.join(safe_entities)
data = doctype_pattern.sub(replacement, data)
return version, data, dict(replacement and safe_pattern.findall(replacement))
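# illustrative (not executed):
# _stripDoctype('<!DOCTYPE rss SYSTEM "http://my.netscape.com/rss-0.91.dtd"><rss/>')
# returns ('rss091n', '<rss/>', {}) -- the Netscape DOCTYPE marks the feed
# as RSS 0.91 (Netscape flavor) and is stripped from the data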
# GeoRSS geometry parsers. Each return a dict with 'type' and 'coordinates'
# keys, or None in the case of a parsing error
def _parse_poslist(value, geom_type):
if geom_type == 'linestring':
return _parse_georss_line(value)
elif geom_type == 'polygon':
ring = _parse_georss_line(value)
return {'type': 'Polygon', 'coordinates': (ring['coordinates'],)}
else:
raise ValueError, "unsupported geometry type: %s" % geom_type
# Point coordinates are a 2-tuple (lon, lat)
def _parse_georss_point(value):
try:
lat, lon = value.replace(',', ' ').split()
return {'type': 'Point', 'coordinates': (float(lon), float(lat))}
except Exception, e:
if _debug:
sys.stderr.write('_parse_georss_point raised %s\n' % repr(e))
pass
return None
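# e.g. (illustrative): _parse_georss_point('45.256 -71.92') returns
# {'type': 'Point', 'coordinates': (-71.92, 45.256)} -- note the swap
# from lat/lon input order to (lon, lat) output order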
# Line coordinates are a tuple of 2-tuples ((lon0, lat0), ... (lonN, latN))
def _parse_georss_line(value):
try:
latlons = value.replace(',', ' ').split()
coords = []
for i in range(0, len(latlons), 2):
lat = float(latlons[i])
lon = float(latlons[i+1])
coords.append((lon, lat))
return {'type': 'LineString', 'coordinates': tuple(coords)}
except Exception, e:
if _debug:
sys.stderr.write('_parse_georss_line raised %s\n' % repr(e))
pass
return None
# Polygon coordinates are a tuple of closed LineString tuples. The first item
# in the tuple is the exterior ring. Subsequent items are interior rings, but
# georss:polygon elements usually have no interior rings.
def _parse_georss_polygon(value):
try:
latlons = value.replace(',', ' ').split()
coords = []
for i in range(0, len(latlons), 2):
lat = float(latlons[i])
lon = float(latlons[i+1])
coords.append((lon, lat))
return {'type': 'Polygon', 'coordinates': (tuple(coords),)}
except Exception, e:
if _debug:
sys.stderr.write('_parse_georss_polygon raised %s\n' % repr(e))
pass
return None
# Box coordinates are a 2-tuple of 2-tuples ((lon_ll, lat_ll), (lon_ur, lat_ur))
def _parse_georss_box(value):
try:
vals = [float(x) for x in value.replace(',', ' ').split()]
return {'type': 'Box', 'coordinates': ((vals[1], vals[0]), (vals[3], vals[2]))}
except Exception, e:
if _debug:
sys.stderr.write('_parse_georss_box raised %s\n' % repr(e))
pass
return None
# end geospatial parsers
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
'''Parse a feed from a URL, file, stream, or string'''
result = FeedParserDict()
result['feed'] = FeedParserDict()
result['entries'] = []
if _XML_AVAILABLE:
result['bozo'] = 0
if type(handlers) == types.InstanceType:
handlers = [handlers]
try:
f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
data = f.read()
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
f = None
# if feed is gzip-compressed, decompress it
if f and data and hasattr(f, 'headers'):
if gzip and f.headers.get('content-encoding', '') == 'gzip':
try:
data = gzip.GzipFile(fileobj=_StringIO(data)).read()
except Exception, e:
# Some feeds claim to be gzipped but they're not, so
# we get garbage. Ideally, we should re-request the
# feed without the 'Accept-encoding: gzip' header,
# but we don't.
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
elif zlib and f.headers.get('content-encoding', '') == 'deflate':
try:
data = zlib.decompress(data, -zlib.MAX_WBITS)
except Exception, e:
result['bozo'] = 1
result['bozo_exception'] = e
data = ''
# save HTTP headers
if hasattr(f, 'info'):
info = f.info()
result['etag'] = info.getheader('ETag')
last_modified = info.getheader('Last-Modified')
if last_modified:
result['modified'] = _parse_date(last_modified)
if hasattr(f, 'url'):
result['href'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
if hasattr(f, 'headers'):
result['headers'] = f.headers.dict
if hasattr(f, 'close'):
f.close()
# there are four encodings to keep track of:
# - http_encoding is the encoding declared in the Content-Type HTTP header
# - xml_encoding is the encoding declared in the <?xml declaration
# - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
# - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
http_headers = result.get('headers', {})
result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
_getCharacterEncoding(http_headers, data)
if http_headers and (not acceptable_content_type):
if http_headers.has_key('content-type'):
bozo_message = '%s is not an XML media type' % http_headers['content-type']
else:
bozo_message = 'no Content-type specified'
result['bozo'] = 1
result['bozo_exception'] = NonXMLContentType(bozo_message)
result['version'], data, entities = _stripDoctype(data)
baseuri = http_headers.get('content-location', result.get('href'))
baselang = http_headers.get('content-language', None)
# if server sent 304, we're done
if result.get('status', 0) == 304:
result['version'] = ''
result['debug_message'] = 'The feed has not changed since you last checked, ' + \
'so the server sent no data. This is a feature, not a bug!'
return result
# if there was a problem downloading, we're done
if not data:
return result
# determine character encoding
use_strict_parser = 0
known_encoding = 0
tried_encodings = []
# try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
if not proposed_encoding: continue
if proposed_encoding in tried_encodings: continue
tried_encodings.append(proposed_encoding)
try:
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
break
except:
pass
# if no luck and we have auto-detection library, try that
if (not known_encoding) and chardet:
try:
proposed_encoding = chardet.detect(data)['encoding']
if proposed_encoding and (proposed_encoding not in tried_encodings):
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried utf-8 yet, try that
if (not known_encoding) and ('utf-8' not in tried_encodings):
try:
proposed_encoding = 'utf-8'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried windows-1252 yet, try that
if (not known_encoding) and ('windows-1252' not in tried_encodings):
try:
proposed_encoding = 'windows-1252'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck and we haven't tried iso-8859-2 yet, try that.
if (not known_encoding) and ('iso-8859-2' not in tried_encodings):
try:
proposed_encoding = 'iso-8859-2'
tried_encodings.append(proposed_encoding)
data = _toUTF8(data, proposed_encoding)
known_encoding = use_strict_parser = 1
except:
pass
# if still no luck, give up
if not known_encoding:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingUnknown( \
'document encoding unknown, I tried ' + \
'%s, %s, utf-8, windows-1252, and iso-8859-2 but nothing worked' % \
(result['encoding'], xml_encoding))
result['encoding'] = ''
elif proposed_encoding != result['encoding']:
result['bozo'] = 1
result['bozo_exception'] = CharacterEncodingOverride( \
'document declared as %s, but parsed as %s' % \
(result['encoding'], proposed_encoding))
result['encoding'] = proposed_encoding
if not _XML_AVAILABLE:
use_strict_parser = 0
if use_strict_parser:
# initialize the SAX parser
feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
saxparser.setContentHandler(feedparser)
saxparser.setErrorHandler(feedparser)
source = xml.sax.xmlreader.InputSource()
source.setByteStream(_StringIO(data))
if hasattr(saxparser, '_ns_stack'):
# work around bug in built-in SAX parser (doesn't recognize xml: namespace)
# PyXML doesn't have this problem, and it doesn't have _ns_stack either
saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace':'xml'})
try:
saxparser.parse(source)
except Exception, e:
if _debug:
import traceback
traceback.print_stack()
traceback.print_exc()
sys.stderr.write('xml parsing failed\n')
result['bozo'] = 1
result['bozo_exception'] = feedparser.exc or e
use_strict_parser = 0
if not use_strict_parser:
feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '', entities)
feedparser.feed(data)
result['feed'] = feedparser.feeddata
result['entries'] = feedparser.entries
result['version'] = result['version'] or feedparser.version
result['namespaces'] = feedparser.namespacesInUse
return result
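# illustrative usage (not executed; the URL is a placeholder):
# result = parse('http://example.org/atom.xml', agent='MyApp/1.0')
# if result.get('status') == 304:
#     pass # unchanged since the etag/modified we sent; no new data
# else:
#     etag, modified = result.get('etag'), result.get('modified')
#     for entry in result.entries:
#         print entry.get('title')
# passing etag=etag, modified=modified on the next call enables the
# conditional GET behavior implemented in _open_resource above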
class Serializer:
def __init__(self, results):
self.results = results
class TextSerializer(Serializer):
def write(self, stream=sys.stdout):
self._writer(stream, self.results, '')
def _writer(self, stream, node, prefix):
if not node: return
if hasattr(node, 'keys'):
keys = node.keys()
keys.sort()
for k in keys:
if k in ('description', 'link'): continue
if node.has_key(k + '_detail'): continue
if node.has_key(k + '_parsed'): continue
self._writer(stream, node[k], prefix + k + '.')
elif type(node) == types.ListType:
index = 0
for n in node:
self._writer(stream, n, prefix[:-1] + '[' + str(index) + '].')
index += 1
else:
try:
s = str(node).encode('utf-8')
s = s.replace('\\', '\\\\')
s = s.replace('\r', '')
s = s.replace('\n', r'\n')
stream.write(prefix[:-1])
stream.write('=')
stream.write(s)
stream.write('\n')
except:
pass
class PprintSerializer(Serializer):
def write(self, stream=sys.stdout):
if self.results.has_key('href'):
stream.write(self.results['href'] + '\n\n')
from pprint import pprint
pprint(self.results, stream)
stream.write('\n')
if __name__ == '__main__':
try:
from optparse import OptionParser
except:
OptionParser = None
if OptionParser:
optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
optionParser.set_defaults(format="pprint")
optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
(options, urls) = optionParser.parse_args()
if options.verbose:
_debug = 1
if not urls:
optionParser.print_help()
sys.exit(0)
else:
if not sys.argv[1:]:
print __doc__
sys.exit(0)
class _Options:
etag = modified = agent = referrer = None
format = 'pprint'
options = _Options()
urls = sys.argv[1:]
zopeCompatibilityHack()
serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
for url in urls:
results = parse(url, etag=options.etag, modified=options.modified, agent=options.agent, referrer=options.referrer)
serializer(results).write(sys.stdout)
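    # Example invocation (a sketch): `python feedparser.py -f text
    # http://example.org/feed.xml` writes the parsed feed to stdout as
    # dotted key=value lines.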
#REVISION HISTORY
#1.0 - 9/27/2002 - MAP - fixed namespace processing on prefixed RSS 2.0 elements,
# added Simon Fell's test suite
#1.1 - 9/29/2002 - MAP - fixed infinite loop on incomplete CDATA sections
#2.0 - 10/19/2002
# JD - use inchannel to watch out for image and textinput elements which can
# also contain title, link, and description elements
# JD - check for isPermaLink='false' attribute on guid elements
# JD - replaced openAnything with open_resource supporting ETag and
# If-Modified-Since request headers
# JD - parse now accepts etag, modified, agent, and referrer optional
# arguments
# JD - modified parse to return a dictionary instead of a tuple so that any
# etag or modified information can be returned and cached by the caller
#2.0.1 - 10/21/2002 - MAP - changed parse() so that if we don't get anything
# because of etag/modified, return the old etag/modified to the caller to
# indicate why nothing is being returned
#2.0.2 - 10/21/2002 - JB - added the inchannel to the if statement, otherwise its
# useless. Fixes the problem JD was addressing by adding it.
#2.1 - 11/14/2002 - MAP - added gzip support
#2.2 - 1/27/2003 - MAP - added attribute support, admin:generatorAgent.
# start_admingeneratoragent is an example of how to handle elements with
# only attributes, no content.
#2.3 - 6/11/2003 - MAP - added USER_AGENT for default (if caller doesn't specify);
# also, make sure we send the User-Agent even if urllib2 isn't available.
# Match any variation of backend.userland.com/rss namespace.
#2.3.1 - 6/12/2003 - MAP - if item has both link and guid, return both as-is.
#2.4 - 7/9/2003 - MAP - added preliminary Pie/Atom/Echo support based on Sam Ruby's
# snapshot of July 1 <http://www.intertwingly.net/blog/1506.html>; changed
# project name
#2.5 - 7/25/2003 - MAP - changed to Python license (all contributors agree);
# removed unnecessary urllib code -- urllib2 should always be available anyway;
# return actual url, status, and full HTTP headers (as result['url'],
# result['status'], and result['headers']) if parsing a remote feed over HTTP --
# this should pass all the HTTP tests at <http://diveintomark.org/tests/client/http/>;
# added the latest namespace-of-the-week for RSS 2.0
#2.5.1 - 7/26/2003 - RMK - clear opener.addheaders so we only send our custom
# User-Agent (otherwise urllib2 sends two, which confuses some servers)
#2.5.2 - 7/28/2003 - MAP - entity-decode inline xml properly; added support for
# inline <xhtml:body> and <xhtml:div> as used in some RSS 2.0 feeds
#2.5.3 - 8/6/2003 - TvdV - patch to track whether we're inside an image or
# textInput, and also to return the character encoding (if specified)
#2.6 - 1/1/2004 - MAP - dc:author support (MarekK); fixed bug tracking
# nested divs within content (JohnD); fixed missing sys import (JohanS);
# fixed regular expression to capture XML character encoding (Andrei);
# added support for Atom 0.3-style links; fixed bug with textInput tracking;
# added support for cloud (MartijnP); added support for multiple
# category/dc:subject (MartijnP); normalize content model: 'description' gets
# description (which can come from description, summary, or full content if no
# description), 'content' gets dict of base/language/type/value (which can come
# from content:encoded, xhtml:body, content, or fullitem);
# fixed bug matching arbitrary Userland namespaces; added xml:base and xml:lang
# tracking; fixed bug tracking unknown tags; fixed bug tracking content when
# <content> element is not in default namespace (like Pocketsoap feed);
# resolve relative URLs in link, guid, docs, url, comments, wfw:comment,
# wfw:commentRSS; resolve relative URLs within embedded HTML markup in
# description, xhtml:body, content, content:encoded, title, subtitle,
# summary, info, tagline, and copyright; added support for pingback and
# trackback namespaces
#2.7 - 1/5/2004 - MAP - really added support for trackback and pingback
# namespaces, as opposed to 2.6 when I said I did but didn't really;
# sanitize HTML markup within some elements; added mxTidy support (if
# installed) to tidy HTML markup within some elements; fixed indentation
# bug in _parse_date (FazalM); use socket.setdefaulttimeout if available
# (FazalM); universal date parsing and normalization (FazalM): 'created', 'modified',
# 'issued' are parsed into 9-tuple date format and stored in 'created_parsed',
# 'modified_parsed', and 'issued_parsed'; 'date' is duplicated in 'modified'
# and vice-versa; 'date_parsed' is duplicated in 'modified_parsed' and vice-versa
#2.7.1 - 1/9/2004 - MAP - fixed bug handling " and '. fixed memory
# leak not closing url opener (JohnD); added dc:publisher support (MarekK);
# added admin:errorReportsTo support (MarekK); Python 2.1 dict support (MarekK)
#2.7.4 - 1/14/2004 - MAP - added workaround for improperly formed <br/> tags in
# encoded HTML (skadz); fixed unicode handling in normalize_attrs (ChrisL);
# fixed relative URI processing for guid (skadz); added ICBM support; added
# base64 support
#2.7.5 - 1/15/2004 - MAP - added workaround for malformed DOCTYPE (seen on many
# blogspot.com sites); added _debug variable
#2.7.6 - 1/16/2004 - MAP - fixed bug with StringIO importing
#3.0b3 - 1/23/2004 - MAP - parse entire feed with real XML parser (if available);
# added several new supported namespaces; fixed bug tracking naked markup in
# description; added support for enclosure; added support for source; re-added
# support for cloud which got dropped somehow; added support for expirationDate
#3.0b4 - 1/26/2004 - MAP - fixed xml:lang inheritance; fixed multiple bugs tracking
# xml:base URI, one for documents that don't define one explicitly and one for
# documents that define an outer and an inner xml:base that goes out of scope
# before the end of the document
#3.0b5 - 1/26/2004 - MAP - fixed bug parsing multiple links at feed level
#3.0b6 - 1/27/2004 - MAP - added feed type and version detection, result['version']
# will be one of SUPPORTED_VERSIONS.keys() or empty string if unrecognized;
# added support for creativeCommons:license and cc:license; added support for
# full Atom content model in title, tagline, info, copyright, summary; fixed bug
# with gzip encoding (not always telling server we support it when we do)
#3.0b7 - 1/28/2004 - MAP - support Atom-style author element in author_detail
# (dictionary of 'name', 'url', 'email'); map author to author_detail if author
# contains name + email address
#3.0b8 - 1/28/2004 - MAP - added support for contributor
#3.0b9 - 1/29/2004 - MAP - fixed check for presence of dict function; added
# support for summary
#3.0b10 - 1/31/2004 - MAP - incorporated ISO-8601 date parsing routines from
# xml.util.iso8601
#3.0b11 - 2/2/2004 - MAP - added 'rights' to list of elements that can contain
# dangerous markup; fiddled with decodeEntities (not right); liberalized
# date parsing even further
#3.0b12 - 2/6/2004 - MAP - fiddled with decodeEntities (still not right);
# added support to Atom 0.2 subtitle; added support for Atom content model
# in copyright; better sanitizing of dangerous HTML elements with end tags
# (script, frameset)
#3.0b13 - 2/8/2004 - MAP - better handling of empty HTML tags (br, hr, img,
# etc.) in embedded markup, in either HTML or XHTML form (<br>, <br/>, <br />)
#3.0b14 - 2/8/2004 - MAP - fixed CDATA handling in non-wellformed feeds under
# Python 2.1
#3.0b15 - 2/11/2004 - MAP - fixed bug resolving relative links in wfw:commentRSS;
# fixed bug capturing author and contributor URL; fixed bug resolving relative
# links in author and contributor URL; fixed bug resolving relative links in
# generator URL; added support for recognizing RSS 1.0; passed Simon Fell's
# namespace tests, and included them permanently in the test suite with his
# permission; fixed namespace handling under Python 2.1
#3.0b16 - 2/12/2004 - MAP - fixed support for RSS 0.90 (broken in b15)
#3.0b17 - 2/13/2004 - MAP - determine character encoding as per RFC 3023
#3.0b18 - 2/17/2004 - MAP - always map description to summary_detail (Andrei);
# use libxml2 (if available)
#3.0b19 - 3/15/2004 - MAP - fixed bug exploding author information when author
# name was in parentheses; removed ultra-problematic mxTidy support; patch to
# workaround crash in PyXML/expat when encountering invalid entities
# (MarkMoraes); support for textinput/textInput
#3.0b20 - 4/7/2004 - MAP - added CDF support
#3.0b21 - 4/14/2004 - MAP - added Hot RSS support
#3.0b22 - 4/19/2004 - MAP - changed 'channel' to 'feed', 'item' to 'entries' in
# results dict; changed results dict to allow getting values with results.key
# as well as results[key]; work around embedded illformed HTML with half
# a DOCTYPE; work around malformed Content-Type header; if character encoding
# is wrong, try several common ones before falling back to regexes (if this
# works, bozo_exception is set to CharacterEncodingOverride); fixed character
# encoding issues in BaseHTMLProcessor by tracking encoding and converting
# from Unicode to raw strings before feeding data to sgmllib.SGMLParser;
# convert each value in results to Unicode (if possible), even if using
# regex-based parsing
#3.0b23 - 4/21/2004 - MAP - fixed UnicodeDecodeError for feeds that contain
# high-bit characters in attributes in embedded HTML in description (thanks
# Thijs van de Vossen); moved guid, date, and date_parsed to mapped keys in
# FeedParserDict; tweaked FeedParserDict.has_key to return True if asking
# about a mapped key
#3.0fc1 - 4/23/2004 - MAP - made results.entries[0].links[0] and
# results.entries[0].enclosures[0] into FeedParserDict; fixed typo that could
# cause the same encoding to be tried twice (even if it failed the first time);
# fixed DOCTYPE stripping when DOCTYPE contained entity declarations;
# better textinput and image tracking in illformed RSS 1.0 feeds
#3.0fc2 - 5/10/2004 - MAP - added and passed Sam's amp tests; added and passed
# my blink tag tests
#3.0fc3 - 6/18/2004 - MAP - fixed bug in _changeEncodingDeclaration that
# failed to parse utf-16 encoded feeds; made source into a FeedParserDict;
# duplicate admin:generatorAgent/@rdf:resource in generator_detail.url;
# added support for image; refactored parse() fallback logic to try other
# encodings if SAX parsing fails (previously it would only try other encodings
# if re-encoding failed); remove unichr madness in normalize_attrs now that
# we're properly tracking encoding in and out of BaseHTMLProcessor; set
# feed.language from root-level xml:lang; set entry.id from rdf:about;
# send Accept header
#3.0 - 6/21/2004 - MAP - don't try iso-8859-1 (can't distinguish between
# iso-8859-1 and windows-1252 anyway, and most incorrectly marked feeds are
# windows-1252); fixed regression that could cause the same encoding to be
# tried twice (even if it failed the first time)
#3.0.1 - 6/22/2004 - MAP - default to us-ascii for all text/* content types;
# recover from malformed content-type header parameter with no equals sign
# ('text/xml; charset:iso-8859-1')
#3.1 - 6/28/2004 - MAP - added and passed tests for converting HTML entities
# to Unicode equivalents in illformed feeds (aaronsw); added and
# passed tests for converting character entities to Unicode equivalents
# in illformed feeds (aaronsw); test for valid parsers when setting
# XML_AVAILABLE; make version and encoding available when server returns
# a 304; add handlers parameter to pass arbitrary urllib2 handlers (like
# digest auth or proxy support); add code to parse username/password
# out of url and send as basic authentication; expose downloading-related
# exceptions in bozo_exception (aaronsw); added __contains__ method to
# FeedParserDict (aaronsw); added publisher_detail (aaronsw)
#3.2 - 7/3/2004 - MAP - use cjkcodecs and iconv_codec if available; always
# convert feed to UTF-8 before passing to XML parser; completely revamped
# logic for determining character encoding and attempting XML parsing
# (much faster); increased default timeout to 20 seconds; test for presence
# of Location header on redirects; added tests for many alternate character
# encodings; support various EBCDIC encodings; support UTF-16BE and
# UTF-16LE with or without a BOM; support UTF-8 with a BOM; support
# UTF-32BE and UTF-32LE with or without a BOM; fixed crashing bug if no
# XML parsers are available; added support for 'Content-encoding: deflate';
# send blank 'Accept-encoding: ' header if neither gzip nor zlib modules
# are available
#3.3 - 7/15/2004 - MAP - optimize EBCDIC to ASCII conversion; fix obscure
# problem tracking xml:base and xml:lang if element declares it, child
# doesn't, first grandchild redeclares it, and second grandchild doesn't;
# refactored date parsing; defined public registerDateHandler so callers
# can add support for additional date formats at runtime; added support
# for OnBlog, Nate, MSSQL, Greek, and Hungarian dates (ytrewq1); added
# zopeCompatibilityHack() which turns FeedParserDict into a regular
# dictionary, required for Zope compatibility, and also makes command-
# line debugging easier because pprint module formats real dictionaries
# better than dictionary-like objects; added NonXMLContentType exception,
# which is stored in bozo_exception when a feed is served with a non-XML
# media type such as 'text/plain'; respect Content-Language as default
# language if no xml:lang is present; cloud dict is now FeedParserDict;
# generator dict is now FeedParserDict; better tracking of xml:lang,
# including support for xml:lang='' to unset the current language;
# recognize RSS 1.0 feeds even when RSS 1.0 namespace is not the default
# namespace; don't overwrite final status on redirects (scenarios:
# redirecting to a URL that returns 304, redirecting to a URL that
# redirects to another URL with a different type of redirect); add
# support for HTTP 303 redirects
#4.0 - MAP - support for relative URIs in xml:base attribute; fixed
# encoding issue with mxTidy (phopkins); preliminary support for RFC 3229;
# support for Atom 1.0; support for iTunes extensions; new 'tags' for
# categories/keywords/etc. as array of dict
# {'term': term, 'scheme': scheme, 'label': label} to match Atom 1.0
# terminology; parse RFC 822-style dates with no time; lots of other
# bug fixes
#4.1 - MAP - removed socket timeout; added support for chardet library
#4.2 - MAP - added support for parsing microformats within content elements:
# currently supports rel-tag (maps to 'tags'), rel-enclosure (maps to
# 'enclosures'), XFN links within content elements (maps to 'xfn'),
# and hCard (parses as vCard); bug [ 1481975 ] Misencoded utf-8/win-1252
from zope.interface import Attribute, Interface
from zope.deprecation import deprecated
class IGeoreferenceable(Interface):
"""Marks classes that may be annotated with georeferencing properties.
"""
class IGeoInterface(Interface):
"""Provides the Python geo interface.
See http://trac.gispython.org/projects/PCL/wiki/PythonGeoInterface
for details.
"""
__geo_interface__ = Attribute("""Python Geo Interface""")
class IGeoreferenced(Interface):
"""A geographically referenced object.
The spatial reference system is implicitly long, lat WGS84. Geometry types
and coordinates shall follow the Python geo interface specification, which
itself tracks the GeoJSON draft specification at http://geojson.org.
"""
type = Attribute(
"""The name of the geometry type: 'Point', 'LineString', 'Polygon'"""
)
coordinates = Attribute("""A sequence of coordinate tuples""")
crs = Attribute("""A coordinate reference system as a dict.
The default is decimal degree longitude and latitude using the
WGS 1984 reference system.""")
class IWritableGeoreference(Interface):
def setGeoInterface(type, coordinates, crs):
"""Set the geometry via the geo interface."""
class IWriteGeoreferenced(IGeoreferenced, IWritableGeoreference):
"""Supports read/write georeferencing.
"""
# TODO: deprecate the interfaces below. They really aren't needed; IGeoItem
# and IGeoCollection are better implemented in views of other packages.
class IGeometry(IGeoInterface):
"""A geometry property with a geographic or projected coordinate system.
The spatial reference system is implicitly long, lat WGS84. Geometry types
and coordinates shall follow the Python geo interface specification, which
itself tracks the GeoJSON draft specification at http://geojson.org.
"""
type = Attribute(
'The name of the geometry type: "Point", "LineString", or "Polygon"'
)
coordinates = Attribute('A sequence of coordinate tuples')
class IGeoItem(IGeoInterface):
"""A simple georeferenced object, analogous to an entry in GeoRSS, or a
KML placemark.
"""
id = Attribute('Unique identifier for the item')
properties = Attribute('Mapping of item properties')
geometry = Attribute('An object that provides IGeometry (above)')
class IGeoCollection(IGeoInterface):
"""A collection of objects that provide IGeoItem, analogous to an Atom
feed or a KML folder.
"""
features = Attribute('Iterator over objects that provide IGeoItem')
deprecated(
'IGeometry',
'IGeometry will be removed from zgeo.geographer 1.0'
)
deprecated(
'IGeoItem',
'IGeoItem will be removed from zgeo.geographer 1.0'
)
deprecated(
'IGeoCollection',
'IGeoCollection will be removed from zgeo.geographer 1.0'
) | zgeo.geographer | /zgeo.geographer-0.3.1.tar.gz/zgeo.geographer-0.3.1/zgeo/geographer/interfaces.py | interfaces.py |
zgeo.geographer Package Readme
==============================
Overview
--------
The goal of zgeo.geographer is to standardize geographic location metadata.
This document will explain and exercise the zgeo.geographer interfaces and geo
location annotator.
Any object that implements zope.annotation.interfaces.IAttributeAnnotatable and
zgeo.geographer.interfaces.IGeoreferenceable can be adapted and geo-referenced.
The former marker is standard for Zope content objects, and the latter can be
easily configured via ZCML.
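A minimal, hypothetical ZCML registration for your own content class (the
dotted names below are illustrative assumptions, not part of this package)
might look like::

  <class class="my.package.content.Placemark">
    <implements
        interface="zgeo.geographer.interfaces.IGeoreferenceable"
        />
  </class>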
Tests
-----
Let's test with an example placemark, which provides both of the marker
interfaces mentioned above.
>>> from zgeo.geographer.example import Placemark
>>> placemark = Placemark()
Adapt it to IGeoreferenced
>>> from zgeo.geographer.interfaces import IGeoreferenced
>>> geo = IGeoreferenced(placemark)
Its properties should all be None
>>> geo.type is None
True
>>> geo.coordinates is None
True
>>> geo.crs is None
True
Now set the location geometry to type "Point" and coordinates 105.08 degrees
West, 40.59 degrees North using setGeoInterface()
>>> geo.setGeoInterface('Point', (-105.08, 40.59))
A georeferenced object has "type" and "coordinates" attributes which should
give us back what we put in.
>>> geo.type
'Point'
>>> geo.coordinates
(-105.08, 40.590000000000003)
>>> geo.crs is None
True
An event should have been sent
>>> from zope.component.eventtesting import getEvents
>>> from zope.lifecycleevent.interfaces import IObjectModifiedEvent
>>> events = getEvents(IObjectModifiedEvent)
>>> len(events)
1
>>> events[0].object is placemark
True
| zgeo.geographer | /zgeo.geographer-0.3.1.tar.gz/zgeo.geographer-0.3.1/zgeo/geographer/README.txt | README.txt |
import time
from zope.traversing.browser.interfaces import IAbsoluteURL
from zope.dublincore.interfaces import ICMFDublinCore
try:
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
raise Exception, "Five's ViewPageTemplateFile doesn't work with named templating"
except:
from zope.app.pagetemplate import ViewPageTemplateFile
from zope.interface import implements
from zope.publisher.browser import BrowserPage
from zope.formlib.namedtemplate import NamedTemplate
from zope.formlib.namedtemplate import NamedTemplateImplementation
from zope.component import getMultiAdapter
from zgeo.geographer.interfaces import IGeoreferenced
from zgeo.kml.interfaces import IFeature, IPlacemark, IContainer
def coords_to_kml(geom):
gtype = geom.type
if gtype == 'Point':
coords = (geom.coordinates,)
elif gtype == 'Polygon':
coords = geom.coordinates[0]
else:
coords = geom.coordinates
if len(coords[0]) == 2:
tuples = ('%f,%f,0.0' % tuple(c) for c in coords)
elif len(coords[0]) == 3:
tuples = ('%f,%f,%f' % tuple(c) for c in coords)
else:
raise ValueError, "Invalid dimensions"
return ' '.join(tuples)
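# A usage sketch (not part of the original module): for a LineString whose
# coordinates are ((0.0, 0.0), (1.0, 1.0)), coords_to_kml returns
# '0.000000,0.000000,0.0 1.000000,1.000000,0.0'.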
def rfc3339(date):
# Convert ISO or RFC 3339 datetime strings to RFC 3339
    # Zope's ZDCAnnotatableAdapter gives RFC 3339; drop the fractional seconds
if str(date).find('T') == 10:
s = date.split('.')[0]
# Plone's AT content types give ISO
else:
t = time.strptime(date, '%Y-%m-%d %H:%M:%S')
s = time.strftime('%Y-%m-%dT%H:%M:%S', t)
    # local offset in hours derived from time.timezone (seconds west of UTC)
    tz = '%03d:00' % -int(time.timezone/3600)
return s + tz
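# A usage sketch (not part of the original module):
# rfc3339('2004-06-21T12:00:00.123') drops the fractional seconds and appends
# the local UTC offset, e.g. '2004-06-21T12:00:00-07:00' on a US Mountain
# Time host.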
def absoluteURL(ob, request):
return getMultiAdapter((ob, request), IAbsoluteURL)()
class NullGeometry(object):
type = None
coordinates = None
class Feature(BrowserPage):
"""Not to be instantiated.
"""
implements(IFeature)
@property
def id(self):
return '%s/@@%s' % (absoluteURL(self.context, self.request), self.__name__)
@property
def name(self):
return self.dc.Title()
@property
def description(self):
return self.dc.Description()
@property
def author(self):
return {
'name': self.dc.Creator(),
'uri': '',
'email': ''
}
@property
def alternate_link(self):
return absoluteURL(self.context, self.request)
class Placemark(Feature):
implements(IPlacemark)
__name__ = 'kml-placemark'
def __init__(self, context, request):
self.context = context
self.request = request
self.dc = ICMFDublinCore(self.context)
try:
self.geom = IGeoreferenced(self.context)
except:
self.geom = NullGeometry()
@property
def hasPoint(self):
return int(self.geom.type == 'Point')
@property
def hasLineString(self):
return int(self.geom.type == 'LineString')
@property
def hasPolygon(self):
return int(self.geom.type == 'Polygon')
@property
def coords_kml(self):
return coords_to_kml(self.geom)
def __call__(self):
return self.template().encode('utf-8')
class Folder(Feature):
implements(IContainer)
__name__ = 'kml-folder'
def __init__(self, context, request):
self.context = context
self.request = request
self.dc = ICMFDublinCore(self.context)
@property
def features(self):
for item in self.context.values():
yield Placemark(item, self.request)
class Document(Feature):
implements(IContainer)
__name__ = 'kml-document'
template = NamedTemplate('template-kml-document')
def __init__(self, context, request):
self.context = context
self.request = request
self.dc = ICMFDublinCore(self.context)
@property
def features(self):
for item in self.context.values():
yield getMultiAdapter((item, self.request), IFeature)
def __call__(self):
return self.template().encode('utf-8')
document_template = NamedTemplateImplementation(
ViewPageTemplateFile('kml_document.pt')
)
zgeo.kml Package Readme
=======================
Georeferenced objects can be represented as KML placemarks and documents
using this package's browser views.
Test the KML Placemark view of the placemark
>>> from zgeo.kml.browser import Placemark
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> view = Placemark(placemark, request)
>>> view
<zgeo.kml.browser.Placemark object at ...>
>>> view.id
'http://127.0.0.1/places/a/@@kml-placemark'
>>> view.name
'A'
>>> view.description
"Place marked 'A'"
>>> view.alternate_link
'http://127.0.0.1/places/a'
>>> view.hasPolygon
0
>>> view.hasLineString
0
>>> view.hasPoint
1
>>> view.coords_kml
'-105.080000,40.590000,0.0'
Test the KML Document view
>>> from zgeo.kml.browser import Document
>>> view = Document(places, request)
>>> view
<zgeo.kml.browser.Document object at ...>
>>> view.name
'Places'
>>> view.alternate_link
'http://127.0.0.1/places'
>>> view.features
<generator object at ...>
>>> [e for e in view.features][0]
<zgeo.kml.browser.Placemark object at ...>
from zope.dublincore.interfaces import ICMFDublinCore
from zope.interface import implements
import zope.security.proxy
from Products.CMFCore.utils import getToolByName
from zgeo.geographer.interfaces import IGeoreferenced
from zgeo.atom.interfaces import IWriteAtomMetadata
from zgeo.atom.browser import rfc3339, NullGeometry, LinkEntry, SubscriptionFeed
from zgeo.atom.link import Link
class Geometry(object):
implements(IGeoreferenced)
def __init__(self, type, coordinates):
self.type = type
self.coordinates = coordinates
class BrainLinkEntry(LinkEntry):
def __init__(self, context, request):
self.context = context
self.request = request
try:
g = self.context.zgeo_geometry
self.geom = Geometry(g['type'], g['coordinates'])
except:
self.geom = NullGeometry()
@property
def id(self):
return 'urn:uuid:%s' % self.context.UID
@property
def title(self):
return self.context.Title
@property
def updated(self):
return rfc3339(self.context.ModificationDate)
@property
def author(self):
return {
'name': self.context.Creator,
'uri': '',
'email': ''
}
@property
def published(self):
return rfc3339(self.context.CreationDate)
@property
def summary(self):
return self.context.Description
@property
def links(self):
alt_url = '/'.join(
[self.request['BASE1']]
+ self.request.physicalPathToVirtualPath(self.context.getPath())
)
items = {
'alternate': Link(
alt_url,
rel='alternate',
type='text/html')
}
return items
class TopicSubscriptionFeed(SubscriptionFeed):
@property
def updated(self):
return rfc3339(self.context.CreationDate())
@property
def entries(self):
for brain in self.context.queryCatalog():
            yield BrainLinkEntry(brain, self.request)
Geo Annotation for Plone
========================
Make a topic in our folder
>>> self.setRoles(('Manager',))
>>> folder = self.folder
>>> oid = self.folder.invokeFactory('Topic', 'topic')
>>> topic = self.folder[oid]
>>> c = topic.addCriterion('getGeometry', 'ATBooleanCriterion')
Add geo-referenced content
>>> oid = self.folder.invokeFactory('Document', 'doc')
>>> doc = self.folder[oid]
>>> from zgeo.geographer.interfaces import IWriteGeoreferenced
>>> geo = IWriteGeoreferenced(doc)
>>> geo.setGeoInterface('Point', (-100, 40))
Check the topic
>>> brain = [b for b in topic.queryCatalog() if b.id == 'doc'][0]
>>> brain.zgeo_geometry['type']
'Point'
>>> brain.zgeo_geometry['coordinates']
(-100, 40)
**********************
zgeo.recipe.openlayers
**********************
.. contents::
The recipe provides the means to create `custom build profiles`_ for the
OpenLayers_ javascript library. A compressed, single OpenLayers.js file, and
accompanying ``img`` and ``theme`` directories are created in the named
directory under the buildout's ``parts`` directory.
For example, consider the following buildout.cfg file::
[buildout]
parts = openlayers-2.7-wms
[openlayers-2.7-wms]
recipe = zgeo.recipe.openlayers
url = http://openlayers.org/download/OpenLayers-2.7.tar.gz
include =
OpenLayers/Map.js
OpenLayers/Layer/WMS.js
Building it out::
$ bin/buildout -c buildout.cfg
Installing openlayers.
openlayers-2.7-wms: Creating download directory: /Users/seang/code/ol-recipes/downloads
openlayers-2.7-wms: Extracting package to /Users/seang/code/ol-recipes/parts/openlayers__compile__
Merging libraries.
Importing: OpenLayers.js
Importing: OpenLayers/BaseTypes.js
Importing: OpenLayers/Map.js
Importing: OpenLayers/SingleFile.js
Importing: OpenLayers/Util.js
Importing: OpenLayers/BaseTypes/Class.js
Importing: OpenLayers/Layer/WMS.js
Resolution pass 1...
Importing: OpenLayers/BaseTypes/Bounds.js
...
Re-ordering files...
Exporting: OpenLayers/SingleFile.js
Exporting: OpenLayers.js
...
Exporting: OpenLayers/Layer/WMS.js
Total files merged: 22
Compressing using jsmin.
Adding license file.
Writing to OpenLayers.js.
Done.
Produces these files::
$ ls -l parts/openlayers-2.7-wms/
total 224
-rw-r--r-- 1 seang staff 112535 Jun 3 13:41 OpenLayers.js
drwxr-xr-x 25 seang staff 850 Jun 3 13:41 img
drwxr-xr-x 3 seang staff 102 Jun 3 13:41 theme
.. _custom build profiles: http://docs.openlayers.org/library/deploying.html#custom-build-profiles
.. _OpenLayers: http://openlayers.org
Supported options
=================
url
URL to the package that will be downloaded and extracted. The
supported package formats are .tar.gz, .tar.bz2, and .zip. The
value must be a full URL,
e.g. http://openlayers.org/download/OpenLayers-2.7.tar.gz. The
``path`` option can not be used at the same time with ``url``.
path
Path to a local directory containing the source code to be built
and installed. The directory must contain the ``configure``
script. The ``url`` option can not be used at the same time with
``path``.
filename
Name of javascript file to be written. Defaults to ``OpenLayers.js``.
md5sum
MD5 checksum for the package file. If available the MD5
checksum of the downloaded package will be compared to this value
and if the values do not match the execution of the recipe will
fail.
patch-binary
Path to the ``patch`` program. Defaults to 'patch' which should
work on any system that has the ``patch`` program available in the
system ``PATH``.
patch-options
Options passed to the ``patch`` program. Defaults to ``-p0``.
patches
List of patch files to the applied to the extracted source. Each
file should be given on a separate line.
keep-compile-dir
Switch to optionally keep the temporary directory where the
package was compiled. This is mostly useful for other recipes that
use this recipe to compile a software but wish to do some
additional steps not handled by this recipe. The location of the
compile directory is stored in ``options['compile-directory']``.
Accepted values are 'true' or 'false', defaults to 'false'.
Additionally, the recipe honors the ``download-directory`` option set
in the ``[buildout]`` section and stores the downloaded files under
it. If the value is not set a directory called ``downloads`` will be
created in the root of the buildout and the ``download-directory``
option set accordingly.
The recipe will first check if there is a local copy of the package
before downloading it from the net. Files can be shared among
different buildouts by setting the ``download-directory`` to the same
location.
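For example, to share downloaded packages among several buildouts (the
path below is only an illustration)::

  [buildout]
  download-directory = /var/cache/buildout-downloads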
Example usage
=============
We'll use a simple tarball to demonstrate the recipe
>>> import os.path
>>> src = join(os.path.dirname(__file__), 'testdata')
>>> ls(src)
- OpenLayers-2.7.tar.gz
- README.txt
Let's create a buildout to build and install the package
>>> write('buildout.cfg',
... """
... [buildout]
... parts = package
...
... [package]
... recipe = zgeo.recipe.openlayers
... url = file://%s/OpenLayers-2.7.tar.gz
... include =
... OpenLayers/Map.js
... OpenLayers/Layer/WMS.js
... """ % src)
This will download, extract and build our demo package with the
default build options
>>> print system(buildout)
Installing package.
package: Creating download directory: /sample-buildout/downloads
package: Extracting package to /sample-buildout/parts/package__compile__
Merging libraries.
Importing: OpenLayers.js
Importing: OpenLayers/BaseTypes.js
Importing: OpenLayers/Map.js
Importing: OpenLayers/SingleFile.js
Importing: OpenLayers/Util.js
Importing: OpenLayers/BaseTypes/Class.js
Importing: OpenLayers/Layer/WMS.js
<BLANKLINE>
Resolution pass 1...
Importing: OpenLayers/BaseTypes/Bounds.js
Importing: OpenLayers/BaseTypes/Element.js
Importing: OpenLayers/BaseTypes/LonLat.js
Importing: OpenLayers/BaseTypes/Pixel.js
Importing: OpenLayers/BaseTypes/Size.js
Importing: OpenLayers/Events.js
Importing: OpenLayers/Lang/en.js
Importing: OpenLayers/Layer/Grid.js
Importing: OpenLayers/Tile/Image.js
Importing: OpenLayers/Tween.js
<BLANKLINE>
Resolution pass 2...
Importing: OpenLayers/Lang.js
Importing: OpenLayers/Layer/HTTPRequest.js
Importing: OpenLayers/Tile.js
<BLANKLINE>
Resolution pass 3...
Importing: OpenLayers/Layer.js
<BLANKLINE>
Resolution pass 4...
Importing: OpenLayers/Projection.js
<BLANKLINE>
Resolution pass 5...
<BLANKLINE>
Re-ordering files...
<BLANKLINE>
Exporting: OpenLayers/SingleFile.js
Exporting: OpenLayers.js
Exporting: OpenLayers/BaseTypes.js
Exporting: OpenLayers/BaseTypes/Class.js
Exporting: OpenLayers/Util.js
Exporting: OpenLayers/BaseTypes/Bounds.js
Exporting: OpenLayers/BaseTypes/Element.js
Exporting: OpenLayers/BaseTypes/LonLat.js
Exporting: OpenLayers/BaseTypes/Pixel.js
Exporting: OpenLayers/BaseTypes/Size.js
Exporting: OpenLayers/Lang.js
Exporting: OpenLayers/Tween.js
Exporting: OpenLayers/Events.js
Exporting: OpenLayers/Lang/en.js
Exporting: OpenLayers/Projection.js
Exporting: OpenLayers/Tile.js
Exporting: OpenLayers/Map.js
Exporting: OpenLayers/Tile/Image.js
Exporting: OpenLayers/Layer.js
Exporting: OpenLayers/Layer/HTTPRequest.js
Exporting: OpenLayers/Layer/Grid.js
Exporting: OpenLayers/Layer/WMS.js
<BLANKLINE>
Total files merged: 22
Compressing using jsmin.
Adding license file.
Writing to OpenLayers.js.
Done.
<BLANKLINE>
Installing checkouts
====================
Sometimes instead of downloading and building an existing tarball we
need to work with code that is already available on the filesystem,
for example an SVN checkout.
Instead of providing the ``url`` option we will provide a ``path``
option to the directory containing the source code.
Let's demonstrate this by first unpacking our test package to the
filesystem and building that
>>> checkout_dir = tmpdir('checkout')
>>> import setuptools.archive_util
>>> setuptools.archive_util.unpack_archive('%s/OpenLayers-2.7.tar.gz' % src,
... checkout_dir)
>>> ls(checkout_dir)
d OpenLayers-2.7
>>> write('buildout.cfg',
... """
... [buildout]
... parts = package
...
... [package]
... recipe = zgeo.recipe.openlayers
... path = %s/OpenLayers-2.7
... filename = ol-wms.js
... include =
... OpenLayers/Map.js
... OpenLayers/Layer/WMS.js
... """ % checkout_dir)
>>> print system(buildout)
Uninstalling package.
Installing package.
package: Using local source directory: /checkout/OpenLayers-2.7
Merging libraries.
Importing: OpenLayers.js
Importing: OpenLayers/BaseTypes.js
Importing: OpenLayers/Map.js
Importing: OpenLayers/SingleFile.js
Importing: OpenLayers/Util.js
Importing: OpenLayers/BaseTypes/Class.js
Importing: OpenLayers/Layer/WMS.js
<BLANKLINE>
Resolution pass 1...
Importing: OpenLayers/BaseTypes/Bounds.js
Importing: OpenLayers/BaseTypes/Element.js
Importing: OpenLayers/BaseTypes/LonLat.js
Importing: OpenLayers/BaseTypes/Pixel.js
Importing: OpenLayers/BaseTypes/Size.js
Importing: OpenLayers/Events.js
Importing: OpenLayers/Lang/en.js
Importing: OpenLayers/Layer/Grid.js
Importing: OpenLayers/Tile/Image.js
Importing: OpenLayers/Tween.js
<BLANKLINE>
Resolution pass 2...
Importing: OpenLayers/Lang.js
Importing: OpenLayers/Layer/HTTPRequest.js
Importing: OpenLayers/Tile.js
<BLANKLINE>
Resolution pass 3...
Importing: OpenLayers/Layer.js
<BLANKLINE>
Resolution pass 4...
Importing: OpenLayers/Projection.js
<BLANKLINE>
Resolution pass 5...
<BLANKLINE>
Re-ordering files...
<BLANKLINE>
Exporting: OpenLayers/SingleFile.js
Exporting: OpenLayers.js
Exporting: OpenLayers/BaseTypes.js
Exporting: OpenLayers/BaseTypes/Class.js
Exporting: OpenLayers/Util.js
Exporting: OpenLayers/BaseTypes/Bounds.js
Exporting: OpenLayers/BaseTypes/Element.js
Exporting: OpenLayers/BaseTypes/LonLat.js
Exporting: OpenLayers/BaseTypes/Pixel.js
Exporting: OpenLayers/BaseTypes/Size.js
Exporting: OpenLayers/Lang.js
Exporting: OpenLayers/Tween.js
Exporting: OpenLayers/Events.js
Exporting: OpenLayers/Lang/en.js
Exporting: OpenLayers/Projection.js
Exporting: OpenLayers/Tile.js
Exporting: OpenLayers/Map.js
Exporting: OpenLayers/Tile/Image.js
Exporting: OpenLayers/Layer.js
Exporting: OpenLayers/Layer/HTTPRequest.js
Exporting: OpenLayers/Layer/Grid.js
Exporting: OpenLayers/Layer/WMS.js
<BLANKLINE>
Total files merged: 22
Compressing using jsmin.
Adding license file.
Writing to ol-wms.js.
Done.
<BLANKLINE>
>>> ls('parts/package')
d img
- ol-wms.js
d theme
Since using the ``path`` option implies that the source code has been
acquired outside of the recipe's control, the responsibility for managing
it also lies outside of the recipe.
"""Recipe openlayers"""
import zc.buildout
import urlparse
import tempfile
import logging
import urllib
import shutil
import md5
import imp
import os
import hexagonit.recipe.download
class Recipe:
"""zc.buildout recipe for compiling and installing software"""
def __init__(self, buildout, name, options):
self.options = options
self.buildout = buildout
self.name = name
log = logging.getLogger(self.name)
options['location'] = os.path.join(
buildout['buildout']['parts-directory'],
self.name)
options['prefix'] = options['location']
options['url'] = options.get('url', '').strip()
options['path'] = options.get('path', '').strip()
if options['url'] and options['path']:
raise zc.buildout.UserError('You must use either "url" or "path", not both!')
if not (options['url'] or options['path']):
raise zc.buildout.UserError('You must provide either "url" or "path".')
if options['url']:
options['compile-directory'] = '%s__compile__' % options['location']
else:
options['compile-directory'] = options['path']
options['filename'] = options.get('filename', 'OpenLayers.js').strip()
def update(self):
pass
def call_script(self, script):
"""This method is copied from z3c.recipe.runscript.
See http://pypi.python.org/pypi/z3c.recipe.runscript for details.
"""
filename, callable = script.split(':')
filename = os.path.abspath(filename)
module = imp.load_source('script', filename)
# Run the script with all options
getattr(module, callable.strip())(self.options, self.buildout)
def run(self, cmd):
log = logging.getLogger(self.name)
if os.system(cmd):
log.error('Error executing command: %s' % cmd)
raise zc.buildout.UserError('System error')
def install(self):
log = logging.getLogger(self.name)
parts = []
patch_cmd = self.options.get('patch-binary', 'patch').strip()
patch_options = ' '.join(self.options.get('patch-options', '-p0').split())
patches = self.options.get('patches', '').split()
# Download the source using hexagonit.recipe.download
if self.options['url']:
compile_dir = self.options['compile-directory']
os.mkdir(compile_dir)
try:
opt = self.options.copy()
opt['destination'] = compile_dir
hexagonit.recipe.download.Recipe(
self.buildout, self.name, opt).install()
except:
shutil.rmtree(compile_dir)
raise
else:
log.info('Using local source directory: %s' \
% self.options['path']
)
compile_dir = self.options['path']
os.mkdir(self.options['location'])
os.chdir(compile_dir)
try:
if patches:
log.info('Applying patches')
for patch in patches:
self.run('%s %s < %s' % (patch_cmd, patch_options, patch))
# Build
contents = os.listdir(compile_dir)
if 'build' in contents:
os.chdir('build')
else:
os.chdir(contents[0])
os.chdir('build')
cfg = """# Generated by zgeo.recipe.openlayers
[first]
OpenLayers/SingleFile.js
OpenLayers.js
OpenLayers/BaseTypes.js
OpenLayers/BaseTypes/Class.js
OpenLayers/Util.js
[last]
[include]
%s
[exclude]
""" % self.options['include']
f = open('recipe.cfg', 'w')
f.write(cfg)
f.close()
self.run('./build.py recipe %s' % self.options['filename'])
shutil.copy(
self.options['filename'],
os.path.join(
self.options['location'], self.options['filename']
)
)
shutil.copytree(
'../img',
os.path.join(
self.options['location'], 'img'
)
)
shutil.copytree(
'../theme',
os.path.join(
self.options['location'], 'theme'
)
)
except:
log.error('Build error. The package is left as is at %s where '
'you can inspect what went wrong' % os.getcwd())
raise
if self.options['url']:
if self.options.get('keep-compile-dir', '').lower() in ('true', 'yes', '1', 'on'):
# If we're keeping the compile directory around, add it to
# the parts so that it's also removed when this recipe is
# uninstalled.
parts.append(self.options['compile-directory'])
else:
shutil.rmtree(compile_dir)
del self.options['compile-directory']
parts.append(self.options['location'])
        return parts
from zope.component import getUtility
from zope.interface import implements
from zope.app.intid.interfaces import IIntIds
from zope.app.intid.interfaces import IntIdRemovedEvent
from zope.app.intid.interfaces import IntIdAddedEvent
from zope.location.interfaces import ILocation
from zope.component import adapter, getAllUtilitiesRegisteredFor
from zope.app.keyreference.interfaces import IKeyReference
from zope.app.container.interfaces import IObjectRemovedEvent
from zope.app.container.interfaces import IObjectAddedEvent
from zope.event import notify
from zgeo.spatialindex.site import get_catalog, createLocalSpatialCatalog
from zgeo.spatialindex.interfaces import IAddSpatialContainerEvent
from zope.security.proxy import removeSecurityProxy
from zgeo.geographer.interfaces import IGeoreferenceable
from zope.app.generations.utility import findObjectsProviding
class AddSpatialContainerEvent(object):
implements(IAddSpatialContainerEvent)
def __init__(self, container):
self.object = container
@adapter(IAddSpatialContainerEvent)
def addSpatialContainerSubscriber(event):
createLocalSpatialCatalog(event.object)
for ob in findObjectsProviding(event.object, IGeoreferenceable):
utilities = tuple(getAllUtilitiesRegisteredFor(IIntIds, context=ob))
        if utilities: # assert that there are any utilities
key = IKeyReference(ob, None)
if key is not None:
for utility in utilities:
utility.register(key)
cat = get_catalog(ob)
if cat is not None:
id = getUtility(IIntIds, context=ob).getId(ob)
cat.index_doc(id, ob)
def indexDocSubscriber(event):
"""A subscriber to IntIdAddedEvent"""
ob = event.object
cat = get_catalog(ob)
if cat is not None:
id = getUtility(IIntIds, context=ob).getId(ob)
cat.index_doc(id, ob)
def reindexDocSubscriber(event):
"""A subscriber to ObjectModifiedEvent"""
ob = event.object
cat = get_catalog(ob)
if cat is not None:
id = getUtility(IIntIds, context=ob).queryId(ob)
if id is not None:
cat.index_doc(id, ob)
def unindexDocSubscriber(event):
"""A subscriber to IntIdRemovedEvent"""
ob = event.object
cat = get_catalog(ob)
if cat is not None:
id = getUtility(IIntIds, context=cat).queryId(ob)
if id is not None:
cat.unindex_doc(id)
@adapter(ILocation, IObjectRemovedEvent)
def removeIntIdSubscriber(ob, event):
"""A subscriber to ObjectRemovedEvent
Removes the unique ids registered for the object in the local utility.
"""
utilities = tuple(getAllUtilitiesRegisteredFor(IIntIds, context=ob))
if utilities:
key = IKeyReference(ob, None)
# Register only objects that adapt to key reference
if key is not None:
# Notify the catalogs that this object is about to be removed.
notify(IntIdRemovedEvent(ob, event))
for utility in utilities:
try:
utility.unregister(key)
except KeyError:
pass
@adapter(ILocation, IObjectAddedEvent)
def addIntIdSubscriber(ob, event):
"""A subscriber to ObjectAddedEvent
Registers the object added in the local id utilities and fires
an event for the catalog.
"""
utilities = tuple(getAllUtilitiesRegisteredFor(IIntIds, context=ob))
    if utilities: # assert that there are any utilities
key = IKeyReference(ob, None)
# Register only objects that adapt to key reference
if key is not None:
for utility in utilities:
utility.register(key)
# Notify the catalogs that this object was added.
            notify(IntIdAddedEvent(ob, event))
Spatial Indexes
===============
zgeo.spatialindex provides a spatial bounding box index that plugs into the
Zope Catalog.
IBounded is an interface essential to the bounding box index
>>> from zope.interface import implements
>>> from zope.location.interfaces import ILocation
>>> import zgeo.geographer.example
>>> class Placemark(zgeo.geographer.example.Placemark):
... implements(ILocation)
>>> placemark1 = Placemark()
>>> from zgeo.geographer.interfaces import IGeoreferenced
>>> geo = IGeoreferenced(placemark1)
>>> geo.setGeoInterface('Point', (-105.08, 40.59))
>>> from zgeo.spatialindex.interfaces import IBounded
>>> bounded = IBounded(placemark1)
>>> bounded.bounds
(-105.08, 40.590000000000003, -105.08, 40.590000000000003)
Next, create a catalog:
>>> from zgeo.spatialindex.catalog import Catalog
>>> cat = Catalog()
and add a bounding box index:
>>> from zgeo.spatialindex.index import BoundsIndex
>>> cat['bounds'] = BoundsIndex('bounds', IBounded)
Index 2 placemark objects
>>> placemark2 = Placemark()
>>> geo = IGeoreferenced(placemark2)
>>> geo.setGeoInterface('Point', (0.0, 0.0))
>>> cat.index_doc(1, placemark1)
>>> cat.index_doc(2, placemark2)
Query using bounds that encompass neither
>>> list(cat.apply({'bounds': (-110, 30, -105, 35)}))
[]
Using bounds that encompass only the first
>>> list(cat.apply({'bounds': (-110, 40, -105, 45)}))
[1L]
And with bounds that encompass both
>>> list(cat.apply({'bounds': (-180, -90, 180, 90)}))
[1L, 2L]
We can unindex objects:
>>> cat.unindex_doc(1)
>>> list(cat.apply({'bounds': (-180, -90, 180, 90)}))
[2L]
and reindex objects:
>>> geo = IGeoreferenced(placemark2)
>>> geo.setGeoInterface('Point', (-105.0, 40.0))
>>> cat.index_doc(2, placemark2)
>>> list(cat.apply({'bounds': (-110, 40, -105, 45)}))
[2L]
Clear
>>> cat.clear()
>>> len(cat['bounds'].backward)
0
>>> list(cat.apply({'bounds': (-180, -90, 180, 90)}))
[]
Finally, let's test our spatial catalog factory:
>>> from zope.app.folder import Folder
>>> places = Folder()
>>> from zgeo.spatialindex.site import createLocalSpatialCatalog
>>> createLocalSpatialCatalog(places)
>>> sm = places.getSiteManager()
Can has an IIntIds utility?
>>> from zope.app.intid.interfaces import IIntIds
>>> intids = sm.getUtility(IIntIds)
>>> intids
<zope.app.intid.IntIds object at ...>
Can has a catalog?
>>> from zope.app.catalog.interfaces import ICatalog
>>> cat = sm.getUtility(ICatalog)
>>> cat
<zgeo.spatialindex.catalog.Catalog object at ...>
>>> cat['bounds']
<zgeo.spatialindex.index.BoundsIndex object at ...>
Adding a placemark to the folder should result in new intid items, but no
catalog entry because the placemark isn't georeferenced. First, however, we need to provide subscribers to IObjectAddedEvent:
>>> from zope.component import provideHandler
>>> from zope.app.intid import addIntIdSubscriber, removeIntIdSubscriber
>>> provideHandler(addIntIdSubscriber)
>>> provideHandler(removeIntIdSubscriber)
>>> placemark3 = Placemark()
>>> placemark3.__name__ = '3'
>>> placemark3.__parent__ = places
>>> places['3'] = placemark3
>>> list(intids.refs)
[]
from zope.interface import implements
from Acquisition import aq_chain
from zope.event import notify
from shapely.geometry.polygon import Polygon
from shapely.geometry import asShape
from shapely import wkt
from zgeo.geographer.geo import GeoreferencingAnnotator
from interfaces import IWebFeatureServiceable,IWebFeatureService, IWFSGeoItem
from events import WFSGeoreferencedEvent, afterObjectCreated
import logging
logger = logging.getLogger('WFS')
class WFSGeoItem(GeoreferencingAnnotator):
""" A georeferenced object exposable through WFS
"""
implements(IWFSGeoItem)
def __init__(self, context):
"""Initialize adapter."""
self.context = context
GeoreferencingAnnotator.__init__(self, context)
self._geom = None
@property
def id(self):
return self.context.id
@property
def name(self):
return self.context.title_or_id()
@property
def featureType(self):
if hasattr(self.context, 'featureType'):
return self.context.featureType
if hasattr(self.context, 'getFeatureType'):
return self.context.getFeatureType()
return 'default'
@property
def uri(self):
return self.context.absolute_url()
@property
def geometry(self):
return self.getGeometry()
@property
def geometryAsWKT(self):
""" return geometry as WKT string
"""
if self.isGeoreferenced():
return self.getGeometry().wkt
else:
return None
@property
def description(self):
return getattr(self.context,'description', 'No description')
def getSRS(self):
# is it correct ??
srs=self.crs
if srs is None:
srs='EPSG:4326'
return srs
# def setSRS(self, srs):
# try:
# assert (srs.startswith('EPSG') or srs.find('proj') >= 0)
# except AssertionError:
# raise ValueError, \
# "%s is invalid. Spatial reference system definition must be in EPSG or PROJ.4 form" % (srs)
# self.georef['srs'] = srs
def getGeometry(self):
if self._geom is None:
self._geom = asShape(self.geo)
return self._geom
def setGeoInterface(self, type, coordinates, crs=None):
GeoreferencingAnnotator.setGeoInterface(self, type, coordinates, crs)
notify(WFSGeoreferencedEvent(self))
#notify(afterObjectCreated(self))
def setGeometryFromWKT(self, fromwkt):
geometry = wkt.loads(fromwkt)
type = geometry.type
if type=='Point':
coords=geometry.coords[0]
elif type=='Polygon':
coords=[list(geometry.exterior.coords)]
logger.info(coords)
else:
coords=list(geometry.coords)
self.setGeoInterface(type, coords)
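    # A sketch (not in the original code): setGeometryFromWKT(
    # 'POINT (-105.08 40.59)') stores type 'Point' with coordinates
    # (-105.08, 40.59) via setGeoInterface above.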
# def setGeometry(self, geomtype=None, coords=None, geometry=None, fromwkt=None):
# if geomtype is None and coords is None and geometry is None and fromwkt is None:
# raise ValueError, "No parameter provided"
# if geomtype is not None and coords is not None:
# try:
# self._geom = asShape({'type': geomtype,
# 'coordinates': coords})
# except:
# raise ValueError, "geomtype and coords are inconsistent"
# elif geometry is not None:
# try:
# assert isinstance(geometry, BaseGeometry)
# self._geom = geometry
# except AssertionError:
# raise ValueError, "geometry is not a Shapely object"
# elif fromwkt is not None:
# try:
# self._geom = wkt.loads(fromwkt)
# except:
# raise ValueError, "wkt string is inconsistent"
# self.georef['geometryType'] = self._geom.geometryType()
# if self.georef['geometryType']=='Point':
# self.georef['spatialCoordinates'] = self._geom.coords
# elif self.georef['geometryType']=='Polygon':
# self.georef['spatialCoordinates'] = None
# else:
# self.georef['spatialCoordinates'] = list(self._geom.coords)
#
# notify(GeoreferencedEvent(self))
def isGeoreferenced(self):
"""Return True if the object is "on the map"."""
return self.coordinates is not None
def getGML(self):
""" return geometry as GML string
"""
if self.isGeoreferenced():
coords=self.coordinates
logger.info(str(coords))
            # TODO: factor this out with getGMLBoundingBox (WFS.py)
bboxTuple=bboxAsTuple(self.getGeometry())
strbbox=str(bboxTuple[0])+','+str(bboxTuple[1])+' '+str(bboxTuple[2])+','+str(bboxTuple[3])
wfs=self.getWFSParent()
if self.type == 'Polygon':
outerCoords=coords[0]
outerPoints=[str(p[0])+","+str(p[1]) for p in outerCoords]
logger.info((" ").join(outerPoints))
gml = '<myns:'+self.featureType+' id="'+self.id+'">'
gml += '<gml:boundedBy> <gml:Box srsName="'+wfs.srs+'"> <gml:coordinates>'+strbbox+'</gml:coordinates> </gml:Box></gml:boundedBy>'
gml += '<myns:msGeometry><gml:'+self.type+' srsName="'+wfs.srs+'">'
gml += '<gml:outerBoundaryIs><gml:LinearRing><gml:coordinates>'+(" ").join(outerPoints)+'</gml:coordinates></gml:LinearRing></gml:outerBoundaryIs>'
gml += '</gml:'+self.type+'> </myns:msGeometry>'+self.getGMLElement()+'</myns:'+self.featureType+'>'
elif self.type == 'Point':
coords=[coords]
points=[str(p[0])+","+str(p[1]) for p in coords]
gml = '<myns:'+self.featureType+' id="'+self.id+'"> <gml:boundedBy> <gml:Box srsName="'+wfs.srs+'"> <gml:coordinates>'+strbbox+'</gml:coordinates> </gml:Box> </gml:boundedBy> <myns:msGeometry> <gml:'+self.type+' srsName="'+wfs.srs+'"> <gml:coordinates>'+(" ").join(points)+'</gml:coordinates> </gml:'+self.type+'> </myns:msGeometry>'+self.getGMLElement()+'</myns:'+self.featureType+'>'
else:
points=[str(p[0])+","+str(p[1]) for p in coords]
gml = '<myns:'+self.featureType+' id="'+self.id+'"> <gml:boundedBy> <gml:Box srsName="'+wfs.srs+'"> <gml:coordinates>'+strbbox+'</gml:coordinates> </gml:Box> </gml:boundedBy> <myns:msGeometry> <gml:'+self.type+' srsName="'+wfs.srs+'"> <gml:coordinates>'+(" ").join(points)+'</gml:coordinates> </gml:'+self.type+'> </myns:msGeometry>'+self.getGMLElement()+'</myns:'+self.featureType+'>'
return gml
else:
#No geometry for this object
return ''
def getGMLElement(self):
gml=''
wfs=self.getWFSParent()
for element in wfs.getElements(self.featureType):
attr=getattr(self, element)
if callable(attr):
attr=attr()
gml=gml+'<myns:'+element+'>'+str(attr)+'</myns:'+element+'>'
return gml
def __getattr__(self,name):
"""Overloads getattr to return context attibutes
"""
if hasattr(self.context, name):
return getattr(self.context, name)
elif hasattr(self.context, 'get'+name.capitalize()):
return getattr(self.context, 'get'+name.capitalize())()
# else:
# return self.__dict__[name]
def getWFSParent(self):
"""
"""
parents = self.context.aq_chain
isWFSenabled = False
for o in parents:
if IWebFeatureServiceable.providedBy(o):
isWFSenabled = True
break
if isWFSenabled:
return IWebFeatureService(o)
else:
return None
def bboxAsTuple(geometry):
""" return the geometry bbox as tuple
"""
envelope=geometry.envelope
if envelope.geometryType()=="Point":
x=envelope.coords[0][0]
y=envelope.coords[0][1]
return (x,y,x,y)
else:
return envelope.bounds
def bboxFromTuple(bbox_tuple):
coords = ((bbox_tuple[0], bbox_tuple[1]),
(bbox_tuple[0], bbox_tuple[3]),
(bbox_tuple[2], bbox_tuple[3]),
(bbox_tuple[2], bbox_tuple[1]))
    return Polygon(coords)
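# A usage sketch (not part of the original module): bboxFromTuple((0, 0, 2, 1))
# builds the rectangle Polygon ((0,0), (0,1), (2,1), (2,0)); bboxAsTuple of
# that polygon returns (0.0, 0.0, 2.0, 1.0).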
from zope.interface import implements
from shapely import wkt
from zgeo.geographer.interfaces import IGeoCollection
from geoitem import bboxAsTuple, bboxFromTuple
from interfaces import IWebFeatureService, IWebFeatureServiceable
from geocatalog.catalog import GeoCatalog
from Products.AdvancedQuery.AdvancedQuery import *
import re
import logging
logger = logging.getLogger('WFS')
class WebFeatureService(object):
"""An OGC Web Feature Service adapter
"""
implements(IWebFeatureService, IGeoCollection)
def __init__(self, context):
"""Initialize adapter."""
self.context = context
@property
def name(self):
return self.context.id
@property
def title(self):
return self.context.Title()
@property
def abstract(self):
if hasattr(self.context, 'getAbstract'):
return self.context.getAbstract()
else:
return ''
@property
def onlineresource(self):
return self.context.absolute_url()+"/wfs?"
@property
def srs(self):
if hasattr(self.context, 'getSrs'):
return self.context.getSrs()
else:
return 'EPSG:4326'
@property
def featuretypes(self):
return self.getFeatureTypesDict()
def getFeatureTypesDict(self):
"""
"""
if not hasattr(self.context, 'geoFeaturetypes'):
ft_dict={'default':
{'elements':
{'Title': 'string'},
'boundingbox':
None
}
}
self.context.geoFeaturetypes = ft_dict
return self.context.geoFeaturetypes
def updateFeatureTypesDict(self, ft_dict):
"""
"""
self.context.geoFeaturetypes = ft_dict
def getGeoCatalog(self):
if not hasattr(self.context, 'geoCatalog'):
cat=GeoCatalog('geoCatalog')
self.context._setObject('geoCatalog',cat)
return self.context.geoCatalog
def getElements(self,ftname):
ft_dict=self.getFeatureTypesDict()
if ftname in ft_dict:
return ft_dict[ftname]['elements'].keys()
return None
def getElementType(self,element):
""" return the element type from a feature type
"""
ft_dict=self.getFeatureTypesDict()
for ft in ft_dict:
Elements=self.getElements(ft)
if element in Elements:
return ft_dict[ft]['elements'][element]
return None
def stringConversionType(self,stringElement,type):
""" transform a stringElement in a type
"""
if type=='integer':
return int(stringElement)
elif type=='float':
return float(stringElement)
return stringElement
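    # Example (a sketch): stringConversionType('42', 'integer') returns 42;
    # an unknown type returns the string unchanged.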
def getQuerySorting(self,sortList):
""" construct the parameter for sorting in AdvancedQuery from a list
(for example:Field1 D,Field2,Field3 D will return ((Field1,'desc'),(Field2,'asc'),(Field3','desc')))
"""
querySorting=[]
if sortList is not None:
for e in sortList:
                if e.endswith(' D'):
                    # This element sorting is descending; slice off the ' D'
                    # suffix (strip(' D') would also remove leading/trailing
                    # 'D' characters from the field name itself)
                    querySorting.append((e[:-2],'desc'))
                elif e.endswith(' A'):
                    # This element sorting is ascending with an explicit suffix
                    querySorting.append((e[:-2],'asc'))
                else:
                    # No suffix: ascending by default
                    querySorting.append((e,'asc'))
return querySorting
########################
## IGeoCollection overwriting
########################
def geoItems(self):
"""Return georeferenced items in the container """
featuretypes=self.featuretypes.keys()
return [i.getObject() for i in self.getFeatureTypeItems(featuretypes)[0]]
def getBoundingBox(self):
"""Get the bounding box of contained items as a tuple
(minx, miny, maxx, maxy)."""
featuretypes=self.featuretypes.keys()
return bboxAsTuple(self.computeBoundingBoxListFT(featuretypes))
########################
## WFS processing
########################
def getFeatureTypeItems(self, ListFTname, bbox=None, filter=None, maxItems=None, sortBy=None):
errorMessage=''
items=[]
sortList=self.getQuerySorting(sortBy)
for i in sortList:
if i[0] not in self.getGeoCatalog().indexes():
errorMessage='ERROR: sortBy element does not exist'
if errorMessage=='':
            if bbox is None and filter is None:
#getFeatureTypeItems without bbox and filter
items=self.getGeoCatalog().evalAdvancedQuery(Generic('featureType',ListFTname),sortList)
            elif filter is None:
#getFeatureTypeItems with bbox
advQuery=Generic('featureType',ListFTname)&Generic('geometry',{'query':bboxFromTuple(bbox),'geometry_operator':'intersects'})
items=self.getGeoCatalog().evalAdvancedQuery(advQuery,sortList)
            elif bbox is None:
#getFeatureTypeItems with filter
List_Nodes=filter
List_ChildNodes=List_Nodes[0]._get_childNodes()
(advQuery, errorMessage)=self.buildCompleteQuery(List_ChildNodes,ListFTname)
items=self.getGeoCatalog().evalAdvancedQuery(advQuery,sortList)
elif bbox is not None and filter is not None:
#FILTER QUERY
List_Nodes=filter
List_ChildNodes=List_Nodes[0]._get_childNodes()
(advQuery, errorMessage)=self.buildCompleteQuery(List_ChildNodes,ListFTname)
if errorMessage=='':
#BBOX QUERY
advQuery=advQuery&Generic('geometry',{'query':bboxFromTuple(bbox),'geometry_operator':'within'})
items=self.getGeoCatalog().evalAdvancedQuery(advQuery,sortList)
if errorMessage=='' and maxItems is not None:
if len(items)<=maxItems:
return (items,errorMessage)
else:
#len(items)>maxItems
return (items[0:maxItems],errorMessage)
return (items,errorMessage)
def buildSimpleFilter(self,List_Nodes,filter):
""" build a Filter hmap from a nodes list provided by a xml document
and return an error message ('' if there is not error)
"""
ErrorMessage=''
logicalOperators=('AND','OR','NOT')
generalComparisonOperators=('PropertyIsEqualTo','PropertyIsNotEqualTo','PropertyIsLessThan','PropertyIsGreaterThan','PropertyIsLessThanOrEqualTo','PropertyIsGreaterThanOrEqualTo')
arithmeticOperators=('Add','Sub','Mul','Div')
numericTypes=('integer','float')
filter['ArithmeticOperator']=None
List_ChildNodes=List_Nodes._get_childNodes()
if List_Nodes.tagName not in logicalOperators:
# Simple Filter
filter['FilterType']=List_Nodes.tagName
if List_Nodes._get_localName() == 'FeatureId':
filter['FilterType'] = 'PropertyIsEqualTo'
filter['PropertyName'] = 'id'
filter['Literal'] = List_Nodes.getAttribute('fid')
logger.info(filter['PropertyName']+' = '+filter['Literal'])
elif List_Nodes.tagName=='PropertyIsNull':
if len(List_ChildNodes)==1:
theChildNode=List_ChildNodes[0]
if theChildNode.tagName=='PropertyName':
filter['PropertyName']=theChildNode.childNodes[0].nodeValue
else:
ErrorMessage='ERROR : TAG CONSTRUCTION, This comparison operator must only contain PropertyName tag'
else:
ErrorMessage='ERROR : TAG CONSTRUCTION, This comparison operator must only contain PropertyName tag'
elif List_Nodes.tagName=='PropertyIsBetween':
if len(List_ChildNodes)==3:
firstChildNode=List_ChildNodes[0]
secondChildNode=List_ChildNodes[1]
thirdChildNode=List_ChildNodes[2]
if firstChildNode.tagName=='PropertyName' and secondChildNode.tagName=='LowerBoundary' and thirdChildNode.tagName=='UpperBoundary':
filter['PropertyName']=firstChildNode.childNodes[0].nodeValue
filter['LowerBoundary']=secondChildNode.childNodes[0].nodeValue
filter['UpperBoundary']=thirdChildNode.childNodes[0].nodeValue
else:
ErrorMessage='ERROR : TAG CONSTRUCTION, This comparison operator must contain PropertyName, LowerBoundary and UpperBoundary tags'
else:
ErrorMessage='ERROR : TAG CONSTRUCTION, This comparison operator must contain PropertyName, LowerBoundary and UpperBoundary tags'
elif List_Nodes.tagName in generalComparisonOperators:
if len(List_ChildNodes)==2:
#Retrieval of PropertyName and Literal Tags
firstChildNode=List_ChildNodes[0]
secondChildNode=List_ChildNodes[1]
if str(firstChildNode.tagName)=='PropertyName' and str(secondChildNode.tagName)=='Literal':
filter['PropertyName']=firstChildNode.childNodes[0].nodeValue
filter['Literal']=secondChildNode.childNodes[0].nodeValue
elif str(firstChildNode.tagName)=='PropertyName' and str(secondChildNode.tagName) in arithmeticOperators:
filter['PropertyNameA']=firstChildNode.childNodes[0].nodeValue
filter['ArithmeticOperator']=str(secondChildNode.tagName)
littleChildNodes=secondChildNode._get_childNodes()
filter['PropertyNameB']=littleChildNodes[0].childNodes[0].nodeValue
filter['Literal']=littleChildNodes[1].childNodes[0].nodeValue
if filter['PropertyNameA']==filter['PropertyNameB']:
ErrorMessage='ERROR : ARITHMETIC CONSTRUCTION, The same element is used twice'
if self.getElementType(filter['PropertyNameA']) not in numericTypes or self.getElementType(filter['PropertyNameB']) not in numericTypes:
ErrorMessage='ERROR : ELEMENT TYPE, It is not possible to compare these elements (not numeric type)'
else:
ErrorMessage='ERROR : TAG CONSTRUCTION, This comparison operator must contain PropertyName and Literal tags or PropertyName and Arithmetic tags'
else:
ErrorMessage='ERROR : TAG CONSTRUCTION, This comparison operator must contain PropertyName and Literal tags'
else :
ErrorMessage='The accepted comparison operators are : PropertyIsEqualTo, PropertyIsNotEqualTo, PropertyIsLessThan, PropertyIsGreaterThan, PropertyIsLessThanOrEqualTo, PropertyIsGreaterThanOrEqualTo, PropertyIsNull, PropertyIsBetween'
return ErrorMessage
def applyComparisonOperator(self,filter,ListFTname):
""" return a query built from a simple filter (Hmap) with comparison operator
"""
query=None
filterType=filter['FilterType']
        if filter['ArithmeticOperator'] is None:
# Only one feature type is considered
element=filter['PropertyName']
if filterType=='PropertyIsEqualTo':
query=Eq(str(element),self.stringConversionType(filter['Literal'],self.getElementType(element)))
elif filterType=='PropertyIsLessThanOrEqualTo':
maxValue=self.stringConversionType(filter['Literal'],self.getElementType(element))
query=Le(str(element),maxValue)
elif filterType=='PropertyIsGreaterThanOrEqualTo':
minValue=self.stringConversionType(filter['Literal'],self.getElementType(element))
query=Ge(str(element),minValue)
elif filterType=='PropertyIsBetween':
minValue=self.stringConversionType(filter['LowerBoundary'],self.getElementType(element))
maxValue=self.stringConversionType(filter['UpperBoundary'],self.getElementType(element))
query=Between(str(element),minValue,maxValue)
elif filterType=='PropertyIsNull':
query=Eq(str(element),None)
elif filterType=='PropertyIsNotEqualTo':
query=~Eq(str(element),self.stringConversionType(filter['Literal'],self.getElementType(element)))
            elif filterType=='PropertyIsLessThan':
                # AdvancedQuery has no strict less-than, so emulate it as
                # "less-or-equal and not equal"
                maxValue=self.stringConversionType(filter['Literal'],self.getElementType(element))
                query=~Eq(str(element),maxValue)&Le(str(element),maxValue)
            elif filterType=='PropertyIsGreaterThan':
                # same emulation for strict greater-than
                minValue=self.stringConversionType(filter['Literal'],self.getElementType(element))
                query=~Eq(str(element),minValue)&Ge(str(element),minValue)
else:
# Comparison and arithmetic operators
query=self.applyArithmeticOperator(filter,ListFTname)
return query
def applyArithmeticOperator(self,filter,ListFTname):
""" return a query built from a simple filter (Hmap) with comparison and arithmetic operators such as
filter['PropertyNameA'] COMPARISON_OPERATOR filter['PropertyNameB'] ARITHMETIC_OPERATOR filter['Literal']
"""
        query=None
        # map the filter-encoding arithmetic tag to the corresponding
        # Python 2 operator instead of building an expression for eval()
        op_map = {'Add': operator.add,
                  'Sub': operator.sub,
                  'Mul': operator.mul,
                  'Div': operator.div}
        if filter['ArithmeticOperator'] not in op_map:
            return None
        op = op_map[filter['ArithmeticOperator']]
        literal = self.stringConversionType(filter['Literal'],
                                            self.getElementType(filter['PropertyNameB']))
        valB=[getattr(o,filter['PropertyNameB']) for o in self.getFeatureTypeItems(ListFTname)[0]]
        l=[(i, op(i, literal)) for i in valB]
        queryComp=[]
        for v in l:
            filterComp={}
            filterComp['ArithmeticOperator']=None
            filterComp['FilterType']=filter['FilterType']
            filterComp['PropertyName']=filter['PropertyNameA']
            filterComp['Literal']=v[1]
            queryComp.append(Eq(filter['PropertyNameB'],v[0])& self.applyComparisonOperator(filterComp,ListFTname))
        query=Or(*queryComp)
return query
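    # Illustrative expansion (hypothetical data, not from the original
    # source): for a filter meaning "A == B + 10" over catalogued B values
    # (1, 5), this builds Or(Eq(B, 1) & Eq(A, 11), Eq(B, 5) & Eq(A, 15));
    # each stored value of B is paired with the comparison on A, shifted
    # by the literal.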
def buildQuery(self,ANode,ListFTname):
""" return the query built from a nodes List, provided by a xml document, containing only ONE logical operator
"""
advanced=None
        op=None
        FilterError=''
        if ANode.tagName=='AND':
            op=And
        elif ANode.tagName=='OR':
            op=Or
        if op is not None:
            List_ChildNodes=ANode._get_childNodes()
            queryComp=[]
            for n in List_ChildNodes:
                filter={}
                FilterError=self.buildSimpleFilter(n, filter)
                if FilterError=='':
                    queryComp.append(self.applyComparisonOperator(filter,ListFTname))
            # combine the sub-queries with the operator class directly
            # instead of assembling the call through eval()
            advanced=op(*queryComp)
elif ANode.tagName=='NOT':
List_ChildNodes=ANode._get_childNodes()
for n in List_ChildNodes:
filter={}
FilterError=self.buildSimpleFilter(n, filter)
if FilterError=='':
simpleQuery=self.applyComparisonOperator(filter,ListFTname)
if advanced==None:
advanced=~simpleQuery
else:
advanced=advanced&~simpleQuery
else:
# NO LOGICAL OPERATOR
filter={}
FilterError=self.buildSimpleFilter(ANode, filter)
if FilterError=='':
advanced=self.applyComparisonOperator(filter,ListFTname)
return (advanced, FilterError)
def buildCompleteQuery(self,List_ChildNodes,ListFTname):
""" return the complete query built from a nodes List provided by a xml document
"""
error=''
query=None
logicalOperators=('AND','OR','NOT')
if List_ChildNodes[0].tagName not in logicalOperators:
# the filter doesn't contain logical operators
(filterQuery,error)=self.buildQuery(List_ChildNodes[0],ListFTname)
else:
# the filter contains at least one logical operator
List_LittleChildNodes=List_ChildNodes[0]._get_childNodes()
childTag=[i.tagName for i in List_LittleChildNodes]
childLogicalTag=[t for t in childTag if t in logicalOperators]
if childLogicalTag==[]:
# Only one logical operator
(filterQuery,error)=self.buildQuery(List_ChildNodes[0],ListFTname)
            else:
                # Several logical operators
                queryComp=None
                # principal logical operator: combine sub-queries with a
                # small lambda instead of assembling an eval() expression
                combine=None
                if List_ChildNodes[0].tagName=='AND':
                    combine=lambda a, b: a & b
                    (filterQuery,error)=self.buildQuery(List_LittleChildNodes[0], ListFTname)
                elif List_ChildNodes[0].tagName=='OR':
                    combine=lambda a, b: a | b
                    (filterQuery,error)=self.buildQuery(List_LittleChildNodes[0], ListFTname)
                elif List_ChildNodes[0].tagName=='NOT':
                    combine=lambda a, b: a & ~b
                    (filterQuery,error)=self.buildQuery(List_LittleChildNodes[0], ListFTname)
                    if error=='':
                        filterQuery=~filterQuery
                if error=='':
                    for n in List_LittleChildNodes[1:]:
                        (queryComp,error)=self.buildQuery(n, ListFTname)
                        if error=='':
                            filterQuery=combine(filterQuery, queryComp)
if error=='':
query=Or(*[Eq('featureType',str(ft)) for ft in ListFTname])&filterQuery
return (query,error)
########################
## Spatial processing
########################
def getFeatureTypeBoundingBox(self, ftname):
if ftname in self.featuretypes:
if self.featuretypes[ftname]['boundingbox'] is None:
return None
else:
t = self.featuretypes[ftname]['boundingbox']
if len(t)==2:
return bboxFromTuple((t[0], t[1], t[0], t[1]))
if len(t)==4:
return bboxFromTuple(t)
return None
def computeFeatureTypeBoundingBox(self, ftname):
""" compute entirely the BBOX
"""
(items, error) = self.getFeatureTypeItems(ftname)
ft_dict=self.getFeatureTypesDict()
if ftname in ft_dict:
bbox = self.computeBoundingBox(items)
if bbox is None:
ft_dict[ftname]['boundingbox'] = None
else:
ft_dict[ftname]['boundingbox'] = bboxAsTuple(bbox)
self.updateFeatureTypesDict(ft_dict)
def refreshFeatureTypeBoundingBox(self, ftname, geom):
""" update BBOX if geom not contained
"""
if geom is not None:
ft_dict=self.getFeatureTypesDict()
if ftname in ft_dict:
bbox = self.getFeatureTypeBoundingBox(ftname)
                if bbox is None:
                    bbox = geom.envelope
                elif not geom.within(bbox):
                    bbox = bbox.union(geom).envelope
ft_dict[ftname]['boundingbox'] = bboxAsTuple(bbox)
self.updateFeatureTypesDict(ft_dict)
def computeBoundingBox(self, items):
bbox = None
for i in items:
            if hasattr(i, 'geometryAsWKT') and getattr(i, 'geometryAsWKT') is not None:
                geom = wkt.loads(i.geometryAsWKT)
                if geom is not None:
                    if bbox is None:
                        bbox = geom.envelope
                    elif not geom.within(bbox):
                        bbox = bbox.union(geom.envelope).envelope
return bbox
def computeBoundingBoxListFT(self, ListFTname):
"""
"""
total_bbox=None
for FT in ListFTname:
bbox=self.getFeatureTypeBoundingBox(FT)
if total_bbox is None:
total_bbox = bbox
else:
if bbox is not None:
total_bbox=total_bbox.union(bbox).envelope
return total_bbox
########################
## Configuration management
########################
def addFeatureType(self, ftname):
"""
"""
ft_dict=self.getFeatureTypesDict()
if not ftname in ft_dict:
ft_dict[ftname] = {'elements':
{'Title': 'string'},
'boundingbox':
None
}
self.updateFeatureTypesDict(ft_dict)
def removeFeatureTypes(self, ft_list):
"""
"""
ft_dict=self.getFeatureTypesDict()
for ftname in ft_list:
if ftname in ft_dict:
del ft_dict[ftname]
self.updateFeatureTypesDict(ft_dict)
def addElementToFeatureType(self, ftname, elem, type):
"""
"""
ft_dict=self.getFeatureTypesDict()
if ftname in ft_dict:
elements = ft_dict[ftname]['elements']
elements[elem]=type
ft_dict[ftname]['elements']=elements
self.updateFeatureTypesDict(ft_dict)
self.getGeoCatalog().declareFTElement(elem, type)
def removeElementsFromFeatureType(self, ftname, elem_list):
"""
"""
ft_dict=self.getFeatureTypesDict()
# collect all elements from other FT to check if catalog index and columns can be removed
otherelements = []
for f in ft_dict.keys():
if f != ftname:
for e in ft_dict[f]['elements'].keys():
otherelements.append(e)
if ftname in ft_dict:
elements = ft_dict[ftname]['elements']
for elem in elem_list:
if elem in elements:
del elements[elem]
if not elem in otherelements:
self.getGeoCatalog().removeFTElement(elem)
ft_dict[ftname]['elements']=elements
self.updateFeatureTypesDict(ft_dict) | zgeo.wfs | /zgeo.wfs-0.2.1.tar.gz/zgeo.wfs-0.2.1/zgeo/wfs/webfeatureservice.py | webfeatureservice.py |
from zope.interface import implements
from Products.ZCatalog.Catalog import CatalogError
from interfaces import IWebFeatureService, IWebFeatureServiceable, IWFSGeoreferencedEvent
from interfaces import IWFSGeoItem
import logging
logger = logging.getLogger('WFS')
class WFSGeoreferencedEvent(object):
"""Event to notify that object has been georeferenced.
"""
implements(IWFSGeoreferencedEvent)
def __init__(self, context):
self.context = context
def afterObjectCreated(obj, event):
"""
"""
geoitem = IWFSGeoItem(obj)
wfs = geoitem.getWFSParent()
if wfs is not None:
cat = wfs.getGeoCatalog()
cat.catalog_object(geoitem.context, '/'.join(obj.getPhysicalPath()))
if geoitem.isGeoreferenced():
wfs.refreshFeatureTypeBoundingBox(geoitem.featureType, geoitem.getGeometry())
def afterObjectModified(obj, event):
"""
"""
geoitem = IWFSGeoItem(obj)
wfs = geoitem.getWFSParent()
if wfs is not None:
cat = wfs.getGeoCatalog()
cat.catalog_object(geoitem.context,'/'.join(obj.getPhysicalPath()))
wfs.computeFeatureTypeBoundingBox(geoitem.featureType)
def afterGeometryModified(event):
"""
"""
geoitem=event.context
wfs = geoitem.getWFSParent()
if wfs is not None:
cat = wfs.getGeoCatalog()
cat.catalog_object(geoitem.context,'/'.join(geoitem.context.getPhysicalPath()))
wfs.computeFeatureTypeBoundingBox(geoitem.featureType)
def beforeObjectRemoved(obj, event):
"""
"""
geoitem = IWFSGeoItem(obj)
wfs = geoitem.getWFSParent()
if wfs is not None:
try:
cat = wfs.getGeoCatalog()
uid = '/'.join(obj.getPhysicalPath())
if cat.getrid(uid):
cat.uncatalog_object(uid)
wfs.computeFeatureTypeBoundingBox(geoitem.featureType)
        except Exception:
            logger.exception("cannot remove %s from the geo catalog"
                             % '/'.join(obj.getPhysicalPath()))
from zope.lifecycleevent import ObjectModifiedEvent
from zope.event import notify
from xml.dom import minidom
from xml.dom.minidom import getDOMImplementation
from Products.Five import BrowserView
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zgeo.wfs.geoitem import bboxAsTuple
from zgeo.wfs.interfaces import IWebFeatureService, IWFSGeoItem
from zgeo.wfs.events import WFSGeoreferencedEvent
import logging
logger = logging.getLogger('WFSView')
WFS_NS = 'http://www.opengis.net/wfs'
GML_NS = 'http://www.opengis.net/gml'
OGC_NS = 'http://www.opengis.net/ogc'
class WebFeatureServiceView(BrowserView):
""" View on an object to get WFS services
"""
capabilities = ViewPageTemplateFile('wfs_capabilities.xml')
describefeaturetype = ViewPageTemplateFile('wfs_featuretype.xml')
getfeature = ViewPageTemplateFile('wfs_feature.xml')
config_screen = ViewPageTemplateFile('editFeatureTypes.pt')
error_message = ViewPageTemplateFile('error.xml')
@property
def name(self):
adapted = IWebFeatureService(self.context)
return adapted.name
@property
def title(self):
adapted = IWebFeatureService(self.context)
return adapted.title
@property
def abstract(self):
adapted = IWebFeatureService(self.context)
return adapted.abstract
@property
def onlineresource(self):
adapted = IWebFeatureService(self.context)
return adapted.onlineresource
@property
def srs(self):
adapted = IWebFeatureService(self.context)
return adapted.srs
########################
## WFS requests handling
########################
def wfs(self):
""" wfs service view
"""
adapted = IWebFeatureService(self.context)
# get parameters from querystring
# (handles case insensitive issues)
self.readParams()
        # WFS request is contained in a querystring parameter named REQUEST
req = self.getParam('REQUEST', '').lower()
postBody = self.request.get('BODY')
logger.info('wfs request : '+req)
logger.info('wfs body : ')
logger.info(postBody)
# Request Type
if req == 'getcapabilities':
self.request.response.setHeader('Content-type','text/xml;charset=utf-8')
return self.capabilities()
elif req == 'describefeaturetype':
self.request.response.setHeader('Content-type','text/xml;charset=utf-8')
return self.describefeaturetype()
elif req=='getfeature':
self.request.response.setHeader('Content-type','text/xml;charset=utf-8')
strftname=self.getParam('typename')
if strftname is None:
return self.reportError('typename field is mandatory')
else:
List_ftname=strftname.split(',')
for ft in List_ftname:
if ft not in self.getFeaturetypesInfo():
return self.reportError('This typename is not managed by this service')
error=''
# Request Parameters
strbbox=self.getParam('BBOX')
strfilter=self.getParam('FILTER')
strmaxFeatures=self.getParam('MAXFEATURES')
strsortBy=self.getParam('SORTBY')
#MAXFEATURES
if strmaxFeatures is not None:
maxFeatures=int(strmaxFeatures)
else:
maxFeatures=None
#SORTBY
if strsortBy is not None:
sortBy=strsortBy.split(',')
else:
sortBy=None
if strbbox is None and strfilter is None:
#provide all items of Feature Types belonging to List_ftname
(results,error) = adapted.getFeatureTypeItems(List_ftname,None,None,maxFeatures,sortBy)
                if error == '':
if len(List_ftname)==1:
boundingbox=adapted.getFeatureTypeBoundingBox(List_ftname[0])
else:
boundingbox=adapted.computeBoundingBoxListFT(List_ftname)
else:
return self.reportError(error)
elif strfilter is None:
#provide items of Feature Types belonging to List_ftname inside the BBOX
bbox=map(float,strbbox.split(','))
if len(bbox)!=4:
return self.reportError('BBOX must contain 4 float numbers')
                (results,error) = adapted.getFeatureTypeItems(List_ftname, bbox,None,maxFeatures,sortBy)
                if error != '':
                    # keep this branch consistent with the others and
                    # surface query errors instead of silently ignoring them
                    return self.reportError(error)
                boundingbox=adapted.computeBoundingBox(results)
elif strbbox is None:
#provide items of Feature Types belonging to List_ftname respecting the filter
xmldom=minidom.parseString(strfilter)
List_Nodes=xmldom.getElementsByTagName('Filter')
if len(List_Nodes)!=1:
return self.reportError('FILTER Expression Not Well-Formed')
(results,error)=adapted.getFeatureTypeItems(List_ftname,None,List_Nodes,maxFeatures,sortBy)
                if error == '':
boundingbox=adapted.computeBoundingBox(results)
else:
return self.reportError(error)
elif strbbox is not None and strfilter is not None:
xmldom=minidom.parseString(strfilter)
List_Nodes=xmldom.getElementsByTagName('Filter')
bbox=map(float,strbbox.split(','))
if len(bbox)!=4:
return self.reportError('BBOX must contain 4 float numbers')
elif len(List_Nodes)!=1:
return self.reportError('FILTER Expression Not Well-Formed')
(results,error)=adapted.getFeatureTypeItems(List_ftname,bbox,List_Nodes,maxFeatures,sortBy)
                if error == '':
boundingbox=adapted.computeBoundingBox(results)
else:
return self.reportError(error)
return self.getfeature(items=results,bbox=boundingbox)
elif postBody is not None:
xml = minidom.parseString(postBody)
childs = xml._get_childNodes()
root = childs[0]
req = root._get_localName().lower()
if req == 'getcapabilities':
self.request.response.setHeader('Content-type','text/xml;charset=utf-8')
return self.capabilities()
elif req == 'transaction':
self.request.response.setHeader('Content-type','text/xml;charset=utf-8')
return self.transaction(root)
# if no REQUEST, assume getcapabilities
return self.capabilities()
def getParam(self,param,default=None):
"""
"""
return self._caseinsensitive.get(param.lower(), default)
def readParams(self):
"""(from Kai Lautaportti Primagis WMS implementation)
"""
        logger.info('READPARAM')
        self._caseinsensitive = {}
        for key in self.request.form.keys():
            value = self.request.form.get(key)
            self._caseinsensitive[key.lower()] = value
            logger.info('Key : %s, Value : %s' % (key, value))
def getGMLFeatureTypeBoundingBox(self,ftname):
""" return Feature Type Bounding Box as GML string
"""
adapted = IWebFeatureService(self.context)
bbox=adapted.getFeatureTypeBoundingBox(ftname)
if bbox is None:
return ''
else:
return self.getGMLBoundingBox(bbox)
def getGMLBoundingBox(self,bbox):
""" return Bounding Box bbox as GML string
"""
if bbox is not None:
bboxTuple=bboxAsTuple(bbox)
strbbox=str(bboxTuple[0])+','+str(bboxTuple[1])+' '+str(bboxTuple[2])+','+str(bboxTuple[3])
gml='<gml:Box srsName="'+self.srs+'"> <gml:coordinates>'+strbbox+'</gml:coordinates> </gml:Box>'
return gml
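    # Illustrative output (assuming srs 'EPSG:4326', not from the original
    # source): for the bbox tuple (0.0, 1.0, 2.0, 3.0) this yields
    #   <gml:Box srsName="EPSG:4326"> <gml:coordinates>0.0,1.0 2.0,3.0</gml:coordinates> </gml:Box>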
def reportError(self, message):
"""
"""
return self.error_message(msg=message)
def getTupleBoundingBoxFeatureType(self,ftname):
""" return feature type bounding box as a tuple
"""
adapted = IWebFeatureService(self.context)
bbox=adapted.getFeatureTypeBoundingBox(ftname)
if bbox is None:
return [-180,-90,180,90]
else:
return bboxAsTuple(bbox)
########################
## Configuration management screen
########################
def __call__(self):
action = self.request.get('ACTION')
if action=='ADD_FT':
self.manage_addFeatureType()
if action=='REMOVE_FT':
self.manage_removeFeatureTypes()
if action=='ADD_ELEMENT':
self.manage_addElementToFeatureType()
if action=='REMOVE_ELEMENT':
self.manage_removeElementsFromFeatureType()
return self.config_screen()
def getFeaturetypesInfo(self):
adapted = IWebFeatureService(self.context)
return adapted.featuretypes
def transaction(self, transactionNode):
""" implements the 'transaction' operation
"""
lockIdNodes = transactionNode.getElementsByTagNameNS(WFS_NS, 'LockId')
insertNodes = transactionNode.getElementsByTagNameNS(WFS_NS, 'Insert')
updateNodes = transactionNode.getElementsByTagNameNS(WFS_NS, 'Update')
deleteNodes = transactionNode.getElementsByTagNameNS(WFS_NS, 'Delete')
alterations = {}
alterations['inserted'] = []
alterations['updated'] = []
alterations['deleted'] = []
for n in insertNodes:
# getting idgen attribute (GenerateNew|UseExisting|ReplaceDuplicate)
idgen = 'GenerateNew'
if n.hasAttribute('idgen'):
                idgen = n.attributes['idgen'].value
features = n._get_childNodes()
alterations['inserted'].append(self.insertFeaturesGML(idgen, features))
for n in updateNodes:
if n.hasAttribute('typeName'):
typename = n.attributes['typeName'].value
# TODO: test typeName value
propertyNodes = n.getElementsByTagNameNS(WFS_NS, 'Property')
filterNodes = n.getElementsByTagNameNS(OGC_NS, 'Filter')
alterations['updated'].append(self.updateFeaturesGML(typename, propertyNodes, filterNodes))
for n in deleteNodes:
if n.hasAttribute('typeName'):
typename = n.attributes['typeName'].value
# TODO: test typeName value
filterNodes = n.getElementsByTagNameNS(OGC_NS, 'Filter')
alterations['deleted'].append(self.deleteFeaturesGML(typename, filterNodes))
countInserted = 0
countUpdated = 0
countDeleted = 0
for inserted in alterations['inserted']:
countInserted += len(inserted)
for updated in alterations['updated']:
countUpdated += len(updated)
for deleted in alterations['deleted']:
countDeleted += len(deleted)
impl = getDOMImplementation()
doc = impl.createDocument(WFS_NS, "wfs:TransactionResponse", None)
root = doc.documentElement
root.setAttribute("xmlns:wfs", WFS_NS)
root.setAttribute("xmlns:ogc", OGC_NS)
root.setAttribute("version", "1.1.0")
summaryNode = doc.createElementNS(WFS_NS, 'wfs:TransactionSummary')
totalInsertedNode = doc.createElementNS(WFS_NS, 'wfs:totalInserted')
totalInsertedNode.appendChild(doc.createTextNode(str(countInserted)))
totalUpdatedNode = doc.createElementNS(WFS_NS, 'wfs:totalUpdated')
totalUpdatedNode.appendChild(doc.createTextNode(str(countUpdated)))
totalDeletedNode = doc.createElementNS(WFS_NS, 'wfs:totalDeleted')
totalDeletedNode.appendChild(doc.createTextNode(str(countDeleted)))
summaryNode.appendChild(totalInsertedNode)
summaryNode.appendChild(totalUpdatedNode)
summaryNode.appendChild(totalDeletedNode)
root.appendChild(summaryNode)
if countInserted > 0:
insertResultNode = doc.createElementNS(WFS_NS, 'wfs:InsertResults')
for inserted in alterations['inserted']:
for fid in inserted:
featureNode = doc.createElementNS(WFS_NS, 'wfs:Feature')
featureIdNode = doc.createElementNS(OGC_NS, 'ogc:FeatureId')
                    featureIdNode.setAttribute('fid', fid)
featureNode.appendChild(featureIdNode)
insertResultNode.appendChild(featureNode)
root.appendChild(insertResultNode)
response = doc.toprettyxml()
logger.info(response)
return response
def insertFeaturesGML(self, idgen, featureNodes):
""" insert features (insert node from transaction operation)
"""
adapted = IWebFeatureService(self.context)
inserted = []
for n in featureNodes:
typeName = n._get_localName()
            if idgen == 'UseExisting':
                fid = n.getAttributeNS(GML_NS, 'id')
                # getattr with a default: a missing feature must not raise
                obj = getattr(self.context, fid, None)
                # error out if a feature with this id already exists
                if obj is not None:
                    return self.reportError('Feature '+fid+' already exists')
            elif idgen == 'ReplaceDuplicate':
                fid = n.getAttributeNS(GML_NS, 'id')
                obj = getattr(self.context, fid, None)
                # an existing feature with this id is simply reused
else:
fid = self.context.addWFSFeature()
obj = getattr(self.context, fid)
obj.featureType = typeName
            propertyNodes = n.childNodes
            for propertyNode in propertyNodes:
                if propertyNode._get_localName() == 'msGeometry':
                    wkt = self.getWKTFromGML(propertyNode.firstChild)
                    logger.info('insert '+wkt)
                    geoObj = IWFSGeoItem(obj)
                    geoObj.setGeometryFromWKT(wkt)
                else:
                    propertyName = propertyNode._get_localName()
                    if len(propertyNode.childNodes) > 0:
                        propertyValue = propertyNode.childNodes[0].nodeValue
                        if hasattr(obj, 'set'+propertyName.capitalize()):
                            logger.info('set'+propertyName.capitalize() + "(" + propertyValue+")")
                            getattr(obj, 'set'+propertyName.capitalize())(propertyValue)
inserted.append(fid)
# reindex
obj.reindexObject()
geoObj = IWFSGeoItem(obj)
notify(WFSGeoreferencedEvent(geoObj))
return inserted
def updateFeaturesGML(self, typename, propertyNodes, filterNodes):
""" update features (insert node from transaction operation)
"""
adapted = IWebFeatureService(self.context)
if len(filterNodes)!=1:
return self.reportError('FILTER Expression Not Well-Formed')
updated = []
List_ftname = [typename]
(results,error)=adapted.getFeatureTypeItems(List_ftname, None, filterNodes)
        if error != '':
return self.reportError(error)
for propertyNode in propertyNodes:
# get property name
propertyNameNode = propertyNode.getElementsByTagNameNS(WFS_NS, 'Name')[0]
propertyValueNode = propertyNode.getElementsByTagNameNS(WFS_NS, 'Value')[0]
propertyName = propertyNameNode.childNodes[0].nodeValue
logger.info('property '+propertyName.capitalize())
for result in results:
obj = result.getObject()
geoObj = IWFSGeoItem(obj)
fUpdated = False
                # update geometry
                if propertyName == 'msGeometry':
                    geomNode = propertyValueNode.firstChild
                    wkt = self.getWKTFromGML(geomNode)
                    if wkt is not None:
                        geoObj.setGeometryFromWKT(wkt)
                        fUpdated = True
                # elif, not if: a geometry update must not fall through to
                # the generic set<Property> branch below
                elif propertyName == 'Title':
                    propertyValue = propertyValueNode.childNodes[0].nodeValue
                    obj.setTitle(propertyValue)
                    fUpdated = True
                else:
                    propertyValue = propertyValueNode.childNodes[0].nodeValue
                    if hasattr(obj, 'set'+propertyName.capitalize()) and propertyValue is not None:
                        getattr(obj, 'set'+propertyName.capitalize())(propertyValue)
                        fUpdated = True
if fUpdated and obj.id not in updated:
logger.info("update "+obj.id)
updated.append(obj.id)
# reindex
for result in results:
obj = result.getObject()
geoObj = IWFSGeoItem(obj)
obj.reindexObject()
notify(WFSGeoreferencedEvent(geoObj))
return updated
def deleteFeaturesGML(self, typename, filterNodes):
""" delete features (insert node from transaction operation)
"""
adapted = IWebFeatureService(self.context)
if len(filterNodes)!=1:
return self.reportError('FILTER Expression Not Well-Formed')
deleted = []
List_ftname = [typename]
(results,error)=adapted.getFeatureTypeItems(List_ftname, None, filterNodes)
        if error != '':
return self.reportError(error)
for result in results:
obj = result.getObject()
geoObj = IWFSGeoItem(obj)
geoObj.setGeoInterface(None, None, None)
deleted.append(obj.id)
# reindex
obj.reindexObject()
notify(WFSGeoreferencedEvent(geoObj))
return deleted
def getWKTFromGML(self, geomNode):
"""
"""
gmlType = geomNode._get_localName().lower()
coords = None
if gmlType == 'multipoint':
coordinates = geomNode.getElementsByTagNameNS(GML_NS, 'coordinates')
coords = ''
for coordinate in coordinates:
value = self.getWKTCoordinateFromGML(coordinate)
if (coords != ''):
coords += ', '+value
else:
coords = value
coords = 'MULTIPOINT('+coords+')'
elif gmlType == 'point':
coordinates = geomNode.getElementsByTagNameNS(GML_NS, 'coordinates')
coordinate = coordinates[0]
coords = self.getWKTCoordinateFromGML(coordinate)
coords = 'POINT('+coords+')'
elif gmlType == 'linestring':
coordinates = geomNode.getElementsByTagNameNS(GML_NS, 'coordinates')
coordinate = coordinates[0]
coords = self.getWKTCoordinateFromGML(coordinate)
coords = 'LINESTRING('+coords+')'
elif gmlType == 'polygon':
coords = ''
outer = geomNode.getElementsByTagNameNS(GML_NS, 'outerBoundaryIs')[0]
coordinates = outer.getElementsByTagNameNS(GML_NS, 'coordinates')
for coordinate in coordinates:
value = '('+self.getWKTCoordinateFromGML(coordinate)+')'
if (coords != ''):
coords += ', '+value
else:
coords = value
inners = geomNode.getElementsByTagNameNS(GML_NS, 'innerBoundaryIs')
for inner in inners:
coordinates = inner.getElementsByTagNameNS(GML_NS, 'coordinates')
for coordinate in coordinates:
value = '('+self.getWKTCoordinateFromGML(coordinate)+')'
if (coords != ''):
coords += ', '+value
else:
coords = value
coords = 'POLYGON('+coords+')'
return coords
def getWKTCoordinateFromGML(self, coordinateNode):
decimal = '.'
cs = ','
ts = ' '
if coordinateNode.hasAttribute('decimal'):
decimal = coordinateNode.attributes['decimal'].value
if coordinateNode.hasAttribute('cs'):
cs = coordinateNode.attributes['cs'].value
if coordinateNode.hasAttribute('ts'):
ts = coordinateNode.attributes['ts'].value
value = coordinateNode.childNodes[0].nodeValue
return value.replace(ts, '*').replace(cs, ' ').replace('*', ',').replace(decimal, '.')
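    # Illustrative trace with the defaults (not from the original source):
    # "1,2 3,4" -> replace ts -> "1,2*3,4" -> replace cs -> "1 2*3 4"
    # -> restore '*' -> "1 2,3 4"; the temporary '*' keeps the tuple and
    # coordinate separators from colliding while they are swapped.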
def getFeatureInfo(self):
"""
"""
info = ""
if hasattr(self.context, "getWFSFeatureInfo"):
info = self.context.getWFSFeatureInfo(self.request.get('fid'))
return info
def manage_addFeatureType(self):
"""
"""
ft=self.request.get('NewFT')
IWebFeatureService(self.context).addFeatureType(ft)
def manage_removeFeatureTypes(self):
"""
"""
ft_list=self.request.get('FTsToRemove')
        if isinstance(ft_list, str):
            ft_list=[ft_list]
IWebFeatureService(self.context).removeFeatureTypes(ft_list)
def manage_addElementToFeatureType(self):
"""
"""
ft=self.request.get('FT')
elem=self.request.get('NewElem')
type=self.request.get('NewType')
IWebFeatureService(self.context).addElementToFeatureType(ft, elem, type)
def manage_removeElementsFromFeatureType(self):
"""
"""
ft=self.request.get('FT')
elem_list=self.request.get('ElemToRemove')
        if isinstance(elem_list, str):
            elem_list=[elem_list]
IWebFeatureService(self.context).removeElementsFromFeatureType(ft, elem_list) | zgeo.wfs | /zgeo.wfs-0.2.1.tar.gz/zgeo.wfs-0.2.1/zgeo/wfs/browser/wfs.py | wfs.py |
from Products.ZCatalog.ZCatalog import ZCatalog
from Products.ZCatalog.Catalog import CatalogError
from Products.ZCatalog.ZCatalog import Catalog
from Missing import MV
from Products.PluginIndexes.FieldIndex.FieldIndex import FieldIndex
from Products.CMFCore.ActionProviderBase import ActionProviderBase
from Products.CMFCore.utils import UniqueObject
from zgeo.wfs.geocatalog.geometryindex import GeometryIndex
from zgeo.wfs.geocatalog.geofeatureindex import GeoFeatureIndex
from zgeo.wfs.interfaces import IWFSGeoItem
class GeoCatalog(UniqueObject, ZCatalog, ActionProviderBase):
""" ZCatalog to index all the geo items
"""
def __init__(self,oid,**kw):
"""
"""
ZCatalog.__init__(self, oid)
self._catalog = GeoGMLCatalog()
# indexes creation
self._catalog.addIndex('featureType',GeoFeatureIndex('featureType'))
self._catalog.addIndex('name',GeoFeatureIndex('name'))
self._catalog.addIndex('geometry',GeometryIndex('geometry'))
self.addIndex('id','FieldIndex')
#metadata creation
self.addColumn('getGML')
self.addColumn('geometryAsWKT')
self.addColumn('Title')
self.addColumn('name')
def declareFTElement(self, elementname, elementtype):
"""
"""
# elementtype is useless at the moment, but will be used to choose the index type
try:
self.addIndex(elementname,'FieldIndex')
self.addColumn(elementname)
self.refreshCatalog()
except CatalogError:
# already exists
pass
def removeFTElement(self, elementname):
"""
"""
try:
self.delIndex(elementname)
self.delColumn(elementname)
self.refreshCatalog()
except CatalogError:
# doesn't exist
pass
try:
    from DocumentTemplate.cDocumentTemplate import safe_callable
except ImportError:
    # Fallback to python implementation to avoid dependency on DocumentTemplate
    import types
    def safe_callable(ob):
        # Works with ExtensionClasses and Acquisition.
        if hasattr(ob, '__class__'):
            return hasattr(ob, '__call__') or isinstance(ob, types.ClassType)
        else:
            return callable(ob)
class GeoGMLCatalog(Catalog):
"""(just overloads recordify method)
"""
def recordify(self, object):
""" turns an object into a record tuple """
geoitem=IWFSGeoItem(object)
record = []
# the unique id is allways the first element
for x in self.names:
if hasattr(geoitem, x):
attr=getattr(geoitem, x, MV)
else:
attr=getattr(object, x, MV)
            if attr is not MV and safe_callable(attr):
                attr = attr()
record.append(attr)
return tuple(record) | zgeo.wfs | /zgeo.wfs-0.2.1.tar.gz/zgeo.wfs-0.2.1/zgeo/wfs/geocatalog/catalog.py | catalog.py |
from OFS.SimpleItem import SimpleItem
from zope.interface import implements
from Products.PluginIndexes.common.util import parseIndexRequest
from Products.PluginIndexes.interfaces import IPluggableIndex
from Products.PluginIndexes.interfaces import ISortIndex
from Products.PluginIndexes.interfaces import IUniqueValueIndex
from BTrees.IIBTree import IITreeSet
from ZODB.POSException import ConflictError
from shapely import wkt
from index import BaseIndex
from zgeo.wfs.geoitem import bboxAsTuple
from zgeo.wfs.interfaces import IWFSGeoItem
import logging
logger = logging.getLogger('WFSCatalog')
class GeometryIndex(SimpleItem, BaseIndex):
"""Index for geometry attribute provided by IWFSGeoItem adapter
"""
implements(IPluggableIndex, IUniqueValueIndex, ISortIndex)
meta_type="GeometryIndex"
query_options = ('query','geometry_operator')
def __init__(self, id):
self.id = id
BaseIndex.__init__(self)
self.clear()
self.operators = ('equals', 'disjoint', 'intersects', 'touches', 'crosses', 'within', 'contains', 'overlaps')
self.useOperator = 'within'
def index_object(self, documentId, obj, threshold=None):
"""Index an object.
'documentId' is the integer ID of the document.
'obj' is the object to be indexed.
"""
returnStatus = 0
geoitem=IWFSGeoItem(obj)
if geoitem.isGeoreferenced():
geometry = getattr(geoitem, self.id)
            newValue = geometry.wkt
            # guard against a callable attribute; the previous
            # 'newValue is callable' compared identity with the builtin
            # and was always False
            if callable(newValue):
                newValue = newValue()
oldValue = self.backward.get(documentId, None )
if newValue is None:
if oldValue is not None:
self.rtree.delete(documentId, wkt.loads(oldValue).bounds)
try:
del self.backward[documentId]
except ConflictError:
raise
except:
pass
else:
if oldValue is not None and newValue!=oldValue:
self.rtree.delete(documentId, wkt.loads(oldValue).bounds)
self.rtree.add(documentId, geometry.bounds)
self.backward[documentId] = newValue
returnStatus = 1
return returnStatus
def unindex_object( self, documentId ):
"""
Remove the object corresponding to 'documentId' from the index.
"""
datum = self.backward.get( documentId, None )
if datum is None:
return
self.rtree.delete(documentId, wkt.loads(datum).bounds)
del self.backward[ documentId ]
def _apply_index(self, request, cid='', type=type):
"""
"""
record = parseIndexRequest(request, self.id, self.query_options)
if record.keys==None: return None
r = None
operator = record.get('geometry_operator',self.useOperator)
        if operator not in self.operators:
            raise RuntimeError("operator not valid: %s" % operator)
        if operator=='disjoint':
            raise RuntimeError("DISJOINT not supported yet")
# we only process one key
key = record.keys[0]
bbox = bboxAsTuple(key)
intersection=self.rtree.intersection(bbox)
        matches = []
        for d in [int(l) for l in intersection]:
            geom_wkt = self.backward.get(d, None)
            if geom_wkt is not None:
                geom = wkt.loads(geom_wkt)
                if geom is not None:
                    # refine the rtree candidates with the exact predicate
                    opr = getattr(geom, operator)
                    if opr(key):
                        matches.append(d)
        r = IITreeSet(matches)
return r, (self.id,)
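    # Query flow (illustrative, not from the original source): the rtree
    # intersection gives a cheap bounding-box prefilter; each candidate is
    # then re-tested with the exact shapely predicate named by
    # 'geometry_operator', so e.g. {'query': some_polygon,
    # 'geometry_operator': 'within'} only returns documents whose geometry
    # truly lies within some_polygon.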
def destroy_spatialindex(self):
"""
"""
self.clear() | zgeo.wfs | /zgeo.wfs-0.2.1.tar.gz/zgeo.wfs-0.2.1/zgeo/wfs/geocatalog/geometryindex.py | geometryindex.py |
zgitignore
==========
zgitignore is a small library to check if a file has been excluded by a ``.zgitignore`` file (those are compatible with ``.gitignore`` / ``.dockerignore`` files).
Installation
------------
As simple as it can be via pip::
$ pip install zgitignore
Or direct installation from source::
$ git clone git://github.com/zb3/zgitignore.git
$ cd zgitignore
$ python setup.py install
Usage
-----
.. code:: python
import zgitignore
# ZgitIgnore class stores the patterns, optionally takes two parameters: ignore_case and docker
# by default, it is case sensitive to match .gitignore behaviour
# for .dockerignore compatibility, use docker=True
f = zgitignore.ZgitIgnore(['build/', 'dist/', '*egg-info'])
# Patterns ending with / will match folders only:
print('build file ignored?: ', f.is_ignored('build')) # False
# When matching directories, set second parameter to True:
print('build folder ignored?: ', f.is_ignored('build', True)) # True
# It is case sensitive by default:
print('BUILD folder ignored?: ', f.is_ignored('BUILD', True)) # False
# Want it to be case-insensitive? No problem
f = zgitignore.ZgitIgnore(['*pycache*', '*pyc'], True) # second parameter is ignore_case
print('PYCACHE file ignored?', f.is_ignored('PYCACHE')) # True
# You can also add patterns later
ignorefile = zgitignore.ZgitIgnore(ignore_case=True, docker=True) # this is compatible with .dockerignore files
try:
with open('.gitignore', 'r') as f:
ignorefile.add_patterns(f.read().splitlines())
   except IOError:
       # no .gitignore file to read; keep the pattern list empty
       pass
# You can start paths with ./ or not.
# Paths are normalized to match Unix style paths
print('./a/b/c/d/e ignored?', ignorefile.is_ignored('./a/b/c/d/e'))
# But by default, parent directories aren't checked recursively
# To check them, use check_parents=True
f = zgitignore.ZgitIgnore(['build/'])
print('build/test ignored?', f.is_ignored('build/test')) # False
print('build/test ignored when check_parents=True?', f.is_ignored('build/test', check_parents=True)) # True
Format
------
zgitignore supports format similar to ``.gitignore`` file format. Differences are:
- ``**`` works everywhere
::
    aaa**ooo
Will match ``aaapotato2000/beeeee/llllll/sdsdooo``
- It can embed custom regex via ``{}``. You can use ``\}`` to pass ``}`` to regex and ``\\`` to pass ``\`` to regex
::
aaa{12(34|56|78)oo(aa|bb|dd)ii}888
Will match ``aaa1256oobbii888``
::
aaa{#[0-9a-f]{3,6\}}888
Will match ``aaa#00ffff888``
| zgitignore | /zgitignore-1.0.0.tar.gz/zgitignore-1.0.0/README.rst | README.rst |
import os
import re
def normalize_path(path, sep=os.path.sep):
path = path.replace(sep, '/')
while path.startswith('/'):
path = path[1:]
while path.startswith('./'):
path = path[2:]
    # endswith is safe even when normalization leaves an empty string
    if path.endswith('/'):
        path = path[:-1]
return path
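# Illustrative example (not from the original source):
#   normalize_path('.\\build\\dist\\', sep='\\') -> 'build/dist'
# leading './', the platform separator and a trailing slash are all
# normalized away so patterns can be matched against Unix-style paths.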
def convert_pattern(pat, docker=False, ignore_case=False):
if not pat or pat[0] == '#' or pat == '/':
return None
ptr, n = 0, len(pat)
regex = '^'
void = False # wow I can use this name, finally :O
negate = False
if pat[ptr] == '!':
negate = True
ptr += 1
# cut trailing spaces, sophisticated approach :)
ptr2 = n - 1
while pat[ptr2] == ' ':
num_backslashes = 0
ptr3 = ptr2 - 1
while ptr3 >= 0 and pat[ptr3] == '\\':
num_backslashes += 1
ptr3 -= 1
if not num_backslashes % 2:
ptr2 -= 1
else:
break
pat = pat[:ptr2 + 1]
dironly = pat[-1] == '/'
if (dironly):
pat = pat[:-1]
n = len(pat)
# because if it ends with a slash and doesn't contain any other ones, it's
# still for any directory
if '/' not in pat[:-1] and not docker:
regex += '(?:.+/)?'
    # cut a leading '/': it only anchors the pattern to the root (the
    # '(?:.+/)?' prefix above was already skipped for that case), and the
    # slash itself must not be matched
    if pat[ptr] == '/':
        ptr += 1
while ptr < n:
if void:
regex += re.escape(pat[ptr])
void = False
ptr += 1
elif pat[ptr] == '\\':
ptr += 1
void = True
elif pat[ptr] == '*':
# ** means anything, .*
if not ptr == n - 1 and pat[ptr + 1] == '*':
# **/ can also match nothing
if not ptr == n - 2 and pat[ptr + 2] == '/':
regex += '(?:.+/)?'
ptr += 3
else:
regex += '.*'
ptr += 2
else:
regex += '[^/]*'
ptr += 1
elif pat[ptr] == '?':
regex += '[^/]'
ptr += 1
elif pat[ptr] == '[':
ptr2 = ptr + 1
if ptr2 < n and pat[ptr2] == '!':
ptr2 += 1
if ptr2 < n and pat[ptr2] == ']':
ptr2 += 1
while ptr < n and pat[ptr2] != ']':
ptr2 += 1
if ptr2 < n:
ptr2 += 1
regex += '['
if pat[ptr + 1] == '!':
regex += '^'
ptr += 1
elif pat[ptr + 1] == '^':
regex += '\\^'
ptr += 1
regex += pat[ptr + 1:ptr2].replace('\\', '\\\\')
ptr = ptr2
else:
regex += '\\['
ptr += 1
# powerful and easy... idk why the [ stuff even exists.
elif pat[ptr] == '{':
ptr2 = ptr + 1
escape2 = False
while ptr2 < n:
if escape2:
# so \\ is \ in regex not \\. example => aaa{asas\\n\}fgfg}
regex += pat[ptr2]
escape2 = False
ptr2 += 1
elif pat[ptr2] == '\\':
escape2 = True
ptr2 += 1
elif pat[ptr2] == '}':
ptr2 += 1
break
else:
regex += pat[ptr2]
ptr2 += 1
# now ptr2 points to the thing after }
ptr = ptr2
else:
regex += re.escape(pat[ptr])
ptr += 1
regex += '$'
return regex, dironly, negate, re.compile(regex, re.DOTALL | (re.IGNORECASE if ignore_case else 0))
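# Illustrative example (not from the original source):
#   convert_pattern('build/') returns roughly
#   ('^(?:.+/)?build$', True, False, <compiled regex>)
# dironly is True because of the trailing slash, negate is False because
# there is no leading '!', and the '(?:.+/)?' prefix lets the pattern
# match a 'build' folder at any depth, mirroring .gitignore semantics.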
class ZgitIgnore():
def __init__(self, lines=None, ignore_case=False, docker=False):
self.lines = lines
self.ignore_case = ignore_case
self.patterns = [] # order is important
self.docker = docker
if lines:
self.add_patterns(lines)
def add_patterns(self, lines):
for line in lines:
pattern = convert_pattern(line, self.docker, self.ignore_case)
if pattern:
self.patterns.append(pattern)
def is_ignored(self, what, is_directory=False, check_parents=False):
what = normalize_path(what)
parent = os.path.dirname(what)
if check_parents and parent != '':
result = self.is_ignored(parent, is_directory=True, check_parents=True)
if result:
return True
ignored = False
for pattern, directory_only, negated, compiled_pattern in self.patterns:
if (not directory_only or is_directory) and re.match(
compiled_pattern, what):
ignored = not negated
return ignored
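    # Note (illustrative, not from the original source): patterns are
    # evaluated in order and the last match wins, so with
    # ZgitIgnore(['build/', '!build/keep/']) the folder 'build' is ignored
    # but is_ignored('build/keep', True) is False, because the later
    # negated pattern un-ignores it. This matches .gitignore behaviour.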
def __len__(self):
return len(self.patterns) | zgitignore | /zgitignore-1.0.0.tar.gz/zgitignore-1.0.0/zgitignore.py | zgitignore.py |
import functools
import logging
import sys
import time
import wx
import numpy
import Menu as MenuModule
MODIFIERS = {
'alt':'AltDown',
'cmd':'CmdDown',
'ctrl':'ControlDown',
'meta':'MetaDown',
'shift':'ShiftDown'
}
def getModifiers(wxEvent):
# alt, cmd, ctrl, meta, shift
modifiers = {}
for key, attr in MODIFIERS.iteritems():
modifiers[key] = getattr(wxEvent, attr)()
pass
return modifiers
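# Illustrative example (not from the original source): for a shift-click,
# getModifiers(event) returns
# {'alt': False, 'cmd': False, 'ctrl': False, 'meta': False, 'shift': True}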
def shouldResetMouseDownCallback(event, modifiers=None):
    if modifiers is not None:
        # getModifiers is defined above; no need to re-import this module
        newModifiers = getModifiers(event)
        if newModifiers != modifiers:
return True
return False
class EventHandler(object):
def eventSource(self, value=None):
if value is not None:
self._eventSource = value
return self._eventSource
def OnTimerFraction(self, event):
# default do nothing
return
# END class EventHandler
pass
class CanvasEventHandler(wx.EvtHandler, EventHandler):
def __init__(self, eventSource):
wx.EvtHandler.__init__(self)
self.eventSource(eventSource)
pass
def bindEvents(self):
self.Bind(wx.EVT_SIZE, self.OnSize, source=self.eventSource())
self.Bind(wx.EVT_PAINT, self.OnPaint, source=self.eventSource())
# call the on_timer function
wx.EVT_TIMER(self.eventSource().GetParent(),
self.eventSource().TIMER_ID,
self.OnTimerFraction)
return
def OnSize(self, evt):
eventSource=self.eventSource()
w,h = eventSource.GetClientSize()
eventSource.width = w
eventSource.height = h
dc = wx.ClientDC(eventSource)
eventSource.Render(dc)
return
def OnPaint(self, evt):
dc = wx.PaintDC(self.eventSource())
self.eventSource().Render(dc)
return
def OnTimerFraction(self, event):
self.eventSource().forceRedraw()
return
# END class CanvasEventHandler
pass
class MouseEventHandler(wx.EvtHandler, EventHandler):
def __init__(self, eventSource):
wx.EvtHandler.__init__(self)
self.eventSource(eventSource)
pass
def bindEvents(self):
self.Bind(wx.EVT_LEFT_DOWN, self.OnLeftMouseButtonDown, source=self.eventSource())
self.Bind(wx.EVT_LEFT_UP, self.OnLeftMouseButtonUpDefault, source=self.eventSource())
self.Bind(wx.EVT_MOTION, self.OnMouseMovedDefault, source=self.eventSource())
self.Bind(wx.EVT_RIGHT_DOWN, self.OnRightMouseButtonDown, source=self.eventSource())
self.Bind(wx.EVT_RIGHT_UP, self.OnRightMouseButtonUp, source=self.eventSource())
return
def computeRawMouseMove(self, initialPosition, finalPosition):
initialMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(initialPosition.x, initialPosition.y))
finalMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(finalPosition.x, finalPosition.y))
return finalMatrix - initialMatrix
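    # Convention note (illustrative, not from the original source): mouse
    # positions are carried in the bottom row of a 4x4 matrix, i.e.
    # m[3,0] is x and m[3,1] is y, so subtracting the two matrices leaves
    # the raw (dx, dy) displacement in that same row.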
def OnMouseMovedDefault(self, event):
return
def OnPanViewport(self, event,
basePosition=None,
resetCallback=None,
shouldResetCallbackFunction=None):
if shouldResetCallbackFunction(event):
resetCallback()
baseCanvasMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(basePosition.x, basePosition.y))
baseWorldMatrix = self.eventSource().getWorldCoordinatesFromCanvasCoordinates(baseCanvasMatrix)
newCanvasMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(event.GetX(), event.GetY()))
newWorldMatrix = self.eventSource().getWorldCoordinatesFromCanvasCoordinates(newCanvasMatrix)
deltaMatrix = newWorldMatrix - baseWorldMatrix
self.eventSource().panMatrix = deltaMatrix + self.eventSource().initialPanMatrix
return
def OnZoomViewport(self, event,
basePosition=None,
resetCallback=None,
shouldResetCallbackFunction=None):
if shouldResetCallbackFunction(event):
resetCallback()
delta = self.computeRawMouseMove(basePosition, event.GetPosition())
delta_zoom = -0.005 * delta[3,0]
newZoomFactor = min(max(self.eventSource().initialZoomMatrix[0,0]+delta_zoom,0.5),10.0)
newZoomMatrix = numpy.matrix('%s 0 0 0; 0 %s 0 0; 0 0 %s 0; 0 0 0 %s' %
(newZoomFactor, newZoomFactor, newZoomFactor, newZoomFactor))
self.eventSource().zoomMatrix = newZoomMatrix
return
def OnBoundingBoxSelection(self, event,
basePosition=None,
resetCallback=None,
shouldResetCallbackFunction=None):
baseCanvasMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(basePosition.x, basePosition.y))
baseWorldMatrix = self.eventSource().getWorldCoordinatesFromCanvasCoordinates(baseCanvasMatrix)
newCanvasMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(event.GetX(), event.GetY()))
newWorldMatrix = self.eventSource().getWorldCoordinatesFromCanvasCoordinates(newCanvasMatrix)
deltaMatrix = newWorldMatrix - baseWorldMatrix
self.eventSource().selection_rectangle = (
baseWorldMatrix[3,0], baseWorldMatrix[3,1],
deltaMatrix[3,0], deltaMatrix[3,1]
)
objectsInBoundBox = self.eventSource().getObjectsIntersectingRect(self.eventSource().selection_rectangle)
self.eventSource().setSelection(self.eventSource().initialSelection + objectsInBoundBox)
return
def OnLeftMouseButtonDown(self, event):
point = event.GetPosition()
# alt, cmd, ctrl, meta, shift
modifiers = getModifiers(event)
shouldResetCallbackFunction = functools.partial(
shouldResetMouseDownCallback,
modifiers=modifiers)
mouseMoveCallback = None
mouseUpCallback=self.OnLeftMouseButtonUpDefault
        if modifiers['alt']:
            mouseMoveCallback = self.OnPanViewport
            self.eventSource().initialPanMatrix = self.eventSource().panMatrix
elif modifiers['shift']:
mouseMoveCallback = self.OnZoomViewport
self.eventSource().initialZoomMatrix = self.eventSource().zoomMatrix
else:
clickedObjects = [x for x in self.eventSource().getClickableObjectsAtCanvasCoordinates(point.x, point.y)]
clickedObject = None
            if len(clickedObjects) != 0:
clickedObject = clickedObjects[0]
if clickedObject is not None:
# every clickable object needs to have a OnDrag callback
mouseMoveCallback = functools.partial(
clickedObject.OnDrag,
canvas=self.eventSource())
if clickedObject.hasCustomDragReleaseCallback():
mouseUpCallback = functools.partial(
clickedObject.OnDragRelease,
canvas=self.eventSource(),
eventHandler=self)
pass
if (modifiers['ctrl'] or modifiers['cmd']) and \
clickedObject.isSelectable():
self.eventSource().addToSelection([clickedObject])
else:
self.eventSource().setSelection([clickedObject])
pass
objects = self.eventSource().getSelection()
mouseMoveCallback = functools.partial(
mouseMoveCallback,
objects=objects)
for object in objects:
object.initialX = object.x
object.initialY = object.y
pass
else:
if modifiers['ctrl'] or modifiers['cmd']:
# because the "add to" meta key is still active
# we don't want to reset the selection
pass
else:
self.eventSource().setSelection([])
self.eventSource().initialSelection = self.eventSource().getSelection()
mouseMoveCallback = self.OnBoundingBoxSelection
mouseUpCallback = self.resetSelectionBoundingBox
pass
mouseMoveCallback = functools.partial(
mouseMoveCallback,
basePosition=event.GetPosition(),
resetCallback=self.resetMouseCallbacks,
shouldResetCallbackFunction=shouldResetCallbackFunction)
self.Bind(wx.EVT_MOTION, mouseMoveCallback, source=self.eventSource())
self.Bind(wx.EVT_LEFT_UP, mouseUpCallback, source=self.eventSource())
return
def OnLeftMouseButtonUpDefault(self, event):
self.resetMouseMovedCallback()
return
def resetSelectionBoundingBox(self, event):
self.resetMouseCallbacks()
self.eventSource().selection_rectangle=None
return
def resetMouseCallbacks(self):
self.resetMouseMovedCallback()
self.resetMouseUpCallback()
return
def resetMouseUpCallback(self):
self.Bind(wx.EVT_MOTION, self.OnLeftMouseButtonUpDefault, source=self.eventSource())
return
def resetMouseMovedCallback(self):
self.Bind(wx.EVT_MOTION, self.OnMouseMovedDefault, source=self.eventSource())
return
def OnRightMouseButtonDown(self, event):
point = event.GetPosition()
clickedObjects = [
x for x in
self.eventSource().getClickableObjectsAtCanvasCoordinates(point.x, point.y)]
clickedObject = None
        if len(clickedObjects) != 0:
            clickedObject = clickedObjects[0]
selection = self.eventSource().getSelection()[:]
if clickedObject is None:
pass
        elif len(selection) == 0 or clickedObject not in selection:
selection = [clickedObject]
popupMenu = None
# create a new popup menu
        if len(selection) == 0:
menuClass = self.eventSource().app().getResourceValue('canvas contextual menu class', MenuModule.CanvasContextualMenu)
popupMenu = menuClass(event, self, self.eventSource())
pass
else:
# get the contextual menu
# from the most recent item selected
popupMenu = selection[-1].getSelectionContextualMenu(
event, self, self.eventSource(), selection)
pass
popupMenu.bindEvents()
self.eventSource().PopupMenu(
popupMenu, event.GetPosition())
# according to http://wiki.wxpython.org/PopupMenuOnRightClick
# need to call destroy
popupMenu.Destroy()
return
def OnRightMouseButtonUp(self, event):
point = event.GetPosition()
print "right up at (%s,%s)" % (point.x, point.y)
return
# END class MouseEventHandler
pass
class KeyEventHandler(wx.EvtHandler, EventHandler):
def __init__(self, eventSource):
wx.EvtHandler.__init__(self)
self.eventSource(eventSource)
pass
def bindEvents(self):
self.Bind(wx.EVT_KEY_DOWN, self.OnKeyPressed)
self.Bind(wx.EVT_KEY_UP, self.OnKeyReleased)
return
def OnKeyPressed(self, event):
modifiers = getModifiers(event)
return
def OnKeyReleased(self, event):
modifiers = getModifiers(event)
return
# END class KeyEventHandler
pass
class MyPopupMenu(wx.Menu):
def __init__(self, parent):
wx.Menu.__init__(self)
self.parent = parent
minimize = wx.MenuItem(self, wx.NewId(), 'Minimize')
self.AppendItem(minimize)
self.Bind(wx.EVT_MENU, self.OnMinimize, id=minimize.GetId())
close = wx.MenuItem(self, wx.NewId(), 'Close')
self.AppendItem(close)
self.Bind(wx.EVT_MENU, self.OnClose, id=close.GetId())
def OnMinimize(self, event):
self.parent.Iconize()
def OnClose(self, event):
self.parent.Close() | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/Event.py | Event.py |
import logging
from Responder import *
import zgl.zglPrimitives as PrimitivesModule
import zgl_graphdrawer.Menu as MenuModule
class Port(Responder):
PART_BACKGROUND = "port background"
PORT_DIRECTION_INPUT = 1
PORT_DIRECTION_OUTPUT = 2
@staticmethod
def hasCustomDragReleaseCallback():
return True
@staticmethod
def OnDrag(event, canvas=None,
objects=None,
basePosition=None,
resetCallback=None,
shouldResetCallbackFunction=None):
if shouldResetCallbackFunction(event):
resetCallback()
pass
canvas.computeDynamicEdge(basePosition, event.GetPosition())
pass
def __init__(self, portName):
Responder.__init__(self)
self.x = 0
self.y = 0
self.width = 0
self.height = 0
self.children = []
self.name = portName
self.edge = None
self.basePosition = (0, 0)
return
def OnDragRelease(self, event, canvas=None,
objects=None,
eventHandler=None):
if canvas is None:
raise ValueError('canvas should not be None')
# TODO:
# should check if released on a port
# that can be connected to the initial port
# and if so, create an actual edge
releasePosition = event.GetPosition()
contextManager = canvas.contextManager()
if contextManager is None:
raise ValueError('canvas should have access to the context manager')
clickReleaseObjects = [
x for x in
canvas.getClickableObjectsAtCanvasCoordinates(
releasePosition.x, releasePosition.y)
if isinstance(x, Port)]
clickReleaseObject = None
if len(clickReleaseObjects) is not 0:
clickReleaseObject = clickReleaseObjects[0]
edge = None
if clickReleaseObject is not None and \
contextManager.canConnect(self, clickReleaseObject):
edge = contextManager.connect(self, clickReleaseObject)
pass
# reset the dynamic edge
# (because there won't be one anymore)
canvas._dynamicEdge = None
# reset the mouse moved callback
eventHandler.resetMouseMovedCallback()
return edge
def initialSetup(self):
"""
This is a placeholder
that subclasses can override and
hook in custom configuration
"""
return
def positionIndex(self, value=None):
if value is not None:
self._positionIndex = value
return self._positionIndex
def positionCount(self, value=None):
if value is not None:
self._positionCount = value
return self._positionCount
def isClickable(self):
return True
def updateColours(self):
# No color policy for Ports set up yet
#colours = self.parts["colours"]
background = self.parts[Port.PART_BACKGROUND]
if self.edge is not None:
background.borderColour = [0.8, 0.8, 0.0, 1.0]
# background.colour = [0.5, 0.5, 0.0, 1.0]
else:
background.borderColour = [0.8, 0.8, 0.0, 1.0]
background.colour = None
pass
return
def overlaps(self, point):
"""
TODO:
This is copied from Node.Node
Need to remove duplication
"""
selfX = self.x + self.node.x
selfY = self.y + self.node.y
if point.x < selfX:
return False
if point.x > selfX+self.width:
return False
if point.y < selfY:
return False
if point.y > selfY+self.height:
return False
return True
def draw(self):
glPushMatrix()
glTranslate(self.x, self.y, 0)
for child in self.children:
child.draw()
glPopMatrix()
return
def getSelectionContextualMenu(self, event,
eventHandler, canvas, selection):
popupMenu = SelectionContextualMenu(
event, eventHandler, canvas, selection)
return popupMenu
def createBackgroundPrimitive(self, portPolicy):
background = PrimitivesModule.zglRect()
background.position = [0, 0, 0]
background.size = [self.width, self.height, 1.0]
background.corner_mode = True
background.colour = None
background.borderColour = [0.5, 0.5, 0.0, 1.0]
return background
def setupPrimitives(self, portPolicy):
background = self.createBackgroundPrimitive(portPolicy)
self.parts = {}
self.parts[Port.PART_BACKGROUND] = background
self.children.append(background)
return
# END class Port
pass
class SelectionContextualMenu(MenuModule.SelectionContextualMenu):
def bindEvents(self):
pass
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/Port.py | Port.py |
import math
class MouseHandler(object):
def __init__(self, target):
self.target = target
self.isActive(False)
return
def isActive(self, value=None):
if value is not None:
self._isActive = value
return self._isActive
def updateLatestMousePosition(self, x, y):
self.last_mouse_x = x
self.last_mouse_y = y
return
def mouseButtonDown(self, button, x, y):
self.updateLatestMousePosition(x, y)
self.isActive(True)
def mouseMoved(self, x, y):
self.updateLatestMousePosition(x, y)
def mouseButtonUp(self, button, x, y):
self.isActive(False)
def mouseDragged(self, x, y):
pass
class CanvasSelectionMouseHandler (MouseHandler):
def __init__(self, target, selection):
MouseHandler.__init__(self,target)
self.selection = selection
self.is_drawing_selection_rect = False
def mouseButtonDown(self, button, x, y):
MouseHandler.mouseButtonDown(self, button, x, y)
clickedObject = self.target.getObjectAtViewCoordinates(x,y)
if clickedObject is not None:
if not clickedObject.isSelected():
self.selection.setSelection([clickedObject])
return clickedObject
else:
self.selection.setSelection([])
self.is_drawing_selection_rect = True
self.selection_rect_origin = self.target.getCanvasCoordinatesFromViewCoordinates(x,y)
return True
def mouseDragged(self, x, y):
if self.is_drawing_selection_rect:
self.selection_rect_endpoint = self.target.getCanvasCoordinatesFromViewCoordinates(x,y)
if self.hasSelectionRect():
self.target.selection_rectangle = self.getSelectionRect()
selectedObjects = self.target.getObjectsIntersectingRect(self.target.selection_rectangle)
self.selection.setSelection(selectedObjects)
self.updateLatestMousePosition(x, y)
def mouseButtonUp(self, button, x, y):
self.selection_rect_origin = None
self.selection_rect_endpoint = None
self.target.selection_rectangle = None
MouseHandler.mouseButtonUp(self, button, x, y)
def hasSelectionRect(self):
return self.selection_rect_endpoint is not None and \
self.selection_rect_origin is not None
def getSelectionRect(self):
if not self.hasSelectionRect():
raise NotImplementedError('selection rect is undefined')
x = min(self.selection_rect_endpoint[0],
self.selection_rect_origin[0])
y = min(self.selection_rect_endpoint[1],
self.selection_rect_origin[1])
width = max(self.selection_rect_endpoint[0],
self.selection_rect_origin[0]) - x
height = max(self.selection_rect_endpoint[1],
self.selection_rect_origin[1]) - y
return (x,y,width,height)
class CanvasScrollMouseHandler (MouseHandler):
# For Canvas MouseHandlers, the target is the canvas itself
def mouseDragged(self, x, y):
delta = (x - self.last_mouse_x, y - self.last_mouse_y)
if self.isActive():
self.target.scroll_position[0] = self.target.scroll_position[0] - delta[0]
self.target.scroll_position[1] = self.target.scroll_position[1] - delta[1]
self.updateLatestMousePosition(x, y)
class CanvasZoomMouseHandler (MouseHandler):
def mouseButtonDown(self, button, x, y):
MouseHandler.mouseButtonDown(self, button, x, y)
self.mouseDownX = x
self.mouseDownY = y
self.originalZoom = self.target.zoom
self.originalScroll = self.target.scroll_position[:]
self.zoom_center = self.target.getCanvasCoordinatesFromViewCoordinates(x,y)
def mouseDragged(self, x, y):
delta = (x - self.last_mouse_x, y - self.last_mouse_y)
if self.isActive() and math.fabs(delta[0]) > 0:
delta_zoom = -0.005 * delta[0]
newZoom = min(max(self.target.zoom + delta_zoom, 0.5), 2.0)
self.target.zoom = newZoom
new_zoom_center = self.target.getCanvasCoordinatesFromViewCoordinates(x, y)
delta_scroll = (self.zoom_center[0]-new_zoom_center[0], self.zoom_center[1]-new_zoom_center[1])
self.target.scroll_position = [self.target.scroll_position[0] + delta_scroll[0], self.target.scroll_position[1] + delta_scroll[1]]
self.updateLatestMousePosition(x, y)
class ObjectDragMoveMouseHandler (MouseHandler):
# Target is a list of objects being moved
# Canvas is the canvas containing the object (needed in case of zoom)
def __init__(self, target, canvas, delegate):
MouseHandler.__init__(self, target)
self.canvas = canvas
self.delegate = delegate
def mouseDragged(self, x, y):
delta = (x - self.last_mouse_x, y - self.last_mouse_y)
for object in self.target:
object.x += delta[0] * self.canvas.zoom
object.y += delta[1] * self.canvas.zoom
if self.delegate is not None:
self.delegate.objectsMoved(self.target)
self.updateLatestMousePosition(x, y) | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/MouseHandlers.py | MouseHandlers.py |
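# Example (illustrative sketch, not part of the original module):
# exercises CanvasScrollMouseHandler's drag bookkeeping with a stub
# canvas; only a scroll_position attribute is needed for this handler.
if __name__ == '__main__':
    class _StubCanvas(object):
        def __init__(self):
            self.scroll_position = [0.0, 0.0]
    canvas = _StubCanvas()
    handler = CanvasScrollMouseHandler(canvas)
    handler.mouseButtonDown(0, 100, 100)   # activates the handler
    handler.mouseDragged(110, 95)          # drag right 10, up 5
    # scrolling moves opposite to the drag: expect [-10.0, 5.0]
    print canvas.scroll_position
    handler.mouseButtonUp(0, 110, 95)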
import copy
import math
import VisualPolicy as VisualPolicyModule
import zgl.zglPrimitives as PrimitivesModule
import zgl.zglText as TextModule
import zgl.zglUtils as zglUtils
import zgl_graphdrawer.Port as PortModule
class NodePolicy(object):
KEY_BORDER_SELECTION_TRUE = "selected border"
KEY_BORDER_SELECTION_FALSE = "not selected border"
KEY_BACKGROUND_SELECTION_TRUE = "selected background"
KEY_BACKGROUND_SELECTION_FALSE = "not selected background"
KEY_TEXT_SELECTION_TRUE = "selected text"
KEY_TEXT_SELECTION_FALSE = "not selected text"
@staticmethod
def constructDefaultColours(nodePolicy):
import Node as NodeModule
visualPolicy = nodePolicy.visualPolicy()
colourBorderSelectionTrue = visualPolicy.colour(
"%s, %s" % (
NodeModule.Node.PART_BORDER,
VisualPolicyModule.VisualPolicy.KEY_SELECTION_TRUE
)
)
colourBorderSelectionFalse = visualPolicy.colour(
"%s, %s" % (
NodeModule.Node.PART_BORDER,
VisualPolicyModule.VisualPolicy.KEY_SELECTION_FALSE
)
)
colourBackgroundSelectionTrue = visualPolicy.colour(
"%s, %s" % (
NodeModule.Node.PART_BACKGROUND,
VisualPolicyModule.VisualPolicy.KEY_SELECTION_TRUE
)
)
colourBackgroundSelectionFalse = visualPolicy.colour(
"%s, %s" % (
NodeModule.Node.PART_BACKGROUND,
VisualPolicyModule.VisualPolicy.KEY_SELECTION_FALSE
)
)
colourTextSelectionTrue = visualPolicy.colour(
"%s, %s" % (
NodeModule.Node.PART_LABEL,
VisualPolicyModule.VisualPolicy.KEY_SELECTION_TRUE
)
)
colourTextSelectionFalse = visualPolicy.colour(
"%s, %s" % (
NodeModule.Node.PART_LABEL,
VisualPolicyModule.VisualPolicy.KEY_SELECTION_FALSE
)
)
colours = {
NodePolicy.KEY_BORDER_SELECTION_TRUE:colourBorderSelectionTrue,
NodePolicy.KEY_BORDER_SELECTION_FALSE:colourBorderSelectionFalse,
NodePolicy.KEY_BACKGROUND_SELECTION_TRUE:colourBackgroundSelectionTrue,
NodePolicy.KEY_BACKGROUND_SELECTION_FALSE:colourBackgroundSelectionFalse,
NodePolicy.KEY_TEXT_SELECTION_TRUE:colourTextSelectionTrue,
NodePolicy.KEY_TEXT_SELECTION_FALSE:colourTextSelectionFalse
}
return colours
def __init__(self, contextManager):
self.contextManager(contextManager)
return
def contextManager(self, value=None):
if value is not None:
self._manager = value
return self._manager
def initializeColours(self):
self.resetColours()
return
def resetColours(self):
self._colours = NodePolicy.constructDefaultColours(self)
return
def colour(self, key, value=None):
if value is not None:
self._colours[key] = value
return self._colours[key]
def visualPolicy(self, value=None):
if value is not None:
self._visualPolicy = value
return self._visualPolicy
def portPolicy(self, value=None):
if value is not None:
self._portPolicy = value
return self._portPolicy
def getHorizontalAlignmentOfLabel(self):
return TextModule.ALIGN_CENTER
def getVerticalAlignmentOfLabel(self):
return TextModule.ALIGN_MIDDLE
def getPositionForNameLabel(self):
return [0, 0, 0]
def setupDimensions(self, node):
node.width=100
node.height=50
return
# END class NodePolicy
pass
class SimpleNodePolicy(NodePolicy):
def __init__(self, contextManager):
NodePolicy.__init__(self, contextManager)
return
    def boundsCheck(self, node_x, node_y, node_width, node_height, x, y):
        # width and height are extents, so test against the far edges
        if x < node_x or x > node_x + node_width:
            return False
        if y < node_y or y > node_y + node_height:
            return False
        return True
def updateNode(self, node):
node.shouldUpdateDisplay(True)
return
def setPortPosition(self, node, port):
# TODO:
# some of the values in this function
# are calculated multiple times
# because this function is called once per port
# but some of the values,
# e.g. minimumPortAreaWidth
# is constant
portDirection = port.direction
yOffset = 0
if (portDirection == PortModule.Port.PORT_DIRECTION_INPUT):
yOffset = node.height + 5
elif (portDirection == PortModule.Port.PORT_DIRECTION_OUTPUT):
yOffset = -1*(port.height+5)
port.y = yOffset
minimumPortMargin = 5
portCount = port.positionCount()
minimumPortAreaWidth = \
port.width * portCount + minimumPortMargin * (portCount-1)
portIndex = port.positionIndex()
xOffset = 0
if minimumPortAreaWidth > node.width:
# Ports have to extend beyond the limit of the node
# because of size constraint
startLocation = (node.width-minimumPortAreaWidth)/2
            xOffset = startLocation + portIndex*(port.width + minimumPortMargin) - port.width/2
else:
xOffset = node.width/portCount * (portIndex+0.5) - port.width/2
port.x = xOffset
return
# END class SimpleNodePolicy
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/NodePolicy.py | NodePolicy.py |
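# Example (illustrative sketch, not part of the original module):
# shows where SimpleNodePolicy.setPortPosition places two input ports
# on a 100x50 node, using lightweight stand-ins for Node and Port.
# The contextManager argument only needs to be non-None for the accessor.
if __name__ == '__main__':
    class _StubPort(object):
        direction = PortModule.Port.PORT_DIRECTION_INPUT
        width = 10
        height = 10
        def positionCount(self): return 2
        def positionIndex(self): return self._index
    class _StubNode(object):
        width = 100
        height = 50
    policy = SimpleNodePolicy(contextManager=object())
    node = _StubNode()
    for index in range(2):
        port = _StubPort()
        port._index = index
        policy.setPortPosition(node, port)
        # ports are centred in equal slices of the node's width:
        # expect (20.0, 55) then (70.0, 55)
        print (port.x, port.y)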
import math
import zgl.zglPrimitives as PrimitivesModule
class EdgePolicy(object):
"""
this defines the API for EdgePolicy
"""
KEY_SELECTION_TRUE = 'selected'
KEY_SELECTION_FALSE = 'not selected'
KEY_DYNAMIC = 'dynamic'
def visualPolicy(self, value=None):
if value is not None:
self._visualPolicy = value
return self._visualPolicy
# damn British spelling
def lineColour(self, value=None):
"""
TODO: modify this to make use of ColorPolicy
"""
if value is not None:
self._lineColour = value
return self._lineColour
"""
def setupEdge(self, edge):
raise NotImplementedError
"""
def updateEdge(self, edge):
raise NotImplementedError
def createPath(self, inputPoint, outputPoint, steps):
raise NotImplementedError
# END class EdgePolicy
pass
class SimpleEdgePolicy(EdgePolicy):
def __init__(self):
self.lineColour([1.0, 1.0, 1.0, 0.3])
return
"""
def setupEdge(self, edge):
edge.children = []
if edge.inputNode is None or edge.outputNode is None:
return
point1 = (edge.inputNode.x + edge.inputNode.width, edge.inputNode.y + edge.inputNode.height/2)
point2 = (edge.outputNode.x, edge.outputNode.y + edge.outputNode.height/2)
line = PrimitivesModule.zglLine()
line.points = self.createPath(point1, point2, 30)
line.position = (0.0, 0.0, -0.01)
edge.children.append(line)
return
"""
def updateEdge(self, edge):
pass
def createPath(self, inputPoint, outputPoint, steps=30):
points = []
delta = (outputPoint[0]-inputPoint[0], outputPoint[1]-inputPoint[1])
for i in xrange(steps+1):
theta = float(i)/steps
x = inputPoint[0] + delta[0] * theta
y = inputPoint[1] + delta[1] * (1-math.cos(theta*math.pi))/2
points.append( (x,y) )
return points
# END class SimpleEdgePolicy
pass
class SimpleVerticalEdgePolicy(EdgePolicy):
def __init__(self):
self.lineColour([0.75, 0.75, 0.0, 1.0])
return
def updateEdge(self, edge):
inputPort = edge.inputPort()
inputPort.edge = edge
inputPort.updateColours()
outputPort = edge.outputPort()
outputPort.edge = edge
outputPort.updateColours()
pass
def createPath(self, inputPoint, outputPoint, steps=30):
points = []
delta = (outputPoint[0]-inputPoint[0], outputPoint[1]-inputPoint[1])
for i in xrange(steps+1):
theta = float(i)/steps
x = inputPoint[0] + delta[0] * (1-math.cos(theta*math.pi))/2
y = inputPoint[1] + delta[1] * theta
points.append( (x,y) )
return points
# END class SimpleVerticalEdgePolicy
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/EdgePolicy.py | EdgePolicy.py |
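# Example (illustrative sketch, not part of the original module):
# samples both path shapes; the horizontal policy eases along y,
# the vertical one along x, but both interpolate the same endpoints.
if __name__ == '__main__':
    for policy in (SimpleEdgePolicy(), SimpleVerticalEdgePolicy()):
        path = policy.createPath((0.0, 0.0), (100.0, 50.0), steps=4)
        # five points from (0.0, 0.0) to (100.0, 50.0)
        print policy.__class__.__name__, path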
import logging
import numpy
import zgl.zglPrimitives as PrimitivesModule
import zgl.zglText as TextModule
import zgl_graphdrawer.Menu as MenuModule
from zgl_graphdrawer.NodePolicy import *
import zgl_graphdrawer.Port as PortModule
from zgl_graphdrawer.Responder import *
class Node(Responder):
PART_BORDER = "node border"
PART_BACKGROUND = "node background"
PART_LABEL = "node label"
@staticmethod
def hasCustomDragReleaseCallback():
return False
@staticmethod
def OnDrag(event, canvas=None,
objects=None,
basePosition=None,
resetCallback=None,
shouldResetCallbackFunction=None):
if objects is None:
            raise ValueError('objects should not be None')
if shouldResetCallbackFunction(event):
resetCallback()
originalCanvasMatrix = numpy.matrix(
'0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(basePosition.x, basePosition.y))
eventPosition = event.GetPosition()
eventCanvasMatrix = numpy.matrix(
'0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' %
(eventPosition.x, eventPosition.y))
originalWorldMatrix = canvas.getWorldCoordinatesFromCanvasCoordinates(originalCanvasMatrix)
eventWorldMatrix = canvas.getWorldCoordinatesFromCanvasCoordinates(eventCanvasMatrix)
worldDeltaMatrix = eventWorldMatrix - originalWorldMatrix
delta = (worldDeltaMatrix[3,0], worldDeltaMatrix[3,1])
for object in objects:
object.setPosition(object.initialX + delta[0],
object.initialY + delta[1])
pass
edgesToProcess = filter(
lambda x: x.inputNode() in objects or x.outputNode() in objects,
canvas.edges
)
map(lambda x: x.setupPrimitives(canvas.edgePolicy()), edgesToProcess)
return
def __init__(self, nodeData):
Responder.__init__(self)
self.x = 0
self.y = 0
self.width = 0
self.height = 0
self.inputPorts = {}
self.outputPorts = {}
self.parts = {
"inputPorts":self.inputPorts,
"outputPorts":self.outputPorts
}
self.nodeData = nodeData
self.shouldUpdateDisplay(True)
self.shouldUpdateImage(False)
self.shouldUpdateColours(True)
return
def contextManager(self, value=None):
if value is not None:
self._contextManager = value
if not hasattr(self, '_contextManager'):
self._contextManager = None
return self._contextManager
    def canvas(self):
        return self.contextManager().canvas
def shouldUpdateDisplay(self, value=None):
if value is not None:
self._shouldUpdateDisplay = value
if not hasattr(self, "_shouldUpdateDisplay"):
self._shouldUpdateDisplay = False
return self._shouldUpdateDisplay
def shouldUpdateImage(self, value=None):
if value is not None:
self._shouldUpdateImage = value
if not hasattr(self, "_shouldUpdateImage"):
self._shouldUpdateImage = False
return self._shouldUpdateImage
def shouldUpdateColours(self, value=None):
if value is not None:
self._shouldUpdateColours = value
if not hasattr(self, "_shouldUpdateColours"):
self._shouldUpdateColours = False
return self._shouldUpdateColours
def isInside(self, x, y):
if x < self.x or x > self.x+self.width:
return False
if y < self.y or y > self.y+self.height:
return False
return True
def overlaps(self, point):
if point.x < self.x:
return False
if point.x > self.x+self.width:
return False
if point.y < self.y:
return False
if point.y > self.y+self.height:
return False
return True
def intersectsRect(self, rect):
if (rect[0]+rect[2] > self.x) and \
(rect[0] < self.x+self.width) and \
(rect[1]+rect[3] > self.y) and \
(rect[1] < self.y+self.height):
return True
return False
def isSelectable(self):
return True
def isClickable(self):
return True
def updateDisplay(self):
if self.shouldUpdateColours():
self.updateColours()
if self.shouldUpdateImage():
self.updateImage()
return
def updateImage(self):
"""
The generic version does nothing
"""
return
def updateColours(self):
colours = self.parts["colours"]
background = self.parts[Node.PART_BACKGROUND]
text = self.parts[Node.PART_LABEL]
if self.isSelected():
logging.debug("updating for selected node")
background.borderColour = colours[NodePolicy.KEY_BORDER_SELECTION_TRUE]
background.colour = colours[NodePolicy.KEY_BACKGROUND_SELECTION_TRUE]
text.colour = colours[NodePolicy.KEY_TEXT_SELECTION_TRUE]
else:
logging.debug("updating for unselected node")
background.borderColour = colours[NodePolicy.KEY_BORDER_SELECTION_FALSE]
background.colour = colours[NodePolicy.KEY_BACKGROUND_SELECTION_FALSE]
text.colour = colours[NodePolicy.KEY_TEXT_SELECTION_FALSE]
pass
return
def createBackgroundPrimitive(self, size=None):
background = PrimitivesModule.zglRect()
background.position = [0, 0, 0]
if size is None:
size = self.getBounds()
background.size = size
background.corner_mode = True
return background
def getBounds(self):
return (self.width, self.height)
def createNamePrimitive(self, nodePolicy):
# Fit node to text size or simply to
# what graphviz gives us, which may not look good
# depending on the NodePolicy
#stringBounds = font.getSize(node.nodeData.name)
#nodeSize = (stringBounds[0] + 2*10, stringBounds[1] + 2*10)
nodeSize = self.getBounds()
# initialize the name label
nameLabel = TextModule.zglText()
nameLabel.position = nodePolicy.getPositionForNameLabel()
nameLabel.size = [nodeSize[0], nodeSize[1], 0]
nameLabel.horizontal_align = nodePolicy.getHorizontalAlignmentOfLabel()
nameLabel.vertical_align = nodePolicy.getVerticalAlignmentOfLabel()
nameLabel.font = self.font
nameLabel.text = self.nodeData.name()
return nameLabel
def setupPrimitives(self, nodePolicy):
# self.setupDimensions()
nodePolicy.setupDimensions(self)
font = nodePolicy.visualPolicy().font(
VisualPolicyModule.VisualPolicy.KEY_FONT_DEFAULT)
self.font = font
background = self.createBackgroundPrimitive()
nameLabel = self.createNamePrimitive(nodePolicy)
self.parts[Node.PART_BACKGROUND] = background
self.parts[Node.PART_LABEL] = nameLabel
self.parts["colours"] = nodePolicy._colours
self.setupPorts(nodePolicy)
self.shouldUpdateDisplay(True)
return
def retrieveInputPortNamesFromDataObject(self):
if not hasattr(self.nodeData, 'inputPorts'):
return []
return self.nodeData.inputPorts
def retrieveOutputPortNamesFromDataObject(self):
if not hasattr(self.nodeData, 'outputPorts'):
return []
return self.nodeData.outputPorts
def getResourceKeyForPort(self, portName):
return 'gui port class'
def setupPorts(self, nodePolicy):
portPolicy = nodePolicy.portPolicy()
inputPortNames = self.retrieveInputPortNamesFromDataObject()
outputPortNames = self.retrieveOutputPortNamesFromDataObject()
for portDirection, portNames, portMap in [
(PortModule.Port.PORT_DIRECTION_INPUT, inputPortNames, self.inputPorts),
(PortModule.Port.PORT_DIRECTION_OUTPUT, outputPortNames, self.outputPorts)]:
positionCount = len(portNames)
for index, portName in enumerate(portNames):
resourceKey = self.getResourceKeyForPort(portName)
portClass = self.contextManager().app().getResourceValue(
resourceKey, PortModule.Port)
port = portClass(portName)
port.node = self
port.direction = portDirection
port.positionIndex(index)
port.positionCount(positionCount)
port.initialSetup()
portPolicy.setupDimensions(port)
                # we need to first know the port's dimensions
                # before we can set its position
nodePolicy.setPortPosition(self, port)
port.setupPrimitives(portPolicy)
portMap[portName] = port
pass
pass
return
def getPosition(self):
return (self.x, self.y)
def setPosition(self, x, y):
self.x = x
self.y = y
return
def _buildChildren(self):
return [
self.parts[Node.PART_BACKGROUND],
self.parts[Node.PART_LABEL]
] + self.inputPorts.values() + self.outputPorts.values()
def children(self):
"""
A subclass can override this
"""
if not hasattr(self, '_children'):
# cache this so that we dont have to recompute
# for every redraw
self._children = self._buildChildren()
pass
return self._children
def draw(self):
if self.shouldUpdateDisplay():
logging.debug("updating node")
self.updateDisplay()
self.shouldUpdateDisplay(False)
glPushMatrix()
glTranslate(self.x, self.y, 0)
for child in self.children():
child.draw()
glPopMatrix()
return
def getInputPort(self, id):
return self.inputPorts[id]
def getOutputPort(self, id):
return self.outputPorts[id]
def getSelectionContextualMenu(self, event, eventHandler, canvas, selection):
popupMenu = SelectionContextualMenu(
event, eventHandler, canvas, selection)
return popupMenu
# END class Node
pass
class SelectionContextualMenu(MenuModule.SelectionContextualMenu):
def bindEvents(self):
self.addMenuItem('Edit', self.OnEdit)
self.addMenuItem('Delete', self.OnDelete)
return
def OnDelete(self, event):
# first remove all the node's incoming and outgoing edges
objectsToRemove = []
for edge in self.canvas.edges:
            if edge.outputNode() in self.objects or \
                    edge.inputNode() in self.objects:
objectsToRemove.append(edge)
objectsToRemove.extend(self.objects)
# remove the objects
map(self.canvas.removeChild, objectsToRemove)
return
def OnEdit(self, event):
print "%s should display editor for nodes" % self.__class
return
# END class SelectionContextualMenu
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/Node.py | Node.py |
import logging
import sys
import zgl_graphdrawer.Edge as EdgeModule
import zgl_graphdrawer.Node as NodeModule
from zgl_graphdrawer.MouseHandlers import *
class ContextManager(object):
def __init__(self):
        # initialize the dictionary that holds
        # the locations of the commands that may be called
self._commands = {}
return
def app(self, value=None):
if value is not None:
self._app = value
return self._app
def initializeCanvas(self, canvas):
self.canvas = canvas
return
def initializeEventHandlers(self):
self.canvas.initializeEventHandlers(contextManager=self)
pass
def resourcePath(self, value=None):
"""
this sets the path from which the resources should be loaded
"""
if value is not None:
self._resourcePath = value
if not hasattr(self, '_resourcePath'):
self._resourcePath = '.'
return self._resourcePath
def commandPath(self, key, value=None):
"""
this sets the path of a command
"""
if value is not None:
self._commands[key] = value
return self._commands[key]
def objectsMoved(self, objects):
for edge in self.edges.values():
            if edge.inputNode() in objects or \
                    edge.outputNode() in objects:
edge.setupPrimitives(self.canvas.edgePolicy())
pass
pass
return
def draw(self):
"""
TODO: move out of context manager class
"""
try:
self.canvas.draw()
except Exception, e:
logging.error(e)
sys.exit(1)
return
    def canConnect(self, sourcePort, targetPort):
        print "need to determine if can actually connect ports"
        return True
    def connect(self, sourcePort, targetPort):
        # Port.OnDragRelease passes the two Port objects directly,
        # so the signature takes ports rather than (node, portName) pairs
        print "should connect %s -> %s" % (sourcePort, targetPort)
        return
# END class ContextManager
pass
class GraphDrawerManager(ContextManager):
MOUSEMODE_SELECT = 0
MOUSEMODE_CANVAS_PAN = 1
MOUSEMODE_CANVAS_ZOOM = 2
MOUSEMODE_DRAG_OBJECT = 3
MOUSEMODE_HOTKEYS = {
' ':MOUSEMODE_CANVAS_PAN,
'z':MOUSEMODE_CANVAS_ZOOM
}
def initialize(self, canvas=None):
if canvas is None:
import zgl_graphdrawer.canvas.zgl_canvas as CanvasModule
canvas = CanvasModule.Canvas()
        # Selection Service
        self.selection = NodeSelection(self)
        # mouse mode that was active before a drag began;
        # restored by mouseButtonUp
        self.prevMouseMode = None
        self.initializeCanvas(canvas)
        pass
def updateFrame(self, frame):
self.canvas.updateFrame(frame)
def step(self):
""" called every frame. For animation purposes. """
return
def keyDown(self, key):
mouseHandler = self.canvas.activeMouseHandler()
if not mouseHandler.isActive():
self.canvas.activeMouseMode = GraphDrawerManager.MOUSEMODE_HOTKEYS.get(key, None)
pass
return
def keyUp(self, key):
"""
default to selection
"""
self.canvas.activeMouseMode = GraphDrawerManager.MOUSEMODE_SELECT
return
def mouseMoved(self, x, y):
self.canvas.mouseMoved(x, y)
def mouseButtonDown(self, button, x, y):
result = self.canvas.mouseButtonDown(button, x, y)
if result is not None and result != True:
# selection clicked on an object
self.canvas.mouseHandlers[GraphDrawerManager.MOUSEMODE_DRAG_OBJECT] = \
ObjectDragMoveMouseHandler(self.selection.selection, self.canvas, self)
# need to cache the previous mouse mode
self.prevMouseMode = self.canvas.activeMouseMode
self.canvas.activeMouseMode = GraphDrawerManager.MOUSEMODE_DRAG_OBJECT
self.canvas.mouseButtonDown(button, x, y)
return
def mouseDragged(self, x, y):
self.canvas.mouseDragged(x, y)
def mouseButtonUp(self, button, x, y):
self.canvas.mouseButtonUp(button, x, y)
# if the mousedown had changed the mouse mode
# we restore it here
if self.prevMouseMode is not None:
self.canvas.activeMouseMode = self.prevMouseMode
self.prevMouseMode = None
return
def setDataContext(self, nodes, edges):
"""
This is only used by the zglCanvas side
"""
self.nodes = {}
self.edges = {}
for nodeData in nodes:
uiNode = NodeModule.Node(nodeData)
self.nodes[nodeData] = uiNode
for edgeData in edges:
uiEdge = EdgeModule.Edge(edgeData)
if edgeData.inputNode() is not None:
uiEdge.inputNode(self.nodes[edgeData.inputNode()])
if edgeData.outputNode() is not None:
uiEdge.outputNode(self.nodes[edgeData.outputNode()])
self.edges[edgeData] = uiEdge
pass
self.canvas.resetDrawables()
map(self.canvas.addDrawable, self.nodes.itervalues())
map(self.canvas.addDrawable, self.edges.itervalues())
logging.debug("GraphDrawer setup %d nodes and %d edges" %
(len(self.nodes), len(self.edges)))
return
# END class GraphDrawerManager
pass
class NodeSelection:
def __init__(self, delegate):
self.selection = []
self.delegate = delegate
def count(self):
return len(self.selection)
def setSelection(self, newSelection):
oldSelection = self.selection
self.selection = newSelection
self.delegate.selectionChanged(oldSelection, newSelection)
return | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/GraphDrawerManager.py | GraphDrawerManager.py |
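# Example (illustrative sketch, not part of the original module):
# NodeSelection only needs a delegate exposing selectionChanged,
# so a stub is enough to observe the old/new selection handoff.
if __name__ == '__main__':
    class _StubDelegate(object):
        def selectionChanged(self, oldSelection, newSelection):
            print "selection %s -> %s" % (oldSelection, newSelection)
    selection = NodeSelection(_StubDelegate())
    selection.setSelection(['node1'])
    selection.setSelection(['node1', 'node2'])
    print selection.count()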
import logging
import os
import re
import subprocess
import tempfile
import numpy
class LayoutPolicy(object):
def __init__(self, contextManager):
self._manager = contextManager
return
def layoutNodes(self, nodes, edges):
        raise NotImplementedError('need to implement %s.layoutNodes' % self.__class__.__name__)
# END class LayoutPolicy
pass
class GraphVizLayoutPolicy(LayoutPolicy):
def temporaryFilePath(self, value=None):
        if value is not None:
            self._tmpFile = value
if not hasattr(self, '_tmpFile'):
self._tmpFile = os.sep.join(
[tempfile.gettempdir(), 'temp_dotFile.dot'])
return self._tmpFile
def generateDotFile(self, path, nodes, edges):
dotFile = "digraph G {\n"
#for node in nodes:
# dotFile += "\t\"%s\"\n" % node.nodeData.name()
for edge in edges:
dotFile += "\t\"%s\" -> \"%s\"\n" % (edge.inputNode().nodeData.name(), edge.outputNode().nodeData.name())
dotFile += "}"
logging.debug(dotFile)
fp = open(path, 'w')
fp.write(dotFile)
fp.close()
return
def layoutNodes(self, nodes, edges):
tmpFile = self.temporaryFilePath()
self.generateDotFile(tmpFile, nodes, edges)
# use the subprocess module to execute
layoutFile = '/tmp/pomset.layout'
command = [self._manager.commandPath('dot'),
tmpFile, '-o%s' % layoutFile]
try:
ret = subprocess.call(command)
except OSError:
pass
        # default bounding box, used if the layout file lacks a 'bb' attribute
        bb = [0.0, 0.0, 0.0, 0.0]
with open(layoutFile, 'r') as f:
for line in f.readlines():
logging.debug(line)
match = re.search('^\s*graph \[(.*)\];$', line)
if match is not None:
attributeString = match.group(1)
logging.debug("Graph has attributes: %s" % match.group(1))
attributes = self.getAttributesDict(attributeString)
if attributes.has_key('bb'):
bb = map(float, attributes['bb'].split(','))
                        continue
foundNode = False
for node in nodes:
nodeName = "%s" % (node.nodeData.name())
match = re.search('^\s*"{0,1}'+nodeName+'"{0,1}\s*\[(.*)\];$', line)
if match is not None:
attributeString = match.group(1)
attributes = self.getAttributesDict(attributeString)
if attributes.has_key('pos'):
position = 1.5 * numpy.matrix(
' '.join(attributes['pos'].split(',')))
logging.debug('position >> %s' % position)
node.setPosition(position[0,0], position[0,1])
pass
                        if attributes.has_key('width'):
                            node.width = float(attributes['width']) * 80
                        if attributes.has_key('height'):
                            node.height = float(attributes['height']) * 80
                        foundNode = True
if foundNode:
break
return (bb[2],bb[3])
def getAttributesDict(self, attributeString):
dict = {}
matches = re.findall('(\w+)="(.*?)"',attributeString)
if matches is not None:
for match in matches:
attributeName = match[0]
attributeValue = match[1]
dict[attributeName] = attributeValue
return dict | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/LayoutPolicy.py | LayoutPolicy.py |
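# Example (illustrative sketch, not part of the original module):
# getAttributesDict pulls key="value" pairs out of a dot layout line,
# which is how the bounding box and node positions are recovered above.
if __name__ == '__main__':
    policy = GraphVizLayoutPolicy(contextManager=None)
    line = 'pos="54,108" width="0.75" height="0.50"'
    # expect {'pos': '54,108', 'width': '0.75', 'height': '0.50'}
    print policy.getAttributesDict(line)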
import logging
from Responder import *
import zgl.zglPrimitives as PrimitivesModule
# An Edge defines a connection between two ports
# on a pair of nodes
class Edge(Responder):
def __init__(self, edgeData):
Responder.__init__(self)
self.edgeData = edgeData
self._inputNode = None
self._outputNode = None
return
def sourceNode(self, *args, **kwds):
return self.inputNode(*args, **kwds)
def targetNode(self, *args, **kwds):
return self.outputNode(*args, **kwds)
def outputNode(self, value=None):
if value is not None:
self._outputNode = value
return self._outputNode
def inputNode(self, value=None):
if value is not None:
self._inputNode = value
return self._inputNode
    def inputPort(self):
        # the edge's input end is the *output* port of the source node
        if self.inputNode() is None:
            raise AttributeError('no attribute "inputNode" found')
        return self.inputNode().getOutputPort(self.edgeData.inputPort)
    def outputPort(self):
        # the edge's output end is the *input* port of the target node
        if self.outputNode() is None:
            raise AttributeError('no attribute "outputNode" found')
        return self.outputNode().getInputPort(self.edgeData.outputPort)
def setupPrimitives(self, edgePolicy):
self.children = []
try:
inputPort = self.inputPort()
outputPort = self.outputPort()
point1 = (self.inputNode().x + inputPort.x + inputPort.width/2,
self.inputNode().y + inputPort.y + inputPort.height/2)
point2 = (self.outputNode().x + outputPort.x + outputPort.width/2,
self.outputNode().y + outputPort.y + outputPort.height/2)
except AttributeError, e:
logging.error(e)
return
line = PrimitivesModule.zglLine()
points = edgePolicy.createPath(point1, point2, 30)
line.points= points
self.points = points
self.children.append(line)
edgePolicy.updateEdge(self)
self.setupColors(edgePolicy)
return
    def setupColors(self, edgePolicy):
        # apply the policy's line colour to every child primitive
        for child in self.children:
            child.colour = edgePolicy.lineColour()
        return
def draw(self):
for child in self.children:
child.draw()
# Override hit detection
# Do not allow edges to be selected for now
def isInside(self, x, y):
return False
def intersectsRect(self, rect):
return False
# END class Edge
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/Edge.py | Edge.py |
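# Example (illustrative sketch, not part of the original module):
# the combined getter/setter idiom used throughout this package:
# calling with a value sets it, calling with no argument reads it back.
# _StubEdgeData is a demo-only stand-in for a real edge data object.
if __name__ == '__main__':
    class _StubEdgeData(object):
        pass
    edge = Edge(_StubEdgeData())
    print edge.inputNode()            # None until a node is attached
    edge.inputNode('source node')
    print edge.sourceNode()           # 'source node', via the alias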
import logging
import math, sys, os, time
import OpenGL
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import zgl.zglUtils as zglUtilsModule
import zgl.zglText as TextModule
import zgl.zglApplication as ZglApplicationModule
from zgl.zglPrimitives import zglMouse
from zgl_graphdrawer.EdgePolicy import *
from zgl_graphdrawer.GraphDrawerManager import GraphDrawerManager
from zgl_graphdrawer.LayoutPolicy import *
from zgl_graphdrawer.NodePolicy import *
from zgl_graphdrawer.PortPolicy import *
from zgl_graphdrawer.VisualPolicy import *
DEFAULT_WIDTH = 800
DEFAULT_HEIGHT = 600
class Application(object):
def __init__(self):
self._resourceValues = {}
return
def setResourceValue(self, key, value):
self._resourceValues[key] = value
return
def getResourceValue(self, key, default=None):
if not key in self._resourceValues:
return default
return self._resourceValues[key]
    def setDefaultResourcePath(self):
        resourcePath = os.path.sep.join(
            os.getenv('ZGL_HOME').split('/')+['resources'])
        self.setResourcePath(resourcePath)
        return
def setResourcePath(self, resourcePath):
if not os.path.exists(resourcePath):
raise IOError('resource path %s does not exist' % resourcePath)
self._resourcePath = resourcePath
return
def contextManager(self, value=None):
if value is not None:
self.graphDrawerManager = value
return self.graphDrawerManager
def initializePolicies(self):
contextManager = self.contextManager()
visualPolicy = self.initializeVisualPolicy(contextManager)
nodePolicy = self.initializeNodePolicy(contextManager)
edgePolicy = self.initializeEdgePolicy(contextManager)
layoutPolicy = self.initializeLayoutPolicy(contextManager)
portPolicy = self.initializePortPolicy(contextManager)
nodePolicy.portPolicy(portPolicy)
return
def initializeEdgePolicy(self, contextManager):
canvas = contextManager.canvas
# create and set the edge policy
key = 'edge policy'
classObject = self.getResourceValue(
"%s class" % key, default=SimpleVerticalEdgePolicy)
edgePolicy = classObject()
canvas.edgePolicy(edgePolicy)
visualPolicy = canvas.visualPolicy()
edgePolicy.visualPolicy(visualPolicy)
# set the colors for the node policy
# we can instead set it on the visual policy first
for key, value in [
(EdgePolicy.KEY_DYNAMIC, [0.85, 0.85, 0.85, 1.0]),
(EdgePolicy.KEY_SELECTION_TRUE, [0.85, 0.85, 0.0, 1.0]),
(EdgePolicy.KEY_SELECTION_FALSE, [0.7, 0.7, 0.0, 1.0])]:
visualPolicy.colour(key, value)
pass
return edgePolicy
def initializeLayoutPolicy(self, contextManager):
canvas = contextManager.canvas
# create and set the layout policy
key = 'layout policy'
classObject = self.getResourceValue(
"%s class" % key, default=GraphVizLayoutPolicy)
layoutPolicy = classObject(contextManager)
canvas.layoutPolicy(layoutPolicy)
return layoutPolicy
def initializeNodePolicy(self, contextManager):
canvas = contextManager.canvas
# create the node policy for this application
key = 'node policy'
classObject = self.getResourceValue(
"%s class" % key, default=SimpleNodePolicy)
nodePolicy = classObject(contextManager)
# set the visual policy
visualPolicy = canvas.visualPolicy()
nodePolicy.visualPolicy(visualPolicy)
# set the colors for the node policy
# we can instead set it on the visual policy first
nodePolicy.initializeColours()
for key, value in [
(NodePolicy.KEY_BORDER_SELECTION_TRUE, [1.0, 1.0, 1.0, 1.0]),
(NodePolicy.KEY_BORDER_SELECTION_FALSE, [0.6, 0.6, 0.6, 1.0]),
(NodePolicy.KEY_BACKGROUND_SELECTION_TRUE, [0.4, 0.4, 0.6, 1.0]),
(NodePolicy.KEY_BACKGROUND_SELECTION_FALSE, [0.3, 0.3, 0.5, 0.8]),
(NodePolicy.KEY_TEXT_SELECTION_TRUE, [1.0, 1.0, 1.0, 1.0]),
(NodePolicy.KEY_TEXT_SELECTION_FALSE, [0.8, 0.8, 0.8, 1.0])]:
nodePolicy.colour(key, value)
pass
# set the node policy
canvas.nodePolicy(nodePolicy)
return nodePolicy
def initializeVisualPolicy(self, contextManager):
# create the visual policy
key = 'visual policy'
classObject = self.getResourceValue(
"%s class" % key, default=VisualPolicy)
visualPolicy = classObject(contextManager)
        # set up the default font
pathForDefaultFont = visualPolicy.getPathForDefaultFont()
font = TextModule.zglFontManager.getFont(pathForDefaultFont, 16)
visualPolicy.font(VisualPolicy.KEY_FONT_DEFAULT, font)
canvas = contextManager.canvas
# set the visual policy
canvas.visualPolicy(visualPolicy)
return visualPolicy
def initializePortPolicy(self, contextManager):
key = 'port policy'
classObject = self.getResourceValue(
"%s class" % key, default=PortPolicy)
portPolicy = classObject(contextManager)
canvas = contextManager.canvas
canvas.portPolicy(portPolicy)
return portPolicy
# END class Application
pass
class zglApplication(Application, ZglApplicationModule.zglApplication):
mouse_drag = False
last_mouse_x = 0
last_mouse_y = 0
def __init__(self, name=None,
width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT):
if name is None:
name = 'Application'
ZglApplicationModule.zglApplication.__init__(
self, width=width, height=height, app_name=name)
Application.__init__(self)
glClearColor(0.1, 0.1, 0.2, 0.5)
return
def initGL(self):
ZglApplicationModule.zglApplication.initGL(self)
# Enable blending
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # Really Nice Perspective Calculations
glShadeModel(GL_SMOOTH) # Enables Smooth Color Shading
glClearDepth(1.0) # Enables Clearing Of The Depth Buffer
glEnable(GL_DEPTH_TEST) # Enables Depth Testing
glDepthFunc(GL_LEQUAL) # The Type Of Depth Test To Do
glDisable(GL_LIGHTING)
glColor3f(1.0, 1.0, 1.0)
self.updateProjection()
return
def updateProjection(self):
""" Called by reshapeFunc when window size has changed """
self.updateProjection2DOrtho()
def reshapeFunc(self, width, height):
ZglApplicationModule.zglApplication.reshapeFunc(self, width, height)
self.contextManager().updateFrame((0.0, 0.0, self.width, self.height))
def displayFunc(self):
glDrawBuffer(GL_BACK)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glDisable(GL_LIGHTING)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
"""main drawing stuff goes here"""
glPushMatrix()
self.contextManager().step()
self.contextManager().draw()
glPopMatrix()
x = 30
y = 30
glColor3f(1.0, 1.0, 1.0)
self.drawFPS()
glutSwapBuffers()
return
def drawFPS(self):
if hasattr(self, "last_t"):
            # frames per second, not the raw frame time
            self.fps = 1.0/max(float(time.time()-self.last_t), 1e-6)
glColor3f(1.0, 1.0, 1.0)
zglUtilsModule.zglUtils.drawBitmapText(
"FPS: %f" % (self.fps),
# x, y
30, self.height-30,
font=GLUT_BITMAP_HELVETICA_10)
self.last_t = time.time()
return
def keyboardFunc(self, key, x, y):
# If escape is pressed, kill everything.
if key == '\033':
logging.debug("Quitting!")
sys.exit()
else:
self.contextManager().keyDown(key)
def keyboardUpFunc(self, key, x, y):
self.contextManager().keyUp(key)
def specialFunc(self, *args):
key = args[0]
if key == GLUT_KEY_UP:
logging.debug("Up! GLUT_KEY_UP=" + str(GLUT_KEY_UP))
elif key == GLUT_KEY_DOWN:
logging.debug("Down! GLUT_KEY_DOWN=" + str(GLUT_KEY_DOWN))
elif key == GLUT_KEY_LEFT:
logging.debug("Left! GLUT_KEY_LEFT=" + str(GLUT_KEY_LEFT))
elif key == GLUT_KEY_RIGHT:
logging.debug("Right! GLUT_KEY_RIGHT=" + str(GLUT_KEY_RIGHT))
def mousePassiveFunc(self, x, y):
zglMouse.setPosition(x, y)
self.last_mouse_x = x
self.last_mouse_y = y
self.contextManager().mouseMoved(x, self.height-y)
def mouseFunc(self, button, state, x, y):
if state == GLUT_DOWN:
self.contextManager().mouseButtonDown(button, x, self.height-y)
elif state == GLUT_UP:
self.contextManager().mouseButtonUp(button, x, self.height-y)
def mouseMotionFunc(self, x, y):
self.contextManager().mouseDragged(x, self.height-y)
return
    # END class zglApplication
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/application/__init__.py | __init__.py |
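# Example (illustrative sketch, not part of the original module):
# the resource registry on Application is a plain key->value store
# with a per-lookup default, used above to swap in policy classes.
if __name__ == '__main__':
    app = Application()
    print app.getResourceValue('edge policy class',
                               default=SimpleVerticalEdgePolicy)
    app.setResourceValue('edge policy class', SimpleEdgePolicy)
    print app.getResourceValue('edge policy class',
                               default=SimpleVerticalEdgePolicy)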
import logging
import sys
import time
from zgl_graphdrawer.Edge import *
from zgl_graphdrawer.Node import *
from zgl_graphdrawer.MouseHandlers import *
import zgl_graphdrawer.Event as EventModule
import zgl_graphdrawer.canvas as CanvasModule
import zgl_graphdrawer.GraphDrawerManager as ContextManagerModule
from zgl.zglUtils import zglUtils
import OpenGL
from OpenGL.GL import *
import numpy
class Canvas(CanvasModule.Canvas):
def __init__(self):
CanvasModule.Canvas.__init__(self)
        self.frame = (0.0, 0.0, 0.0, 0.0) # x_min, y_min, x_max, y_max of the layout
        self.scroll_bounds = (0.0, 0.0)
        # linear zoom factor used by the view<->canvas conversions below
        self.zoom = 1.0
self.resetDrawables()
# self.currentMouseHandler = None
self.mouseHandlers = {}
self.defaultMouseHandler = None
self.activeMouseMode = None
self.is_dragging = False
self.is_zooming = False
return
def activeMouseHandler(self):
return self.mouseHandlers.get(
self.activeMouseMode,
self.defaultMouseHandler)
def draw(self):
CanvasModule.Canvas.draw(self)
return
def updateFrame(self, frame):
self.frame = frame
self.validateScrollPosition()
def children(self):
for node in self.nodes:
yield node
for edge in self.edges:
yield edge
raise StopIteration
def mouseMoved(self, x, y):
self.activeMouseHandler().mouseMoved(x, y)
def mouseButtonDown(self, button, x, y):
        # check whether any of the children
        # consumed this event
for child in self.children():
if child.mouseButtonDown(button, x, y):
return True
result = self.activeMouseHandler().mouseButtonDown(button, x, y)
if result is not None and result != False:
return result
return
def mouseDragged(self, x, y):
self.activeMouseHandler().mouseDragged(x, y)
return
def mouseButtonUp(self, button, x, y):
for child in self.children():
if child.mouseButtonUp(button, x, y):
return True
self.activeMouseHandler().mouseButtonUp(button, x, y)
return
def initializeEventHandlers(self, contextManager=None):
self.activeMouseMode = ContextManagerModule.GraphDrawerManager.MOUSEMODE_SELECT
# Mouse Handlers
scrollMouseHandler = CanvasScrollMouseHandler(self)
zoomMouseHandler = CanvasZoomMouseHandler(self)
selectionMouseHandler = CanvasSelectionMouseHandler(
self, contextManager.selection)
mouseHandlers = {
ContextManagerModule.GraphDrawerManager.MOUSEMODE_SELECT:selectionMouseHandler,
ContextManagerModule.GraphDrawerManager.MOUSEMODE_CANVAS_PAN:scrollMouseHandler,
ContextManagerModule.GraphDrawerManager.MOUSEMODE_CANVAS_ZOOM:zoomMouseHandler
}
# Default Mouse Handler
self.defaultMouseHandler = selectionMouseHandler
self.mouseHandlers = mouseHandlers
return
    def getObjectAtViewCoordinates(self, x, y):
        (canvas_x, canvas_y) = self.getCanvasCoordinatesFromViewCoordinates(x, y)
        for child in self.children():
            # hit test in canvas space, not raw view space
            if child.isInside(canvas_x, canvas_y):
                return child
        return None
def getViewCoordinatesFromCanvasCoordinates(self, x, y):
view_x = x/self.zoom - self.scroll_position[0];
view_y = y/self.zoom - self.scroll_position[1];
return (view_x, view_y)
def getCanvasCoordinatesFromViewCoordinates(self, x, y):
canvas_x = int( (self.scroll_position[0] + x)*self.zoom )
canvas_y = int( (self.scroll_position[1] + y)*self.zoom )
return (canvas_x, canvas_y)
    def validateScrollPosition(self):
        if self.scroll_position[0] + self.frame[0] > self.scroll_bounds[0]:
            self.scroll_position[0] = self.scroll_bounds[0] - self.frame[0]
        if self.scroll_position[1] + self.frame[1] > self.scroll_bounds[1]:
            self.scroll_position[1] = self.scroll_bounds[1] - self.frame[1]
        return
'''
def displayFunc(self):
glDrawBuffer(GL_BACK)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glDisable(GL_LIGHTING)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
"""main drawing stuff goes here"""
glPushMatrix()
self.graphDrawerManager.step()
self.graphDrawerManager.draw()
glPopMatrix()
x = 30
y = 30
glColor3f(1.0, 1.0, 1.0)
zglUtils.zglUtils.drawBitmapText("Hello World", x, y, )
self.drawFPS()
glutSwapBuffers()
def drawFPS(self):
if hasattr(self, "last_t"):
self.fps = 1.0/float(time.time()-self.last_t)
glColor3f(1.0, 1.0, 1.0)
zglUtils.zglUtils.drawBitmapText("FPS: %f" % (self.fps), 30, self.height-30, font=GLUT_BITMAP_HELVETICA_10)
self.last_t = time.time()
'''
    # END class Canvas
pass
class CanvasLayer(object):
def __init__(self, name):
self.name = name
self._children = []
return
def addChild(self, child):
self.getChildren().append(child)
return
def removeChild(self, child):
while child in self.getChildren():
self.getChildren().remove(child)
return
def getChildren(self):
return self._children
# END class CanvasLayer
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/canvas/zgl_canvas.py | zgl_canvas.py |
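# Example (illustrative sketch, not part of the original module):
# round-trips a point through the view<->canvas conversions; with the
# default zoom of 1.0 set in __init__ and a scroll offset, the two
# mappings should invert each other.
if __name__ == '__main__':
    canvas = Canvas()
    canvas.scroll_position = [25.0, 10.0]
    canvas_point = canvas.getCanvasCoordinatesFromViewCoordinates(5, 5)
    print canvas_point                           # (30, 15)
    print canvas.getViewCoordinatesFromCanvasCoordinates(*canvas_point)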
import logging
import sys
import time
from zgl_graphdrawer.Edge import *
from zgl_graphdrawer.Node import *
from zgl_graphdrawer.MouseHandlers import *
import zgl_graphdrawer.Event as EventModule
from zgl.zglUtils import zglUtils
import zgl.zglPrimitives as PrimitivesModule
import OpenGL
from OpenGL.GL import *
import numpy
class Canvas(object):
def __init__(self):
self.scroll_position = [0.0, 0.0]
# set up the zoom and pan matrices
self.zoomMatrix = numpy.matrix('1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 1')
self.panMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; 0 0 0 0')
self.selection_rectangle = None
self.resetDrawables()
return
def contextManager(self, value=None):
if value is not None:
self._contextManager = value
return self._contextManager
def nodePolicy(self, value=None):
if value is not None:
self._nodePolicy = value
return self._nodePolicy
def edgePolicy(self, value=None):
if value is not None:
self._edgePolicy = value
return self._edgePolicy
def layoutPolicy(self, value=None):
if value is not None:
self._layoutPolicy = value
return self._layoutPolicy
def visualPolicy(self, value=None):
if value is not None:
self._visualPolicy = value
return self._visualPolicy
def portPolicy(self, value=None):
if value is not None:
self._portPolicy = value
return self._portPolicy
def resetDrawables(self):
self._dynamicEdge = None
self.nodes = []
self.edges = []
self.selectables = set([])
self.clickables = set([])
self.layers = {}
for layerName in ['edges', 'nodes']:
layer = CanvasLayer(layerName)
self.layers[layerName] = layer
pass
return
def addDrawable(self, drawable):
if isinstance(drawable, Node):
self.nodes.append(drawable)
self.layers['nodes'].addChild(drawable)
ports = drawable.inputPorts.values() + drawable.outputPorts.values()
for port in ports:
self.determineSelectable(port)
self.determineClickable(port)
if isinstance(drawable, Edge):
self.edges.append(drawable)
self.layers['edges'].addChild(drawable)
self.determineSelectable(drawable)
self.determineClickable(drawable)
return
def determineSelectable(self, object):
if object.isSelectable():
self.selectables.add(object)
return
def determineClickable(self, object):
if object.isClickable():
self.clickables.add(object)
return
def removeDrawable(self, drawable):
return self.removeChild(drawable)
def removeChild(self, drawable):
# remove from layers
for layer in self.layers.values():
layer.removeChild(drawable)
if isinstance(drawable, Node):
ports = drawable.inputPorts.values() + drawable.outputPorts.values()
for port in ports:
self.selectables.discard(port)
self.clickables.discard(port)
self.selectables.discard(drawable)
self.clickables.discard(drawable)
return
def getSelectables(self):
return self.selectables
def getClickables(self):
return self.clickables
def computeLayout(self):
if not len(self.nodes):
# no need to compute layout if self has no nodes
return
(x_max, y_max) = self.layoutPolicy().layoutNodes(
self.nodes, self.edges)
self.scroll_bounds = (x_max, y_max)
self.updateLayout()
return
def updateLayout(self):
map(lambda x: x.setupPrimitives(self.nodePolicy()), self.nodes)
map(lambda x: x.setupPrimitives(self.edgePolicy()), self.edges)
return
def setScrollPosition(self, new_scroll_position):
self.scroll_position = new_scroll_position
self.validateScrollPosition()
def validateScrollPosition(self):
# this is really only used by zglCanvas
# need to refactor to reflect that
pass
def computeDynamicEdge(self, initialPosition, finalPosition):
line = PrimitivesModule.zglLine()
edgePolicy = self.edgePolicy()
initialCanvasMatrix = numpy.matrix('1 0 0 0; 0 1 0 0; 0 0 1 0; %s %s 0 1' % (initialPosition.x, initialPosition.y))
finalCanvasMatrix = numpy.matrix('1 0 0 0; 0 1 0 0; 0 0 1 0; %s %s 0 1' % (finalPosition.x, finalPosition.y))
initialWorldMatrix = self.getWorldCoordinatesFromCanvasCoordinates(initialCanvasMatrix)
finalWorldMatrix = self.getWorldCoordinatesFromCanvasCoordinates(finalCanvasMatrix)
point1 = (initialWorldMatrix[3,0], initialWorldMatrix[3,1])
point2 = (finalWorldMatrix[3,0], finalWorldMatrix[3,1])
line.points = edgePolicy.createPath(point1, point2)
line.position = (0.0, 0.0, 0.01)
line.colour = [0.7, 0.7, 1.0, 0.3]
        edgeClass = self.contextManager().app().getResourceValue(
            'gui edge class', default=Edge)
        # the dynamic edge has no backing data object yet
        self._dynamicEdge = edgeClass(None)
        self._dynamicEdge.children = [line]
        self._dynamicEdge.setupColors(self.edgePolicy())
self.points = line.points
pass
def drawLayers(self):
# Draw nodes
for layerName, layer in self.layers.iteritems():
for child in layer.getChildren():
child.draw()
return
def drawDynamicEdge(self):
if self._dynamicEdge is not None:
self._dynamicEdge.draw()
return
def draw(self):
glPushMatrix()
glTranslate(self.panMatrix[3,0], self.panMatrix[3,1], self.panMatrix[3,2])
glScale(1.0/self.zoomMatrix[0,0], 1.0/self.zoomMatrix[1,1], 1.0/self.zoomMatrix[2,2])
self.drawLayers()
self.drawDynamicEdge()
# Draw selection rectangle
if self.selection_rectangle is not None:
zglUtils.drawRect(
self.selection_rectangle[0],
self.selection_rectangle[1],
self.selection_rectangle[2],
self.selection_rectangle[3], (0.8, 0.5, 0.2, 0.2)
)
glPopMatrix()
return
def getObjectsIntersectingRect(self, rect):
objects = []
# recalculate the rect so that its in the form
# where width and height are both positive
baseX = rect[0]
baseY = rect[1]
diffX = baseX + rect[2]
diffY = baseY + rect[3]
minX = min(baseX, diffX)
minY = min(baseY, diffY)
maxX = max(baseX, diffX)
maxY = max(baseY, diffY)
rect = (minX, minY, maxX-minX, maxY-minY)
for child in self.nodes + self.edges:
if child.intersectsRect(rect):
objects.append(child)
return objects
# END class Canvas
pass
class CanvasLayer(object):
def __init__(self, name):
self.name = name
self._children = []
return
def addChild(self, child):
self.getChildren().append(child)
return
def removeChild(self, child):
while child in self.getChildren():
self.getChildren().remove(child)
return
def getChildren(self):
return self._children
# END class CanvasLayer
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/canvas/__init__.py | __init__.py |
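# Example (illustrative sketch, not part of the original module):
# getObjectsIntersectingRect accepts rects with negative width/height
# (a drag up-and-left) and normalizes them before hit testing.
if __name__ == '__main__':
    class _StubNode(object):
        def intersectsRect(self, rect):
            print "tested against normalized rect:", rect
            return False
    canvas = Canvas()
    canvas.nodes = [_StubNode()]
    # a 40x30 rect dragged from (50, 40) back towards the origin
    print canvas.getObjectsIntersectingRect((50, 40, -40, -30))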
import logging
import sys
import time
from zgl_graphdrawer.Edge import *
from zgl_graphdrawer.Node import *
from zgl_graphdrawer.MouseHandlers import *
import zgl_graphdrawer.canvas as CanvasModule
import zgl_graphdrawer.Event as EventModule
import zgl_graphdrawer.GraphDrawerManager as ContextManagerModule
from zgl.zglUtils import zglUtils
import OpenGL
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import numpy
class Point(object):
def __init__(self, x, y):
self.x = x
self.y = y
return
class Canvas(CanvasModule.Canvas):
def __init__(self, parent, width, height, *args, **kwds):
CanvasModule.Canvas.__init__(self)
self.width = width
self.height = height
self.initGL()
self.initLighting()
self.initViewport()
self._selected = []
return
def resetDrawables(self):
CanvasModule.Canvas.resetDrawables(self)
self.resetDataToGuiObjectMapping()
return
def resetDataToGuiObjectMapping(self):
self._dataToGuiObjectMapping = {}
return
def addDataToGuiObjectMapping(self, dataObject, guiObject):
self._dataToGuiObjectMapping[dataObject] = guiObject
return
def getGuiObjectForDataObject(self, dataObject):
return self._dataToGuiObjectMapping[dataObject]
def initializeEventHandlers(self, contextManager=None):
self.contextManager(contextManager)
for key, default in [
('mouse event handler', EventModule.MouseEventHandler),
('key event handler', EventModule.KeyEventHandler),
('canvas event handler', EventModule.CanvasEventHandler),
]:
classObject = contextManager.app().getResourceValue(
"%s class" % key, default=default)
eventHandler = classObject(self)
eventHandler.bindEvents()
self.PushEventHandler(eventHandler)
# TODO:
# remove this once the event handler mechanism works
contextManager.app().setResourceValue(
key, eventHandler)
pass
return
def getHeight(self):
return self.height
def getWidth(self):
return self.width
def resize(self, width, height):
self.width = width
self.height = height
self.setViewport(width, height)
return
def initViewport(self):
return self._initViewport(self.getWidth(), self.getHeight())
def _initViewport(self, width, height):
self.setViewport(width, height)
self.panMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; 0 0 0 0')
return
def setViewport(self, width, height):
# Reset The Current Viewport And Perspective Transformation
glViewport(0, 0, width, height)
self._updateProjection(width, height)
return
def initLighting(self):
return self._initLighting()
def _initLighting(self):
return
def initGL(self):
return self._initGL()
    def _initGL(self):
        # glClearColor (0.0, 0.0, 0.0, 0.0)
        glClearColor (0.6, 0.6, 0.6, 1.0)
        glShadeModel (GL_SMOOTH)
        glMatrixMode (GL_PROJECTION)
        # Enable blending
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        # Really Nice Perspective Calculations
        glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)
        # Enables Clearing Of The Depth Buffer
        glClearDepth(1.0)
        # Enables Depth Testing
        glEnable(GL_DEPTH_TEST)
        # The Type Of Depth Test To Do
        glDepthFunc(GL_LEQUAL)
        glDisable(GL_LIGHTING)
        glColor3f(1.0, 1.0, 1.0)
        self.updateProjection()
        return
self.updateProjection()
return
def Render(self, dc):
self.SetCurrent()
self.displayFunc()
self.SwapBuffers()
return
def displayFunc(self):
return self._displayFunc()
def _displayFunc(self):
glDrawBuffer(GL_BACK)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glDisable(GL_LIGHTING)
glColor3f(1.0, 1.0, 1.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
"""main drawing stuff goes here"""
self.draw()
self.drawMetaInfo()
return
def drawMetaInfo(self):
x = 30
y = self.getHeight() - 30
self.drawFPS(x, y)
return
def updateProjection(self):
return self._updateProjection(self.getWidth(), self.getHeight())
def _updateProjection(self, width, height):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluOrtho2D(0.0, width, 0.0, height)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
return
def drawFPS(self, x, y):
if hasattr(self, "last_t"):
self.fps = 1.0/float(time.time()-self.last_t)
glColor3f(1.0, 1.0, 1.0)
zglUtils.drawBitmapText(
"FPS: %f" % (self.fps),
x, y,
font=GLUT_BITMAP_HELVETICA_10)
self.last_t = time.time()
return
def getClickableObjectsAtCanvasCoordinates(self, x, y):
canvasMatrix = numpy.matrix('0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' % (x, y))
worldMatrix = self.getWorldCoordinatesFromCanvasCoordinates(canvasMatrix)
world_x = worldMatrix[3,0]
world_y = worldMatrix[3,1]
point = Point(world_x, world_y)
for clickable in self.getClickables():
if clickable.overlaps(point):
yield clickable
raise StopIteration
def getSelectableObjectAtCanvasCoordinates(self, x, y):
"""
this should instead be a composite filter
# object is selectable
# object contains point
"""
canvasMatrix = numpy.matrix(
'0 0 0 0; 0 0 0 0; 0 0 0 0; %s %s 0 0' % (x, y))
worldMatrix = self.getWorldCoordinatesFromCanvasCoordinates(canvasMatrix)
world_x = worldMatrix[3,0]
world_y = worldMatrix[3,1]
point = Point(world_x, world_y)
for selectable in self.getSelectables():
if selectable.overlaps(point):
return selectable
return None
def getViewportCoordinatesFromCanvasCoordinates(self, canvasMatrix):
rotationMatrix = numpy.matrix(
'1 0 0 0; 0 -1 0 0; 0 0 -1 0; 0 0 0 1')
translationMatrix = numpy.matrix(
'0 0 0 0; 0 0 0 0; 0 0 0 0; 0 %s 0 0' % self.getHeight())
viewportMatrix = (canvasMatrix*rotationMatrix) +translationMatrix
return viewportMatrix
def getWorldCoordinatesFromViewportCoordinates(self, viewportMatrix):
worldMatrix = self.zoomMatrix * (viewportMatrix - self.panMatrix)
return worldMatrix
def getWorldCoordinatesFromCanvasCoordinates(self, canvasMatrix):
viewportMatrix = self.getViewportCoordinatesFromCanvasCoordinates(canvasMatrix)
worldMatrix = self.getWorldCoordinatesFromViewportCoordinates(viewportMatrix)
return worldMatrix
def setSelection(self, selected):
oldSelection = self._selected
self._selected = selected
self.contextManager().selectionChanged(oldSelection, self._selected)
return
def addToSelection(self, selected):
self._selected.extend(selected)
self.contextManager().selectionChanged([], self._selected)
pass
def getSelection(self):
return self._selected
def getResourceKeyForNode(self, dataNode):
return 'gui node class'
def addNode(self, dataNode, worldMatrix=None):
resourceKey = self.getResourceKeyForNode(dataNode)
nodeClass = self.contextManager().app().getResourceValue(
resourceKey, default=Node)
uiNode = nodeClass(dataNode)
# need to do this so that the node
# will have access to app contextual information
uiNode.contextManager(self.contextManager())
uiNode.setupPrimitives(self.nodePolicy())
self.addDrawable(uiNode)
if worldMatrix is None:
worldMatrix = numpy.matrix('1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 1')
uiNode.setPosition(worldMatrix[3,0], worldMatrix[3,1])
dataObjectFunction = self.addDataToGuiObjectMapping
dataObjectFunction(dataNode, uiNode)
ports = uiNode.inputPorts.values()+uiNode.outputPorts.values()
for port in ports:
portName = port.name
# parameters have __eq__ defined
# such that they are equivalent if their ids are equivalent
# and since the ids are not unique (and are contextualized)
# we need to include the context, ie the data node
dataObjectFunction((dataNode, portName), port)
pass
return uiNode
def addEdge(self, dataEdge):
# find the gui objects
# for the outputNode, outputPort, inputPort, inputNode
dataPath = dataEdge.path()
dataObjectFunction = self.getGuiObjectForDataObject
guiPath = map(
dataObjectFunction,
[
dataPath[0], # source node
(dataPath[0], dataPath[1]), # source port
(dataPath[-1], dataPath[-2]), # target port
dataPath[-1] # target node
]
)
edgeClass = self.contextManager().app().getResourceValue(
'gui edge class', default=Edge)
edge = edgeClass(dataPath, guiPath)
edge.setupPrimitives(self.edgePolicy())
self.addDrawable(edge)
return
# END class Canvas
pass | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/zgl_graphdrawer/canvas/gl_canvas.py | gl_canvas.py |
import logging
import math, sys, os, time
import zgl_graphdrawer.Application as ApplicationModule
def createMockData():
mockNodes = []
for i in range(8):
nodeData = MockNodeData()
nodeData.name("Job %d" % (i))
if i < 4:
nodeData.inputPorts = ['input1', 'input2', 'input3']
else:
nodeData.inputPorts = ['input1']
nodeData.outputPorts = ['result']
mockNodes.append(nodeData)
mockEdges = []
mockEdge = MockEdgeData()
mockEdge.inputNode = lambda: mockNodes[0]
mockEdge.inputPort = 'result'
mockEdge.outputNode = lambda: mockNodes[1]
mockEdge.outputPort = 'input1'
mockEdges.append(mockEdge)
mockEdge = MockEdgeData()
mockEdge.inputNode = lambda: mockNodes[0]
mockEdge.inputPort = 'result'
mockEdge.outputNode = lambda: mockNodes[2]
mockEdge.outputPort = 'input2'
mockEdges.append(mockEdge)
mockEdge = MockEdgeData()
mockEdge.inputNode = lambda: mockNodes[0]
mockEdge.inputPort = 'result'
mockEdge.outputNode = lambda: mockNodes[3]
mockEdge.outputPort = 'input3'
mockEdges.append(mockEdge)
mockEdge = MockEdgeData()
mockEdge.inputNode = lambda: mockNodes[0]
mockEdge.inputPort = 'result'
mockEdge.outputNode = lambda: mockNodes[4]
mockEdge.outputPort = 'input1'
mockEdges.append(mockEdge)
    for source, target in zip(mockNodes[4:-1], mockNodes[5:]):
        mockEdge = MockEdgeData()
        # bind the loop variables as default arguments; a plain
        # `lambda: source` would capture the variable, not its value,
        # so every edge would end up pointing at the last pair
        mockEdge.inputNode = lambda source=source: source
        mockEdge.inputPort = 'result'
        mockEdge.outputNode = lambda target=target: target
        mockEdge.outputPort = 'input1'
        mockEdges.append(mockEdge)
return (mockNodes, mockEdges)
# Mock Data Model for node
class MockNodeData(object):
def __init__(self):
self.name("Node")
def name(self, value=None):
if value is not None:
self._name = value
return self._name
# Mock Data Model for edge
class MockEdgeData(object):
def __init__(self):
self.inputNode = None
self.outputNode = None
def name(self, value=None):
if value is not None:
self._name = value
if not hasattr(self, '_name'):
return "%s.%s->%s.%s" % (self.inputNode().name(),
self.inputPort,
self.outputNode().name(),
self.outputPort)
return self._name | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/test/MockData.py | MockData.py |
import os
import sys
import unittest
APP_ROOT = os.getenv('ZGL_HOME')
sys.path.insert(0, '%s/src' % APP_ROOT)
import wx
import zgl_graphdrawer.Application as ApplicationModule
import zgl_graphdrawer.canvas.wx_canvas as CanvasModule
import zgl_graphdrawer.Frame as FrameModule
import zgl_graphdrawer.GraphDrawerManager as ContextManagerModule
import zgl_graphdrawer.utils as UtilsModule
import MockData as DataModule
class TestCase(unittest.TestCase):
def setUp(self):
print "setUp"
pass
def tearDown(self):
print "tearDown"
pass
def _testApplication1(self):
app = wx.PySimpleApp()
frame = wx.Frame(None, title='Hello World')
frame.Show()
app.MainLoop()
def testApplication2(self):
print "starting"
app = ApplicationModule.wxApplication()
app.setResourcePath(
os.path.sep.join([os.getenv('ZGL_HOME'), 'resources']))
frame = FrameModule.wxFrame(
None, title='foo',
size=wx.Size(ApplicationModule.DEFAULT_WIDTH,
ApplicationModule.DEFAULT_HEIGHT))
frame.app(app)
canvas = CanvasModule.Canvas(frame,
ApplicationModule.DEFAULT_WIDTH,
ApplicationModule.DEFAULT_HEIGHT)
contextManager = ContextManagerModule.GraphDrawerManager()
contextManager.initialize(canvas=canvas)
app.contextManager(contextManager)
contextManager.app(app)
# TODO:
# this needs to be specified elsewhere
contextManager.commandPath('dot', '/sw/bin/dot')
app.initializePolicies()
(mockNodes, mockEdges) = DataModule.createMockData()
contextManager.setDataContext(mockNodes, mockEdges)
canvas.computeLayout()
frame.Show()
app.MainLoop()
print "ending"
pass
# END class TestApplication1
pass
def main():
UtilsModule.configLogging()
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestCase, 'test'))
runner = unittest.TextTestRunner()
runner.run(suite)
return
if __name__=="__main__":
main() | zgl | /zgl-0.1.4.tar.gz/zgl-0.1.4/src/test/TestWxApplication.py | TestWxApplication.py |
# Zgli
All documentation available at [zgli's website](https://zgly-92273.web.app/)
#### 1. Installation
```pip install zgli```
#### 2. Quick Start
Follow the [quick start guide](https://zgly-92273.web.app/quickstart.html) and learn the tool with a hands-on approach.
#### 3. Normalized Compression Distance (NCD)
Want to better understand the normalized compression distance? Read a quick explanation [here](https://zgly-92273.web.app/ncd.html)
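As a quick reference, NCD measures how much better two objects compress together than apart. A minimal sketch using Python's built-in zlib — illustrative only, this is not zgli's own API:
```python
import zlib

def ncd(x: bytes, y: bytes) -> float:
    # NCD(x, y) = (C(xy) - min(C(x), C(y))) / max(C(x), C(y))
    cx, cy = len(zlib.compress(x)), len(zlib.compress(y))
    cxy = len(zlib.compress(x + y))
    return (cxy - min(cx, cy)) / max(cx, cy)

# Similar inputs compress well together, so their distance is closer to 0.
print(ncd(b"banana" * 50, b"banana" * 49))  # near 0
print(ncd(b"banana" * 50, b"xq9Zw" * 60))   # closer to 1
```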
#### 4. Dependencies
This tool uses:
* [Numpy](https://numpy.org/)
* [Pandas](https://pandas.pydata.org/)
* [Tqdm](https://tqdm.github.io/)
* [Multiprocessing](https://docs.python.org/3/library/multiprocessing.html) | zgli | /zgli-0.2.0.tar.gz/zgli-0.2.0/README.md | README.md |
## The framework got updated and I had not synced up yet; the overall structure will change. I will finish all the changes within three days — I said so, and not even Jesus can stop me (update complete)
## Installation tutorial for beginners
https://southcat.net/2526/
## Must-read for plugins
Miaopass is gone, and the automatic code-refill module targeted Miaopass. If you need DlerCloud, rename the dlercloud.py file in the updatacode directory to updatacode.py and overwrite the original file; for other sites, adapt it yourself or ask me —
I will build one whenever I have time, and with the holidays about to start, time is the one thing I will have plenty of.
## This is my first development project, so please report problems promptly
email: [email protected]
Blog: [Southcat](https://southcat.net)
Built on top of the open-source [zgram](https://github.com/plutobell/zgram) project.
## Added features
1. Invite-code auto-distribution module with a per-user claim limit (add invite codes to code.txt in the invite_code directory, one code per line).
2. Invite-code count statistics: when the codes run out, a message is automatically sent to the admin (change the recipient id in len_invite.py yourself).
3. The code-refill module is done; the command is `/add_code<invite code>`. Note that it strictly expects one code per line because my skills are still limited, so the result may not be ideal — give me two more days of Python study and I will come back and improve it.
4. User-record deletion module: once deleted, a user can claim an invite code again. The command is `/del<user id>`; currently only a single user can be deleted at a time.
5. updatacode module: it currently only fetches from Miaopass. It scrapes the invite codes from the first two pages of the invite-code listing, diffs them against the previous data, and then writes the new ones to code.txt; more sites will be supported later (a rough sketch of this fetch-and-diff flow is shown below).
   In theory every site that uses the same template as Miaopass works — just fill in your account and password in updatacode/updatacode.py.
6. Sadly, Miaopass has shut down. A DlerCloud auto-update module was added under plugins/updatacode: to use it, back up the original file and rename that one to updatacode.py. In theory the Miaopass refill module supports every site with the same template — just change the site inside it. Next I will write refill modules for every site that has an invite-code page; you can also send me sites by email or Telegram and I will try to adapt them (the holidays leave me with plenty of spare time).
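A rough sketch of the fetch-and-diff flow from point 5 above. The URL pattern, the code regex and the file path are placeholders, not the plugin's real values, and `session` is assumed to be an already-logged-in `requests.Session`:
```python
import re
import requests

CODE_RE = re.compile(r"[A-Za-z0-9]{16,}")  # placeholder invite-code pattern

def update_codes(session, base_url, code_file="invite_code/code.txt"):
    # scrape the first two pages of the invite-code listing (placeholder URL)
    scraped = set()
    for page in (1, 2):
        html = session.get("%s/user/code?page=%d" % (base_url, page)).text
        scraped |= set(CODE_RE.findall(html))
    # diff against the codes we already have
    try:
        with open(code_file) as f:
            known = set(line.strip() for line in f if line.strip())
    except FileNotFoundError:
        known = set()
    # append only the new codes, one per line
    new_codes = scraped - known
    with open(code_file, "a") as f:
        for code in sorted(new_codes):
            f.write(code + "\n")
    return new_codes
```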
## Roadmap
1. An admin command for adding invite codes.
2. Support for multiple types of invite codes.
3. Code cleanup — there is currently a lot of redundant code.
4. A whitelist: whitelisted users can claim codes without limit.
## Requirements
##### Python version
zgram only supports Python 3.x, not Python 2.x.
This project is tested on Python 3.5 and above.
##### Install
```
pip install zgram
```
##### Upgrade
```
pip install zgram --upgrade
```
## Usage
##### One-command start (Polling Mode)
```
zgram -c/--config <config file path> -k/--key <bot key> -r/--root <your user id>
```
This command automatically generates a configuration file suitable for Polling mode, but the plugin path still has to be configured by hand.
#### 1. Operating modes
zgram can run in Webhook mode or in Polling mode. Webhook mode is recommended for production, while Polling is intended for development only.
##### Webhook mode
To run in Webhook mode, set the `webhook` field of the configuration file to `True`. The configuration fields involved in this mode are:
```
[config]
webhook=True
self_signed=False
cert_key=your private cert path
cert_pub=your public cert path
server_address=your server ip address or domain
server_port=your server port
local_address=webhook local address
local_port=webhook local port
```
`self_signed` controls whether a self-signed certificate is used; `cert_key` and `cert_pub` are the (absolute) paths of your certificate pair; `server_address` is your server's public IP; `server_port` is the server port (Telegram currently only supports 443, 80, 88 and 8443); `local_address` is the local address the webhook listens on, and `local_port` is the local port it runs on.
Running it behind nginx is recommended. For self-signed certificates, see: Generating a self-signed certificate pair (PEM)
##### Polling mode
To run in Polling mode, simply make sure the `webhook` field of the configuration file is `False`. The most basic configuration for this mode is:
```
[config]
key=bot key
pool_size=40
webhook=False
root_id=your user id
debug=False
plugin_dir=your plugin dir
```
#### 2. Running
Open a terminal at any path and enter the following command.
If you use the default configuration file path:
enter `zgram` and press Enter; under normal circumstances you should see the bot start running.
If you specify the configuration file path on the command line:
enter `zgram -c/--config <configure file path>` and press Enter; under normal circumstances you should see the bot start running. (See `-h/--help` for more options.)
It can be used together with supervisor.
#### 3. Configuration file
A complete configuration file looks like this:
```
[config]
key=bot key
plugin_dir=your plugin dir
pool_size=40 //the thread pool size, default 40, range(1, 101)
webhook=False
self_signed=False //Optional while webhook is False
cert_key=your private cert path //Optional while webhook is False
cert_pub=your public cert path //Optional while webhook is False
server_ip=your server ip address //Optional while webhook is False
server_port=your server port //Optional while webhook is False
local_address=webhook local address //Optional while webhook is False
local_port=webhook local port //Optional while webhook is False
root_id=your user id
debug=False
drop_pending_updates=False
local_api_server=local api server address //[Optional]
```
In version 1.13.0 and later, the configuration file can be generated automatically (Polling mode by default).
1. If no configuration file path is specified on the command line, a configuration file config.cfg is generated automatically when none exists at the default path.
On Linux, the folder .zgram is created in the user's home directory and config.cfg is generated inside it.
On Windows, the folder .zgram is created under C:\Users\<username> and config.cfg is generated inside it.
2. Specifying a configuration file
On both Linux and Windows the configuration file path can be specified manually on the command line, in the format:
```
zgram -c/--config <configure file path>
```
The path must be absolute. In this case, too, a configuration file is generated automatically when none exists at the given path; its name is taken from the specified path.
Tip: the auto-generated configuration file leaves these fields unset: key, root_id, plugin_dir. key and root_id are required, but they can still be set on the command line:
```
zgram -c/--config <config file path> -k/--key <bot key> -r/--root <your user id>
```
The command above runs the framework in Polling mode without any fiddling with the configuration file.
Set plugin_dir manually afterwards.
## Plugin development guide (using the Hello plugin as an example) BETA 0.8
#### 1. Plugin structure
A complete zgram plugin takes the form of a directory, i.e. a Python package. Taking the Hello plugin as an example, the most basic layout is:
```
Hello/
    ./__init__.py
    ./Hello.py
    ./Hello_screenshot.png
    ./readme.md
    ./requirement.txt
```
#### 2. Rules
##### Naming
The rule to follow when building a zgram plugin: every plugin directory must contain a .py file with the same name as the plugin, e.g. the file Hello.py in the plugin Hello, and that file must contain a function of the same name serving as the plugin entry point. For the Hello plugin:
```python
#file Hello/Hello.py
# -*- coding:utf-8 -*-
def Hello(bot, message):
    pass
```
The function Hello() is the plugin's entry point; the parameter bot is the instantiated Bot API object, and the parameter message receives the message data.
##### Resource paths
To open a file resource inside a plugin directory, the path used should follow this format:
```python
bot.path_converter(bot.plugin_dir + "<plugin dir name>/<resource address>")
```
The method path_converter converts the path format according to the operating system.
#### 3. Custom trigger commands
##### Plugin commands
A plugin's trigger command may differ from the plugin name and can be customized. For the Hello plugin, the trigger command is /helloworld rather than Hello.
Edit the __init__.py file in the plugin directory to set the trigger command:
```python
#file Hello/__init__.py
#/helloworld
#Hello World plugin example
```
The first line is the trigger command, prefixed with / by default; the second line is a short description of the plugin.
##### The special case of non-plugin packages
Normally, every package under the plugins directory is recognized as a plugin and loaded into zgram automatically. In some cases, however, a package is not a plugin itself but is shared by several plugins. To keep zgram from loading such a package, set its trigger command to ~~. For the shared package tools, the __init__.py file reads:
```python
#file tools/__init__.py
#~~
#description of the tools package
```
It is recommended that plugin packages follow Pascal case (each word capitalized) while non-plugin packages use all-lowercase names with words separated by _, to tell plugin packages and non-plugin packages apart:
```
- plugins
    - Menu  #plugin package
    - tools #non-plugin package
```
#### 4. Plugin template tool
In v1.9.20_dev and later, a plugin template can be created with a single command-line instruction:
```
zgram -p/--plugin <plugin name>
```
This command uses the plugin path from the framework configuration file (config.cfg) as the location of the created template.
#### 5. Periodic tasks
In v1.11.1 and later, periodic tasks can be created; they behave like repeating timers.
Available methods:
schedule.add : add a task
schedule.delete : remove a task
schedule.find : look up a task
schedule.clear : empty the task pool
schedule.status : inspect the task pool
Examples:
```
ok, uid = bot.schedule.add(gap, event, (bot, ))
ok, uid = bot.schedule.delete(uid)
ok, uid = bot.schedule.find(uid)
ok, uid = bot.schedule.clear()
ok, uid = bot.schedule.status()
```
The periodic task pool is one third the size of the global thread pool; the thread pool size can be set in the configuration file.
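As a concrete illustration, a plugin can register such a task in its entry function. This is only a minimal sketch — the plugin name Heartbeat, the 300-second interval and the reply texts are made up, not part of the framework:
```python
# -*- coding:utf-8 -*-

def Heartbeat(bot, message):
    chat_id = message["chat"]["id"]

    def event(bot, chat_id):
        # fires on every cycle until the task is deleted from the pool
        bot.sendMessage(chat_id=chat_id, text="still alive")

    # run event(bot, chat_id) every 300 seconds
    ok, uid = bot.schedule.add(300, event, (bot, chat_id))
    if ok:
        bot.sendMessage(chat_id=chat_id, text="task registered: " + str(uid))
    else:
        # uid holds the reason on failure, e.g. "Full" when the pool is exhausted
        bot.sendMessage(chat_id=chat_id, text="could not register task: " + str(uid))
```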
| zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/README.md | README.md |
import threading
from uuid import uuid4
class _Schedule(object):
"""
    Periodic task class.
"""
def __init__(self, queue_size):
self.__queue_size = queue_size
self.__queue_mutex = threading.Lock()
self.__queue = {}
def __create(self, gap, func, args):
class RepeatingTimer(threading.Timer):
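            # unlike the one-shot threading.Timer, run() fires the function
            # immediately and then repeats every `interval` seconds until
            # cancel() sets the finished flag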
def run(self):
while not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.wait(self.interval)
try:
t = RepeatingTimer(gap, func, args)
t.setDaemon(True)
return True, t
except Exception as e:
print(e)
return False, str(e)
def add(self, gap, func, args):
"""
        Add a periodic task.
"""
def __short_uuid():
uuidChars = ("a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
"t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5",
"6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I",
"J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
"W", "X", "Y", "Z")
uuid = str(uuid4().hex)
uid = ''
for i in range(0,8):
sub = uuid[i * 4: i * 4 + 4]
x = int(sub,16)
uid += uuidChars[x % 0x3E]
return uid
if len(self.__queue) == self.__queue_size:
return False, "Full"
ok, t = self.__create(gap, func, args)
if ok:
t.start()
uid = __short_uuid()
with self.__queue_mutex:
self.__queue[uid] = t
return True, uid
else:
return False, t
def status(self):
"""
        Get usage statistics of the periodic task pool.
"""
try:
used = len(self.__queue)
free = self.__queue_size - used
size = self.__queue_size
result = {
"used": used,
"free": free,
"size": size
}
return True, result
except Exception as e:
return False, {"exception": e}
def find(self, uid):
"""
        Look up a periodic task.
"""
if len(self.__queue) <= 0:
return False, "Empty"
if str(uid) in self.__queue.keys():
return True, str(uid)
else:
return False, "NotFound"
def delete(self, uid):
"""
        Remove a periodic task.
"""
if len(self.__queue) <= 0:
return False, "Empty"
if str(uid) in self.__queue.keys():
self.__queue[str(uid)].cancel()
with self.__queue_mutex:
self.__queue.pop(str(uid))
return True, str(uid)
else:
return False, "NotFound"
def clear(self):
"""
        Remove all periodic tasks.
"""
if len(self.__queue) == 0:
return False, "Empty"
else:
try:
for uid in list(self.__queue.keys()):
self.__queue[str(uid)].cancel()
with self.__queue_mutex:
self.__queue.clear()
return True, "Cleared"
except Exception as e:
return False, str(e) | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/schedule.py | schedule.py |
from http.server import HTTPServer, BaseHTTPRequestHandler
#from socketserver import ThreadingMixIn
import ssl
import sys
import json
def __MakeRequestHandler(bot):
class RequestHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super(RequestHandler, self).__init__(*args, **kwargs)
def do_POST(self):
if self.command == "POST" and self.path == "/bot" + str(bot._key):
req_data = self.rfile.read(int(self.headers['content-length']))
res = req_data.decode('utf-8')
message = json.loads(res)
results = [message]
messages = bot._washUpdates(results)
if messages is not None and messages:
for message in messages:
bot._pluginRun(bot, message)
data = {'status': 'ok'}
data = json.dumps(data)
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(data.encode('utf-8'))
else:
data = {'status': 'false'}
data = json.dumps(data)
self.send_response(400)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(data.encode('utf-8'))
def log_message(self, format, *args):
pass
return RequestHandler
# class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
# pass
def _runWebhook(bot, host, port):
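    # When the webhook listens on all interfaces (0.0.0.0), the bot terminates
    # TLS itself with the configured certificate pair; otherwise it serves
    # plain HTTP, assuming a reverse proxy (e.g. nginx) terminates TLS for it.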
RequestHandler = __MakeRequestHandler(bot)
if bot._local_address == "0.0.0.0":
try:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(bot._cert_pub, bot._cert_key)
server = HTTPServer((host, port), RequestHandler)
server.socket = context.wrap_socket(server.socket, server_side=True)
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
sys.exit("Bot Exit.")
else:
try:
server = HTTPServer((host, port), RequestHandler)
server.serve_forever()
except KeyboardInterrupt:
server.server_close()
sys.exit("Bot Exit.") | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/webhook.py | webhook.py |
import configparser
import argparse
import os
import sys
import shutil
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from pathlib import Path
from .version import __author__, __github__, __version__
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
cloud_api_server = "https://api.telegram.org/"
parser = argparse.ArgumentParser(description="zgram console command list")
parser.add_argument("-c", "--config", type=str,
help="specify the configuration file")
parser.add_argument("-k", "--key", type=str,
help="Specify the bot key")
parser.add_argument("-r", "--root", type=str,
help="Specify the root user id")
parser.add_argument("-p", "--plugin", type=str,
help="create a plugin template")
parser.add_argument("-L", "--logout",
help="use it to log out from the cloud Bot API server before launching the bot locally.",
action="store_true")
parser.add_argument("-C", "--close",
help="use it to close the bot instance before moving it from one local server to another.",
action="store_true")
parser.add_argument(
"-d", "--debug", help="run zgram in debug mode", action="store_true")
parser.add_argument(
"-v", "--version", help="view the current version of zgram", action="store_true")
args = parser.parse_args()
if len(sys.argv) == 2 and args.version:
print("\nVersion: " + __version__)
print("Author: " + __author__)
print("Project: " + __github__)
os._exit(0)
def _config():
'''
    Read the bot configuration and perform initialization.
'''
config = {}
if len(sys.argv) == 3 and args.config:
config_dir = os.path.abspath(str(Path(args.config)))
else:
config_dir = str(Path(os.path.abspath(
os.path.expanduser('~')) + "/.zgram/config.cfg"))
(path, filename) = os.path.split(config_dir)
(filename_, extension) = os.path.splitext(filename)
if extension != ".cfg":
print("only support configuration files with .cfg suffix.")
os._exit(0)
if not os.path.exists(str(Path(path))):
os.makedirs(str(Path(path)))
if not os.path.exists(str(Path(config_dir))):
print("the configuration file does not exist.")
key = ""
if args.key:
key = args.key
root = ""
if args.root:
root = args.root
with open(config_dir, "w") as conf_file:
conf_file.writelines([
"[config]" + "\n",
"key=" + str(key) + "\n",
"root_id=" + str(root) + "\n",
"plugin_dir=" + "\n",
"pool_size=40" + "\n",
"debug=False" + "\n",
"local_api_server=False" + "\n",
"drop_pending_updates=False" + "\n",
"webhook=False" + "\n",
"self_signed=False" + "\n",
"cert_key=" + "\n",
"cert_pub=" + "\n",
"server_address=" + "\n",
"server_port=" + "\n",
"local_address=" + "\n",
"local_port="
])
print("the configuration file has been created automatically.")
print("configuration file path: " + str(config_dir))
if not args.key or not args.root:
print("please modify the relevant parameters and restart the zgram.")
os._exit(0)
# else:
# print("\n")
conf = configparser.ConfigParser()
conf.read(config_dir)
options = conf.options("config")
if args.debug:
conf.set("config", "debug", str(True))
if args.key:
conf.set("config", "key", str(args.key))
if args.root:
conf.set("config", "root_id", str(args.root))
if args.debug:
default_args = ["key", "webhook", "root_id", "debug"]
else:
default_args = ["key", "webhook", "root_id"]
for default_arg in default_args:
if default_arg not in options:
print("the configuration file is missing necessary parameters.",
"\nnecessary parameters:" + default_args)
os._exit(0)
for option in options:
config[str(option)] = conf.get("config", option)
none_count = 0
for default_arg in default_args:
if config[default_arg] == "" or\
config[default_arg] == None:
none_count += 1
print("field " + default_arg + " is not set in configuration file.")
if none_count != 0:
os._exit(0)
if any(["version" in config.keys(), "author" in config.keys()]):
print("error in configuration file.")
os._exit(0)
if config["webhook"] == "True":
webhook_args = ["self_signed",
"server_address", "server_port",
"local_address", "local_port",
"cert_pub", "cert_key"]
for w in webhook_args:
if w not in config.keys():
print("please check if the following fields exist in the configuration file: \n" +
"cert_pub cert_key self_signed server_address server_port local_address local_port")
os._exit(0)
plugin_dir_in_config = False
if "plugin_dir" in config.keys():
if config["plugin_dir"] == "" or config["plugin_dir"] == None:
plugin_dir = str(Path(os.path.dirname(os.path.abspath(__file__)) + r"/plugins/")) + os.sep
else:
plugin_dir = str(Path(os.path.abspath(config["plugin_dir"]))) + os.sep
plugin_dir_in_config = True
else:
plugin_dir = str(Path(os.path.dirname(os.path.abspath(__file__)) + r"/plugins/")) + os.sep
if os.path.exists(str(Path(os.path.dirname(
os.path.abspath(__file__)) + r"/__pycache__"))):
shutil.rmtree(str(Path(os.path.dirname(
os.path.abspath(__file__)) + r"/__pycache__")))
    if not os.path.isdir(plugin_dir):  # check the plugin directory
# os.makedirs(plugin_dir)
os.mkdir(plugin_dir)
with open(str(Path(plugin_dir + "__init__.py")), "w") as f:
pass
elif not os.path.exists(str(Path(plugin_dir + "__init__.py"))):
with open(str(Path(plugin_dir + "__init__.py")), "w") as f:
pass
    if args.plugin and plugin_dir_in_config:  # plugin template creation
plugin_name = args.plugin
if not os.path.exists(str(Path(plugin_dir + plugin_name))):
os.mkdir(str(Path(plugin_dir + plugin_name)))
if not os.path.exists(str(Path(plugin_dir + plugin_name + os.sep + plugin_name + ".py"))):
with open(str(Path(plugin_dir + plugin_name + os.sep + plugin_name + ".py")), "w") as enter:
enter.writelines([
"# -*- coding:utf-8 -*-\n",
"\n",
"def " + plugin_name + "(bot, message):\n",
"\n" + \
" # root_id = bot.root_id\n" + \
" # bot_id = bot.bot_id\n" + \
" # author = bot.author\n" + \
" # version = bot.version\n" + \
" # plugin_dir = bot.plugin_dir\n" + \
" # plugin_bridge = bot.plugin_bridge\n" + \
" # uptime = bot.uptime\n" + \
" # response_times = bot.response_times\n" + \
" # response_chats = bot.response_chats\n" + \
" # response_users = bot.response_users\n" + \
"\n" + \
' chat_id = message["chat"]["id"]\n' + \
' user_id = message["from"]["id"]\n' + \
' message_id = message["message_id"]\n' + \
"\n" + \
' message_type = message["message_type"]\n' + \
' chat_type = message["chat"]["type"]\n' + \
"\n" + \
' prefix = "/' + plugin_name.lower() + '"\n' + \
"\n\n" + \
" # Write your plugin code below"
])
if not os.path.exists(str(Path(plugin_dir + plugin_name + os.sep + "__init__.py"))):
with open(str(Path(plugin_dir + plugin_name + os.sep + "__init__.py")), "w") as init:
init.writelines([
"#/" + plugin_name.lower() + "\n",
"#" + plugin_name + " Plugin\n"
])
if not os.path.exists(str(Path(plugin_dir + plugin_name + os.sep + "readme.md"))):
with open(str(Path(plugin_dir + plugin_name + os.sep + "readme.md")), "w") as readme:
readme.writelines([
"# " + plugin_name + " #\n"
])
if not os.path.exists(str(Path(plugin_dir + plugin_name + os.sep + "requirement.txt"))):
with open(str(Path(plugin_dir + plugin_name + os.sep + "requirement.txt")), "w") as requirement:
pass
print("plugin " + plugin_name + " was created successfully.")
else:
print("plugin " + plugin_name + " already exists.")
os._exit(0)
elif args.plugin and not plugin_dir_in_config:
print("the plugin_dir is not set in the configuration file.")
os._exit(0)
if "pool_size" in config.keys():
if int(config["pool_size"]) < 1 or int(config["pool_size"]) > 100:
print("thread pool size is out of range (1-100).")
os._exit(0)
else:
config["pool_size"] = "40"
if "local_api_server" in config.keys():
local_api_server = config["local_api_server"]
if (local_api_server == None or
local_api_server == "" or
local_api_server == "False" or
len(local_api_server) < 7):
config["local_api_server"] = "False"
else:
if "https://" in local_api_server:
print("local api server address not support https.")
os._exit(0)
if "http://" not in local_api_server:
print("local api server address incorrect.")
os._exit(0)
if "telegram.org" in local_api_server:
print("local api server address incorrect.")
os._exit(0)
if local_api_server[len(local_api_server)-1] != "/":
local_api_server += "/"
config["local_api_server"] = local_api_server
else:
config["local_api_server"] = "False"
if "self_signed" in config.keys():
if config["self_signed"] == "True":
config["self_signed"] = True
elif config["self_signed"] == "False":
config["self_signed"] = False
else:
print("The self_signed field value in the configuration file is wrong.")
os._exit(0)
else:
config["self_signed"] = False
if "drop_pending_updates" in config.keys():
if config["drop_pending_updates"] == "True":
config["drop_pending_updates"] = True
elif config["drop_pending_updates"] == "False":
config["drop_pending_updates"] = False
else:
print("The drop_pending_updates field value in the configuration file is wrong.")
os._exit(0)
else:
config["drop_pending_updates"] = False
if config["debug"] == "True":
config["debug"] = True
elif config["debug"] == "False":
config["debug"] = False
else:
print("The debug field value in the configuration file is wrong.")
os._exit(0)
if config["webhook"] == "True":
config["webhook"] = True
elif config["webhook"] == "False":
config["webhook"] = False
else:
print("The webhook field value in the configuration file is wrong.")
os._exit(0)
config["author"] = __author__
config["version"] = __version__
config["plugin_dir"] = plugin_dir
config["plugin_bridge"] = _bridge(config["plugin_dir"])
config["plugin_info"] = _plugin_info(
config["plugin_bridge"].keys(), config["plugin_dir"])
config["cloud_api_server"] = cloud_api_server
if args.debug:
config["debug"] = True
# print(config)
return config
def _bridge(plugin_dir):
'''
    Get the mapping between plugins and trigger commands.
'''
plugin_bridge = {}
plugin_list = []
plugin_lis = os.listdir(plugin_dir)
for plugi in plugin_lis:
if os.path.isdir(str(Path(plugin_dir + plugi))) and plugi != "__pycache__" and plugi[0] != '.':
plugin_list.append(plugi)
for plugin in plugin_list:
with open(str(Path(plugin_dir + plugin + r"/__init__.py")), encoding="utf-8") as f:
row_one = f.readline().strip()[1:]
if row_one != "~~": # Hidden plugin
plugin_bridge[plugin] = row_one
# print(plugin_bridge)
return plugin_bridge
def _plugin_info(plugin_list, plugin_dir):
'''
    Get the modification state (mtime) of each plugin.
'''
plugin_info = {}
for plugin in plugin_list:
mtime = os.stat(str(Path(plugin_dir + plugin + "/" + plugin + ".py"))).st_mtime
plugin_info[plugin] = mtime
return plugin_info
if args.close and args.logout:
print("only one of logout and close can be used at the same time.")
os._exit(0)
elif args.logout and not args.close:
config = _config()
logout_url = cloud_api_server + "bot" + config["key"] + "/logOut"
try:
req = requests.post(url=logout_url, verify=False)
except:
print("error request the cloud Bot API server.")
os._exit(0)
if req.json().get("ok"):
print("successfully log out from the cloud Bot API server.")
elif not req.json().get("ok"):
print("error log out from the cloud Bot API server.")
if (req.json().get("error_code") == 401 and
req.json().get("description") == "Unauthorized"):
print("if you already logout the bot from the cloud Bot API server,please wait at least 10 minutes and try again.")
os._exit(0)
elif args.close and not args.logout:
config = _config()
if config["local_api_server"] == "False":
print("close can only be used when local_api_server is configured.")
os._exit(0)
close_url = config["local_api_server"] + "bot" + config["key"] + "/close"
try:
req = requests.post(url=close_url, verify=False)
except:
print("error request the the local API server.")
os._exit(0)
if req.json().get("ok"):
print("successfully close from the local API server.")
elif not req.json().get("ok"):
print("error close from the local API server.")
if req.json().get("error_code") == 429:
print("too many requests, please retry after " + str(req.json().get("parameters")["retry_after"]) + " seconds.")
os._exit(0) | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/handler.py | handler.py |
import inspect
import time
import sys
import os
import json
import shutil
import importlib
import threading
from pathlib import Path
from urllib.parse import quote
from concurrent.futures import ThreadPoolExecutor
from .handler import _config, _bridge, _plugin_info
from .logger import _logger
from .schedule import _Schedule
from .request import _Request
class Bot(object):
"""机器人的基类"""
def __init__(self, key=""):
config = _config()
if key != "":
self._key = key
elif key == "":
self._key = config["key"]
self._cloud_api_server = config["cloud_api_server"]
self._local_api_server = config["local_api_server"]
if self._local_api_server != "False":
self._basic_url = config["local_api_server"]
else:
self._basic_url = self._cloud_api_server
self._url = self._basic_url + r"bot" + self._key + r"/"
self._webhook = config["webhook"]
if self._webhook:
self._self_signed = config["self_signed"]
self._cert_key = config["cert_key"]
self._cert_pub = config["cert_pub"]
self._server_address = config["server_address"]
self._server_port = config["server_port"]
self._local_address = config["local_address"]
self._local_port = config["local_port"]
self._offset = 0
self._timeout = 60
self._debug = config["debug"]
self._pool_size = config["pool_size"]
self._drop_pending_updates = config["drop_pending_updates"]
self.__root_id = config["root_id"]
self.__bot_id = self._key.split(":")[0]
self.__AUTHOR = config["author"]
self.__VERSION = config["version"]
self.__plugin_dir = config["plugin_dir"]
self.__plugin_bridge = config["plugin_bridge"]
self.__start_time = int(time.time())
self.__response_times = 0
self.__response_chats = []
self.__response_users = []
thread_pool_size = round(int(self._pool_size) * 2 / 3)
schedule_queue_size = int(self._pool_size) - thread_pool_size
self.request = _Request(thread_pool_size, self._url, self._debug)
self.schedule = _Schedule(schedule_queue_size)
self.__thread_pool = ThreadPoolExecutor(
max_workers=thread_pool_size)
self.__timer_thread_pool = ThreadPoolExecutor(
max_workers=int(self._pool_size) * 5)
self.__plugin_info = config["plugin_info"]
del config
del thread_pool_size
del schedule_queue_size
def __del__(self):
self.__thread_pool.shutdown(wait=True)
self.__timer_thread_pool.shutdown(wait=True)
del self.request
del self.schedule
# zgram method
def __threadpool_exception(self, fur):
"""
        Thread pool exception callback.
"""
if fur.exception() is not None:
_logger.debug("EXCEPTION" + " - " + str(fur.result()))
def __import_module(self, plugin_name):
"""
        Dynamically import a plugin module.
"""
sys.path.append(self.path_converter(self.__plugin_dir + plugin_name + os.sep))
        Module = importlib.import_module(plugin_name)  # module check
return Module
def __update_plugin(self, plugin_name):
"""
        Hot-reload a plugin.
"""
plugin_uri = self.path_converter(
self.__plugin_dir + plugin_name + os.sep + plugin_name + ".py")
now_mtime = os.stat(plugin_uri).st_mtime
# print(now_mtime, self.__plugin_info[plugin_name])
        if now_mtime != self.__plugin_info[plugin_name]:  # hot-reload the plugin
if os.path.exists(self.path_converter(self.__plugin_dir + plugin_name + r"/__pycache__")):
shutil.rmtree(self.path_converter(self.__plugin_dir + plugin_name + r"/__pycache__"))
self.__plugin_info[plugin_name] = now_mtime
Module = self.__import_module(plugin_name)
importlib.reload(Module)
_logger.info("The plugin " + plugin_name + " has been updated")
def __load_plugin(self, now_plugin_bridge, now_plugin_info):
"""
        Dynamically load and unload plugins.
"""
for plugin in list(now_plugin_bridge.keys()):
if plugin not in list(self.__plugin_bridge.keys()):
_logger.info("The plugin " + plugin + " has been installed")
self.__plugin_info[plugin] = now_plugin_info[plugin]
for plugin in list(self.__plugin_bridge.keys()):
if plugin not in list(now_plugin_bridge.keys()):
_logger.info("The plugin " + plugin + " has been uninstalled")
self.__plugin_info.pop(plugin)
self.__plugin_bridge = now_plugin_bridge
def __control_plugin(self, plugin_bridge, chat_type, chat_id):
if chat_type != "private" and "PluginCTL" in plugin_bridge.keys() \
and plugin_bridge["PluginCTL"] == "/pluginctl":
if os.path.exists(self.path_converter(self.__plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db")):
with open(self.path_converter(self.__plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
plugin_bridge_temp = {}
for plugin in list(plugin_bridge.keys()):
if plugin not in plugin_list_off:
plugin_bridge_temp[plugin] = plugin_bridge[plugin]
plugin_bridge = plugin_bridge_temp
return plugin_bridge
def __mark_message_for_pluginRun(self, message):
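        # Tag the update with a message_type and decide which dict key
        # _pluginRun matches commands against: text-like updates match on
        # their content, media updates match on the type name itself.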
if "callback_query_id" in message.keys(): # callback query
message["message_type"] = "callback_query_data"
message_type = "callback_query_data"
elif ("new_chat_members" in message.keys()) or ("left_chat_member" in message.keys()):
message["message_type"] = "text"
message_type = "text"
message["text"] = "" # default prefix of command
elif "photo" in message.keys():
message["message_type"] = "photo"
message_type = "message_type"
elif "sticker" in message.keys():
message["message_type"] = "sticker"
message_type = "message_type"
elif "video" in message.keys():
message["message_type"] = "video"
message_type = "message_type"
elif "audio" in message.keys():
message["message_type"] = "audio"
message_type = "message_type"
elif "document" in message.keys():
message["message_type"] = "document"
message_type = "message_type"
elif "text" in message.keys():
message["message_type"] = "text"
message_type = "text"
elif "caption" in message.keys():
message["message_type"] = "caption"
message_type = "caption"
elif "query" in message.keys():
message["message_type"] = "query"
message_type = "query"
else:
message["message_type"] = "unknown"
message_type = "unknown"
return message_type, message
def __logging_for_pluginRun(self, message, plugin):
title = "" # INFO日志
user_name = ""
if message["chat"]["type"] == "private":
if "first_name" in message["chat"].keys():
title += message["chat"]["first_name"]
if "last_name" in message["chat"].keys():
if "first_name" in message["chat"].keys():
title += " " + message["chat"]["last_name"]
else:
title += message["chat"]["last_name"]
elif "title" in message["chat"].keys():
title = message["chat"]["title"]
if "reply_markup" in message.keys() and \
message["message_type"] == "callback_query_data":
from_id = message["click_user"]["id"]
if "first_name" in message["click_user"].keys():
user_name += message["click_user"]["first_name"]
if "last_name" in message["click_user"].keys():
if "first_name" in message["click_user"].keys():
user_name += " " + message["click_user"]["last_name"]
else:
user_name += message["chat"]["last_name"]
else:
from_id = message["from"]["id"]
if "first_name" in message["from"].keys():
user_name += message["from"]["first_name"]
if "last_name" in message["from"].keys():
if "first_name" in message["from"].keys():
user_name += " " + message["from"]["last_name"]
else:
user_name += message["from"]["last_name"]
if message["message_type"] == "unknown":
_logger.info(
"From:" + title + "(" + str(message["chat"]["id"]) + ") - " + \
"User:" + user_name + "(" + str(from_id) + ") - " + \
"Plugin: " + "" + " - " + \
"Type:" + message["message_type"])
else:
_logger.info(
"From:" + title + "(" + str(message["chat"]["id"]) + ") - " + \
"User:" + user_name + "(" + str(from_id) + ") - " + \
"Plugin: " + str(plugin) + " - " + \
"Type:" + message["message_type"])
def _pluginRun(self, bot, message):
"""
        Run the matching plugins.
"""
if message is None:
return
now_plugin_bridge = _bridge(self.__plugin_dir)
now_plugin_info = _plugin_info(now_plugin_bridge.keys(), self.__plugin_dir)
        if now_plugin_bridge != self.__plugin_bridge:  # dynamically load plugins
self.__load_plugin(now_plugin_bridge, now_plugin_info)
if len(now_plugin_info) != len(self.__plugin_info) or \
            now_plugin_info != self.__plugin_info:  # dynamically refresh plugin info
for plugin_name in list(self.__plugin_bridge.keys()):
                self.__update_plugin(plugin_name)  # hot-reload the plugin
if len(self.__plugin_bridge) == 0:
os.system("")
_logger.warn("\033[1;31mNo plugins installed\033[0m")
        plugin_bridge = self.__control_plugin(  # pluginctl control
self.__plugin_bridge, message["chat"]["type"], message["chat"]["id"])
message_type = ""
        message_type, message = self.__mark_message_for_pluginRun(message)  # classify and tag the message
if message_type == "unknown":
self.__logging_for_pluginRun(message, "unknown")
return
for plugin, command in plugin_bridge.items():
if message.get(message_type)[:len(command)] == command:
module = self.__import_module(plugin)
pluginFunc = getattr(module, plugin)
fur = self.__thread_pool.submit(pluginFunc, bot, message)
fur.add_done_callback(self.__threadpool_exception)
self.__response_times += 1
if message["chat"]["type"] != "private" and \
message["chat"]["id"] not in self.__response_chats:
self.__response_chats.append(message["chat"]["id"])
if message["from"]["id"] not in self.__response_users:
self.__response_users.append(message["from"]["id"])
self.__logging_for_pluginRun(message, plugin)
def _washUpdates(self, results):
"""
        Clean the update queue.
        `results` should be a list.
"""
if not results:
return False
elif len(results) < 1:
return None
update_ids = []
messages = []
for result in results:
if "update_id" not in result.keys():
return None
update_ids.append(result["update_id"])
query_or_message = ""
if result.get("inline_query"):
query_or_message = "inline_query"
elif result.get("callback_query"):
query_or_message = "callback_query"
elif result.get("message"):
query_or_message = "message"
update_ids.append(result.get("update_id"))
if query_or_message == "callback_query":
callback_query = result.get(query_or_message).get("message")
callback_query["click_user"] = result.get(query_or_message)[
"from"]
callback_query["callback_query_id"] = result.get(
query_or_message).get("id")
callback_query["callback_query_data"] = result.get(
query_or_message).get("data")
messages.append(callback_query)
else:
messages.append(result.get(query_or_message))
if len(update_ids) >= 1:
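            # advance the long-polling offset past the newest update so the
            # Bot API treats everything up to it as acknowledged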
self._offset = max(update_ids) + 1
return messages
else:
return None
def message_deletor(self, time_gap, chat_id, message_id):
"""
        Delete a message after a delay; the delay range is [0, 900] seconds.
"""
if time_gap < 0 or time_gap > 900:
return "time_gap_error"
else:
def message_deletor_func(time_gap, chat_id, message_id):
time.sleep(int(time_gap))
self.deleteMessage(chat_id=chat_id, message_id=message_id)
if time_gap == 0:
                message_deletor_func(time_gap, chat_id, message_id)
else:
fur = self.__timer_thread_pool.submit(
message_deletor_func, time_gap, chat_id, message_id)
fur.add_done_callback(self.__threadpool_exception)
return "ok"
def timer(self, time_gap, func, args):
"""
        One-shot timer; the delay range is [0, 900] seconds.
"""
if time_gap < 0 or time_gap > 900:
return "time_gap_error"
elif type(args) is not tuple:
return "args_must_be_tuple"
else:
def timer_func(time_gap, func, args):
time.sleep(int(time_gap))
func(*args)
if time_gap == 0:
                func(*args)
else:
fur = self.__timer_thread_pool.submit(
timer_func, time_gap, func, args)
fur.add_done_callback(self.__threadpool_exception)
return "ok"
def path_converter(self, path):
"""
        Convert the path according to the operating system.
"""
path = str(Path(path))
return path
@property
def plugin_bridge(self):
"""
        Get the plugin bridge (plugin-to-command mapping).
"""
return self.__plugin_bridge
@property
def plugin_dir(self):
"""
        Get the plugin directory.
"""
return self.__plugin_dir
@property
def version(self):
"""
        Get the framework version.
"""
return self.__VERSION
@property
def author(self):
"""
        Author information.
"""
return self.__AUTHOR
@property
def root_id(self):
"""
        Get the root user's ID.
"""
return self.__root_id
@property
def bot_id(self):
"""
        Get the bot's ID.
"""
return self.__bot_id
@property
def uptime(self):
"""
        Get the framework uptime in seconds.
"""
second = int(time.time()) - self.__start_time
return second
@property
def response_times(self):
"""
        Get the number of commands answered since the framework started.
"""
return self.__response_times
@property
def response_chats(self):
"""
        Get the IDs of all group chats answered since the framework started.
"""
return self.__response_chats
@property
def response_users(self):
"""
        Get the IDs of all users answered since the framework started.
"""
return self.__response_users
def getChatCreator(self, chat_id):
"""
        Get information about the group creator.
"""
if str(chat_id)[0] == "-":
req = self.getChatAdministrators(str(chat_id))
if req:
creator = []
for i, user in enumerate(req):
if user["status"] == "creator":
creator.append(req[i])
if len(creator) == 1:
return creator[0]
else:
return False
else:
return False
def getFileDownloadPath(self, file_id):
"""
        Build a file download link.
        Note: the download link contains the bot key.
"""
req = self.getFile(file_id=file_id)
if req:
file_path = req["file_path"]
if (self._local_api_server != "False" and
"telegram.org" not in self._basic_url):
return file_path
else:
file_download_path = self._basic_url + "file/bot" + self._key + r"/" + file_path
return file_download_path
else:
return False
# Getting updates
def getUpdates(self, limit=100, allowed_updates=None):
"""
获取消息队列
"""
command = inspect.stack()[0].function
addr = command + "?offset=" + str(self._offset) + \
"&limit=" + str(limit) + "&timeout=" + str(self._timeout)
if allowed_updates is not None:
return self.request.postJson(addr, allowed_updates)
else:
return self.request.get(addr)
def setWebhook(self, url, certificate=None, ip_address=None,
max_connections=None, allowed_updates=None, drop_pending_updates=None):
"""
        Set the webhook.
Ports currently supported for Webhooks: 443, 80, 88, 8443.
"""
command = inspect.stack()[0].function
addr = command + "?url=" + str(url)
if ip_address is not None:
addr += "&ip_address=" + str(ip_address)
if max_connections is not None:
addr += "&max_connections=" + str(max_connections)
if allowed_updates is not None:
addr += "&allowed_updates=" + str(allowed_updates)
if drop_pending_updates is not None:
addr += "&drop_pending_updates=" + str(drop_pending_updates)
file_data = None
if certificate is not None:
if type(certificate) == bytes:
file_data = {"certificate": certificate}
else:
file_data = {"certificate": open(certificate, 'rb')}
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def deleteWebhook(self, drop_pending_updates=None):
"""
        Delete the configured webhook.
"""
command = inspect.stack()[0].function
addr = command
if drop_pending_updates is not None:
addr += "?drop_pending_updates=" + str(drop_pending_updates)
return self.request.post(addr)
def getWebhookInfo(self):
"""
        Get the current webhook status.
"""
command = inspect.stack()[0].function
addr = command
return self.request.post(addr)
# Available methods
def getMe(self):
"""
        Get basic information about the bot.
"""
command = inspect.stack()[0].function
addr = command + "?" + "offset=" + \
str(self._offset) + "&timeout=" + str(self._timeout)
return self.request.post(addr)
def getFile(self, file_id):
"""
        Get file information.
"""
command = inspect.stack()[0].function
addr = command + "?file_id=" + file_id
return self.request.post(addr)
def logOut(self):
"""
        Use this method to log out from the cloud Bot API server before launching the bot locally.
"""
command = inspect.stack()[0].function
addr = command
return self.request.post(addr)
def close(self):
"""
        Use this method to close the bot instance before moving it
        from one local server to another.
"""
command = inspect.stack()[0].function
addr = command
return self.request.post(addr)
def sendMessage(self, chat_id, text, parse_mode="Text", reply_to_message_id=None,
reply_markup=None, disable_web_page_preview=None, entities=None,
allow_sending_without_reply=None):
"""
        Send a text message.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&text=" + quote(text)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode=" + parse_mode
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if disable_web_page_preview is not None:
addr += "&disable_web_page_preview=" + str(disable_web_page_preview)
if entities is not None:
addr += "&entities=" + json.dumps(entities)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
return self.request.post(addr)
def sendVoice(self, chat_id, voice, caption=None, parse_mode="Text", reply_to_message_id=None,
reply_markup=None, allow_sending_without_reply=None, caption_entities=None):
"""
        Send a voice message (.ogg).
"""
command = inspect.stack()[0].function
if voice[:7] == "http://" or voice[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&voice=" + voice
elif type(voice) == bytes:
file_data = {"voice": voice}
addr = command + "?chat_id=" + str(chat_id)
elif type(voice) == str and '.' not in voice:
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&voice=" + voice
else:
file_data = {"voice": open(voice, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode" + parse_mode
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if caption_entities is not None:
addr += "&caption_entities=" + json.dumps(caption_entities)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def sendAnimation(self, chat_id, animation, caption=None, parse_mode="Text", reply_to_message_id=None,
reply_markup=None, allow_sending_without_reply=None, caption_entities=None):
"""
        Send an animation (gif/mp4).
"""
command = inspect.stack()[0].function
if animation[:7] == "http://" or animation[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&animation=" + animation
elif type(animation) == bytes:
file_data = {"animation": animation}
addr = command + "?chat_id=" + str(chat_id)
elif type(animation) == str and '.' not in animation:
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&animation=" + animation
else:
file_data = {"animation": open(animation, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode" + parse_mode
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if caption_entities is not None:
addr += "&caption_entities=" + json.dumps(caption_entities)
if file_data is None:
return self.request.post(addr)
else:
            return self.request.postFile(addr, file_data)
def sendAudio(self, chat_id, audio, caption=None, parse_mode="Text", title=None, reply_to_message_id=None,
reply_markup=None, allow_sending_without_reply=None, caption_entities=None):
"""
        Send an audio file (mp3).
"""
command = inspect.stack()[0].function
if audio[:7] == "http://" or audio[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&audio=" + audio
elif type(audio) == bytes:
file_data = {"audio": audio}
addr = command + "?chat_id=" + str(chat_id)
elif type(audio) == str and '.' not in audio:
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&audio=" + audio
else:
file_data = {"audio": open(audio, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode" + parse_mode
if title is not None:
addr += "&title=" + title
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if caption_entities is not None:
addr += "&caption_entities=" + json.dumps(caption_entities)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def sendPhoto(self, chat_id, photo, caption=None, parse_mode="Text", reply_to_message_id=None,
                  reply_markup=None, allow_sending_without_reply=None, caption_entities=None):  # send a photo
"""
        Send a photo.
"""
command = inspect.stack()[0].function
if photo[:7] == "http://" or photo[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&photo=" + photo
elif type(photo) == bytes:
file_data = {"photo": photo}
addr = command + "?chat_id=" + str(chat_id)
elif type(photo) == str and '.' not in photo:
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&photo=" + photo
else:
file_data = {"photo": open(photo, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode=" + parse_mode
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if caption_entities is not None:
addr += "&caption_entities=" + json.dumps(caption_entities)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def sendVideo(self, chat_id, video, caption=None, parse_mode="Text", reply_to_message_id=None,
reply_markup=None, allow_sending_without_reply=None, caption_entities=None):
"""
        Send a video.
"""
command = inspect.stack()[0].function
if video[:7] == "http://" or video[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&video=" + video
elif type(video) == bytes:
file_data = {"video": video}
addr = command + "?chat_id=" + str(chat_id)
elif type(video) == str and '.' not in video:
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&video=" + video
else:
file_data = {"video": open(video, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode=" + parse_mode
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if caption_entities is not None:
addr += "&caption_entities=" + json.dumps(caption_entities)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def sendVideoNote(self, chat_id, video_note, caption=None, parse_mode="Text", reply_to_message_id=None,
reply_markup=None, allow_sending_without_reply=None):
"""
        Send a video note (round or square video).
"""
command = inspect.stack()[0].function
char_id_str = str(chat_id)
if video_note[:7] == "http://" or video_note[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + char_id_str + "&video_note=" + video_note
elif type(video_note) == bytes:
file_data = {"video_note": video_note}
addr = command + "?chat_id=" + char_id_str
elif type(video_note) == str and '.' not in video_note:
file_data = None
addr = command + "?chat_id=" + char_id_str + "&video_note=" + video_note
else:
file_data = {"video_note": open(video_note, 'rb')}
addr = command + "?chat_id=" + char_id_str
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode=" + parse_mode
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def sendMediaGroup(self, chat_id, medias, disable_notification=None, reply_to_message_id=None,
                       reply_markup=None, allow_sending_without_reply=None):  # format not fully figured out yet
"""
        Use this method to send a group of photos, videos, documents or
        audio files as an album. Documents and audio files can only be
        grouped in an album with messages of the same type.
        (Currently only http links and file ids are supported; uploading
        files is not supported yet.)
        Format of medias (the request must carry a JSON content-type header,
        and the data dict must be serialized to a JSON string):
medias ={
'caption': 'test',
'media': [
{
'type': 'photo',
'media': 'https://xxxx.com/sample/7kwx_2.jpg'
},
{
'type': 'photo',
'media': 'AgACAgQAAx0ETbyLwwADeF5s6QosSI_IW3rKir3PrMUX'
}
]
}
InputMediaPhoto:
type
media
caption
parse_mode
InputMediaVideo:
type
media
thumb
caption
parse_mode
width
height
duration
supports_streaming
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
return self.request.postJson(addr, medias)
def sendDocument(self, chat_id, document, caption=None, parse_mode="Text",
reply_to_message_id=None, reply_markup=None, disable_content_type_detection=None,
allow_sending_without_reply=None, caption_entities=None):
"""
        Send a document.
"""
command = inspect.stack()[0].function
if document[:7] == "http://" or document[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&document=" + document
elif type(document) == bytes:
file_data = {"document": document}
addr = command + "?chat_id=" + str(chat_id)
elif type(document) == str and '.' not in document:
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&document=" + document
else:
file_data = {"document": open(document, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode=" + parse_mode
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if disable_content_type_detection is not None:
addr += "&disable_content_type_detection=" + str(disable_content_type_detection)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if caption_entities is not None:
addr += "&caption_entities=" + json.dumps(caption_entities)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def leaveChat(self, chat_id):
"""
        Leave a group.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def getChat(self, chat_id):
"""
        Use this method to get up-to-date information about the chat
        (current name of the user for one-on-one conversations, current
        username of a user, group or channel, etc.).
        Returns a Chat object on success.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def getChatAdministrators(self, chat_id):
"""
        Get information about all administrators of a group.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def getChatMembersCount(self, chat_id):
"""
        Get the total number of members in a group.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def getUserProfilePhotos(self, user_id, offset=None, limit=None):
"""
        Get a user's profile photos.
"""
command = inspect.stack()[0].function
addr = command + "?user_id=" + str(user_id)
if offset is not None:
addr += "&offset=" + str(offset)
if limit is not None and limit in list(range(1, 101)):
addr += "&limit=" + str(limit)
return self.request.post(addr)
def getChatMember(self, chat_id, user_id):
"""
        Get information about a specific member of a group.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&user_id=" + str(user_id)
return self.request.post(addr)
def setChatTitle(self, chat_id, title):
"""
        Set the group title.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&title=" + quote(str(title))
return self.request.post(addr)
def setChatDescription(self, chat_id, description):
"""
        Set the group description (seems to have no effect in testing).
        //FIXME
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&description=" + quote(str(description))
return self.request.post(addr)
def setChatPhoto(self, chat_id, photo):
"""
        Set the group photo.
"""
command = inspect.stack()[0].function
file_data = {"photo": open(photo, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
return self.request.postFile(addr, file_data)
def deleteChatPhoto(self, chat_id):
"""
        Delete the chat photo
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def setChatPermissions(self, chat_id, permissions):
"""
        Set the default chat permissions for all members
permissions = {
'can_send_messages':False,
'can_send_media_messages':False,
'can_send_polls':False,
'can_send_other_messages':False,
'can_add_web_page_previews':False,
'can_change_info':False,
'can_invite_users':False,
'can_pin_messages':False
}
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
permissions = {"permissions": permissions}
return self.request.postJson(addr, permissions)
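    # Usage sketch for setChatPermissions (illustration only; assumes `bot`
    # and a supergroup `chat_id`): allow plain messages and invites only.
    #
    #     permissions = {
    #         "can_send_messages": True,
    #         "can_send_media_messages": False,
    #         "can_send_polls": False,
    #         "can_send_other_messages": False,
    #         "can_add_web_page_previews": False,
    #         "can_change_info": False,
    #         "can_invite_users": True,
    #         "can_pin_messages": False
    #     }
    #     bot.setChatPermissions(chat_id, permissions)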
def restrictChatMember(self, chat_id, user_id, permissions, until_date=None):
"""
        Restrict a user's permissions in a group
permissions = {
'can_send_messages':False,
'can_send_media_messages':False,
'can_send_polls':False,
'can_send_other_messages':False,
'can_add_web_page_previews':False,
'can_change_info':False,
'can_invite_users':False,
'can_pin_messages':False
}
until_date format:
timestamp + offset
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + \
str(chat_id) + "&user_id=" + str(user_id)
if len(permissions) != 8:
return False
if until_date is not None:
until_date = int(time.time()) + int(until_date)
addr += "&until_date=" + str(until_date)
        permissions = {"permissions": permissions}
        return self.request.postJson(addr, permissions)
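    # Usage sketch for restrictChatMember (illustration only): mute a member
    # for one hour; until_date is an offset in seconds that the method adds
    # to the current timestamp. All eight permission keys must be present.
    #
    #     muted = {key: False for key in (
    #         "can_send_messages", "can_send_media_messages", "can_send_polls",
    #         "can_send_other_messages", "can_add_web_page_previews",
    #         "can_change_info", "can_invite_users", "can_pin_messages")}
    #     bot.restrictChatMember(chat_id, user_id, muted, until_date=3600)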
def promoteChatMember(self, chat_id, user_id, is_anonymous=None,
can_change_info=None, can_post_messages=None, can_edit_messages=None,
can_delete_messages=None, can_invite_users=None, can_restrict_members=None,
can_pin_messages=None, can_promote_members=None):
"""
        Promote or demote a chat member (only administrators appointed by
        the bot can be modified, within the subset of the bot's own rights)
{
'is_anonymous':None,
'can_change_info':False,
'can_post_messages':False,
'can_edit_messages':False,
'can_delete_messages':False,
'can_invite_users':False,
'can_restrict_members':False,
'can_pin_messages':False,
'can_promote_members':False
}
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&user_id=" + str(user_id)
if is_anonymous is not None:
addr += "&is_anonymous=" + str(is_anonymous)
if can_change_info is not None:
addr += "&can_change_info=" + str(can_change_info)
if can_post_messages is not None:
addr += "&can_post_messages=" + str(can_post_messages)
if can_edit_messages is not None:
addr += "&can_edit_messages=" + str(can_edit_messages)
if can_delete_messages is not None:
addr += "&can_delete_messages=" + str(can_delete_messages)
if can_invite_users is not None:
addr += "&can_invite_users=" + str(can_invite_users)
if can_restrict_members is not None:
addr += "&can_restrict_members=" + str(can_restrict_members)
if can_pin_messages is not None:
addr += "&can_pin_messages=" + str(can_pin_messages)
if can_promote_members is not None:
addr += "&can_promote_members=" + str(can_promote_members)
return self.request.post(addr)
def pinChatMessage(self, chat_id, message_id, disable_notification=None):
"""
        Pin a message in a chat
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&message_id=" + str(message_id)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
return self.request.post(addr)
def unpinChatMessage(self, chat_id, message_id=None):
"""
        Use this method to remove a message from the list of pinned messages in a chat
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
if message_id is not None:
addr += "&message_id=" + str(message_id)
return self.request.post(addr)
def unpinAllChatMessages(self, chat_id):
"""
        Use this method to clear the entire list of pinned messages in a chat
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def sendLocation(self, chat_id, latitude, longitude,
horizontal_accuracy=None, live_period=None,
heading=None, disable_notification=None,
reply_to_message_id=None, reply_markup=None,
allow_sending_without_reply=None):
"""
        Send a location on the map by latitude and longitude
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&latitude=" + str(
float(latitude)) + "&longitude=" + str(float(longitude))
if live_period is not None:
addr += "&live_period=" + str(live_period)
if horizontal_accuracy is not None:
addr += "&horizontal_accuracy=" + str(horizontal_accuracy)
if heading is not None:
addr += "&heading=" + str(heading)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
return self.request.post(addr)
def sendContact(self, chat_id, phone_number, first_name, last_name=None, reply_to_message_id=None,
reply_markup=None, allow_sending_without_reply=None):
"""
        Send a phone contact
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&phone_number=" + str(phone_number) + "&first_name=" + str(
first_name)
if last_name is not None:
addr += "&last_name=" + str(last_name)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
return self.request.post(addr)
def sendPoll(self, chat_id, question, options, is_anonymous=None,
type_=None, allows_multiple_answers=None, correct_option_id=None,
explanation=None, explanation_parse_mode=None, explanation_entities=None,
open_period=None, close_date=None, is_closed=None, disable_notification=None,
reply_to_message_id=None, allow_sending_without_reply=None, reply_markup=None):
"""
        Use this method to send a native poll (quiz or regular, defaults to regular)
        options format:
options = [
"option 1",
"option 2"
]
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&question=" + str(question)
addr += "&options=" + json.dumps(options)
if is_anonymous is not None:
addr += "&is_anonymous=" + str(is_anonymous)
if type_ is not None:
addr += "&type=" + str(type_)
        if allows_multiple_answers is not None:
            addr += "&allows_multiple_answers=" + str(allows_multiple_answers)
        if correct_option_id is not None:
            addr += "&correct_option_id=" + str(correct_option_id)
if explanation is not None:
addr += "&explanation=" + str(explanation)
if explanation_parse_mode is not None:
addr += "&explanation_parse_mode=" + str(explanation_parse_mode)
if explanation_entities is not None:
addr += "&explanation_entities=" + json.dumps(explanation_entities)
if open_period is not None:
addr += "&open_period=" + str(open_period)
if close_date is not None:
addr += "&close_date=" + str(close_date)
if is_closed is not None:
addr += "&is_closed=" + str(is_closed)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.post(addr)
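    # Usage sketch for sendPoll (illustration only; assumes `bot` and
    # `chat_id`):
    #
    #     bot.sendPoll(chat_id, "Best editor?", ["vim", "emacs", "nano"])
    #     # quiz mode with the correct answer at index 1:
    #     bot.sendPoll(chat_id, "2 + 2 = ?", ["3", "4", "5"],
    #                  type_="quiz", correct_option_id=1)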
def sendDice(self, chat_id, emoji, disable_notification=None,
reply_to_message_id=None, allow_sending_without_reply=None,
reply_markup=None):
"""
        Use this method to send an animated emoji that displays a random value
        The emoji parameter must be one of:
        1. dice, values 1-6
        2. darts, values 1-6
        3. basketball, values 1-5
        4. football, values 1-5
        5. slot machine, values 1-64
        Defaults to dice
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&emoji=" + str(emoji)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.post(addr)
def sendVenue(self, chat_id, latitude, longitude, title, address,
allow_sending_without_reply=None,
foursquare_id=None, foursquare_type=None,
google_place_id=None, google_place_type=None,
disable_notification=None, reply_to_message_id=None,
reply_markup=None):
"""
        Use this method to send information about a venue
        (the venue is shown as a point on the map)
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&latitude=" + str(float(latitude)) + "&longitude=" + str(
float(longitude)) + "&title=" + str(title) + "&address=" + str(address)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if foursquare_id is not None:
addr += "&foursquare_id=" + str(foursquare_id)
if foursquare_type is not None:
addr += "&foursquare_type=" + str(foursquare_type)
if google_place_id is not None:
addr += "&google_place_id=" + str(google_place_id)
if google_place_type is not None:
addr += "&google_place_type=" + str(google_place_type)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.post(addr)
def sendChatAction(self, chat_id, action):
"""
        Send a chat action, e.g. "typing...":
typing :for text messages,
upload_photo :for photos,
record_video/upload_video :for videos,
record_audio/upload_audio :for audio files,
upload_document :for general files,
find_location :for location data,
record_video_note/upload_video_note :for video notes.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&action=" + str(action)
return self.request.post(addr)
def forwardMessage(self, chat_id, from_chat_id, message_id, disable_notification=None):
"""
        Forward a message
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&from_chat_id=" + str(from_chat_id) \
+ "&message_id=" + str(message_id)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
return self.request.post(addr)
def copyMessage(self, chat_id, from_chat_id, message_id,
caption=None, parse_mode="Text", caption_entities=None,
disable_notification=None, reply_to_message_id=None,
allow_sending_without_reply=None, reply_markup=None):
"""
        Use this method to copy a message of any kind.
        The method is analogous to forwardMessage,
        but the copied message has no link back to the original message.
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&from_chat_id=" + str(from_chat_id) \
+ "&message_id=" + str(message_id)
if caption is not None:
addr += "&caption=" + quote(caption)
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
            addr += "&parse_mode=" + parse_mode
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if caption_entities is not None:
return self.request.postJson(addr, caption_entities)
else:
return self.request.post(addr)
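    # Usage sketch for copyMessage (illustration only): re-post message 42
    # from one chat into another without the "forwarded from" header; the
    # chat ids are placeholders.
    #
    #     bot.copyMessage(chat_id=target_chat_id,
    #                     from_chat_id=source_chat_id, message_id=42)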
def kickChatMember(self, chat_id, user_id, until_date=None):
"""
        Kick a user from a group, supergroup or channel; the kicked user cannot rejoin until until_date expires
until_date format:
timestamp + offset
"""
command = inspect.stack()[0].function
        addr = command + "?chat_id=" + str(chat_id) + "&user_id=" + str(user_id)
        if until_date is not None:
            until_date = int(time.time()) + int(until_date)
            addr += "&until_date=" + str(until_date)
return self.request.post(addr)
def unbanChatMember(self, chat_id, user_id, only_if_banned=None):
"""
        Use this method to unban a previously kicked user in a supergroup or channel
        (lifts the until_date restriction set on the user)
ChatPermissions:
can_send_messages
can_send_media_messages
can_send_polls
can_send_other_messages
can_add_web_page_previews
can_change_info
can_invite_users
can_pin_messages
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + \
str(chat_id) + "&user_id=" + str(user_id)
if only_if_banned is not None:
addr += "&only_if_banned=" + str(only_if_banned)
return self.request.post(addr)
def setChatAdministratorCustomTitle(self, chat_id, user_id, custom_title):
"""
        Set a custom title for an administrator in a supergroup
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&user_id=" + str(user_id) + "&custom_title=" + quote(str(custom_title))
return self.request.post(addr)
def exportChatInviteLink(self, chat_id):
"""
        Use this method to generate a new invite link for a group; all previous links are revoked. Returns the new link on success
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def setChatStickerSet(self, chat_id, sticker_set_name):
"""
        Set a sticker set for a supergroup
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&sticker_set_name=" + str(sticker_set_name)
return self.request.post(addr)
    def addStickerToSet(self, user_id, name, emojis,
        png_sticker=None, tgs_sticker=None, mask_position=None):
        """
        Use this method to add a new sticker to a set created by the bot.
        Exactly one of the fields png_sticker or tgs_sticker must be used.
        Animated stickers can be added to animated sticker sets only.
        Animated sticker sets can have up to 50 stickers.
        Static sticker sets can have up to 120 stickers.
        """
        command = inspect.stack()[0].function
        addr = command + "?user_id=" + str(user_id) + "&name=" + str(name) \
            + "&emojis=" + str(emojis)
        if png_sticker is not None and tgs_sticker is not None:
            return False
        elif png_sticker is None and tgs_sticker is None:
            return False
        else:
            if png_sticker is not None:
                if png_sticker[:7] == "http://" or png_sticker[:7] == "https:/":
                    file_data = None
                    addr += "&png_sticker=" + png_sticker
                elif type(png_sticker) == bytes:
                    file_data = {"png_sticker": png_sticker}
                elif type(png_sticker) == str and '.' not in png_sticker:
                    file_data = None
                    addr += "&png_sticker=" + png_sticker
                else:
                    file_data = {"png_sticker": open(png_sticker, 'rb')}
            elif tgs_sticker is not None:
                if tgs_sticker[:7] == "http://" or tgs_sticker[:7] == "https:/":
                    file_data = None
                    addr += "&tgs_sticker=" + tgs_sticker
                elif type(tgs_sticker) == bytes:
                    file_data = {"tgs_sticker": tgs_sticker}
                elif type(tgs_sticker) == str and '.' not in tgs_sticker:
                    file_data = None
                    addr += "&tgs_sticker=" + tgs_sticker
                else:
                    file_data = {"tgs_sticker": open(tgs_sticker, 'rb')}
            if mask_position is not None:
                addr += "&mask_position=" + json.dumps(mask_position)
            if file_data is None:
                return self.request.post(addr)
            else:
                return self.request.postFile(addr, file_data)
def deleteChatStickerSet(self, chat_id):
"""
        Delete the sticker set of a supergroup
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
return self.request.post(addr)
def editMessageLiveLocation(self, latitude, longitude,
horizontal_accuracy=None, chat_id=None, message_id=None,
heading=None, inline_message_id=None, reply_markup=None):
"""
        Use this method to edit a live location message
        When inline_message_id is not specified, chat_id and message_id are required
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
addr += "&latitude=" + str(latitude)
addr += "&longitude=" + str(longitude)
if horizontal_accuracy is not None:
addr += "&horizontal_accuracy=" + str(horizontal_accuracy)
if heading is not None:
addr += "&heading=" + str(heading)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.post(addr)
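    # Usage sketch for live locations (illustration only; coordinates are
    # placeholders): send a live location, move it, then stop the updates.
    #
    #     sent = bot.sendLocation(chat_id, 31.2304, 121.4737, live_period=600)
    #     bot.editMessageLiveLocation(31.2310, 121.4750, chat_id=chat_id,
    #                                 message_id=sent["message_id"])
    #     bot.stopMessageLiveLocation(chat_id=chat_id,
    #                                 message_id=sent["message_id"])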
def stopMessageLiveLocation(self, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None):
"""
        Use this method to stop updating a live location message before live_period expires
        When inline_message_id is not specified, chat_id and message_id are required
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.post(addr)
def setMyCommands(self, commands):
"""
        Use this method to change the bot's list of commands
        commands format example:
commands = [
{"command": "start", "description": "插件列表"},
{"command": "bing", "description": "获取每日Bing壁纸"}
]
"""
command = inspect.stack()[0].function
addr = command
commands = {"commands": commands}
return self.request.postJson(addr, commands)
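    # Usage sketch for setMyCommands (illustration only; the command names
    # and descriptions are placeholders):
    #
    #     bot.setMyCommands([
    #         {"command": "start", "description": "plugin list"},
    #         {"command": "about", "description": "about this bot"}
    #     ])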
def getMyCommands(self):
"""
        Use this method to get the bot's current list of commands
"""
command = inspect.stack()[0].function
addr = command
return self.request.post(addr)
# Updating messages
def editMessageText(self, text, chat_id=None, message_id=None, inline_message_id=None,
parse_mode="Text", disable_web_page_preview=None,
reply_markup=None, entities=None):
"""
        Edit a text message. On success, if the edited message was sent by the bot, the edited message is returned, otherwise True
        When inline_message_id is not specified, chat_id and message_id are required
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
addr += "&text=" + quote(str(text))
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode=" + str(parse_mode)
if disable_web_page_preview is not None:
addr += "&disable_web_page_preview=" + \
str(disable_web_page_preview)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if entities is not None:
addr += "&entities=" + json.dumps(entities)
return self.request.post(addr)
def editMessageCaption(self, chat_id=None, message_id=None,
inline_message_id=None, caption=None, parse_mode="Text",
reply_markup=None, caption_entities=None):
"""
        Edit the caption of a message. On success, if the edited message was sent by the bot, the edited message is returned, otherwise True
        When inline_message_id is not specified, chat_id and message_id are required
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
if caption is not None:
addr += "&caption=" + quote(str(caption))
if parse_mode in ("Markdown", "MarkdownV2", "HTML"):
addr += "&parse_mode=" + str(parse_mode)
if reply_markup is not None:
            addr += "&reply_markup=" + json.dumps(reply_markup)
if caption_entities is not None:
addr += "&caption_entities=" + json.dumps(caption_entities)
return self.request.post(addr)
def editMessageMedia(self, media, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None):
"""
        Edit the media of a message
        When inline_message_id is not specified, chat_id and message_id are required
media format:
media = {
'media':{
'type': 'photo',
'media': 'http://pic1.win4000.com/pic/d/6a/25a2c0e959.jpg',
'caption': '编辑后的Media'
}
}
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.postJson(addr, media)
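    # Usage sketch for editMessageMedia (illustration only): replace the
    # photo of a previously sent media message; `sent` is assumed to be the
    # return value of an earlier send call and the URL is a placeholder.
    #
    #     media = {"media": {"type": "photo",
    #                        "media": "https://example.com/new.jpg",
    #                        "caption": "updated"}}
    #     bot.editMessageMedia(media, chat_id=chat_id,
    #                          message_id=sent["message_id"])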
def editMessageReplyMarkup(self, chat_id=None, message_id=None, inline_message_id=None, reply_markup=None):
"""
        Edit only the reply markup of a message
        When inline_message_id is not specified, chat_id and message_id are required
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.post(addr)
def stopPoll(self, chat_id, message_id, reply_markup=None):
"""
        Stop a poll and return the final results
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&message_id=" + str(message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
return self.request.post(addr)
def deleteMessage(self, chat_id, message_id):
"""
        Delete a message; the bot must have the appropriate permissions
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id) + "&message_id=" + str(message_id)
return self.request.post(addr)
# Inline mode
def answerInlineQuery(self, inline_query_id, results, cache_time=None,
is_personal=None, next_offset=None, switch_pm_text=None, switch_pm_parameter=None):
"""
        Use this method to send answers to an inline query
"""
command = inspect.stack()[0].function
addr = command + "?inline_query_id=" + str(inline_query_id)
if cache_time is not None:
addr += "&cache_time=" + str(cache_time)
if is_personal is not None:
addr += "&is_personal=" + str(is_personal)
if next_offset is not None:
addr += "&next_offset=" + str(next_offset)
if switch_pm_text is not None:
addr += "&switch_pm_text=" + str(switch_pm_text)
if switch_pm_parameter is not None:
addr += "&switch_pm_parameter=" + str(switch_pm_parameter)
return self.request.postJson(addr, results)
def answerCallbackQuery(self, callback_query_id, text=None, show_alert="false", url=None, cache_time=0):
"""
        Use this method to send an answer to a callback query
        InlineKeyboardMarkup format:
replyKeyboard = [
[
{ "text": "命令菜单","callback_data":"/start"},
{ "text": "一排之二","url":"https://google.com"}
],
[
{ "text": "二排之一","url":"https://google.com"},
{ "text": "二排之二","url":"https://google.com"},
{ "text": "二排之三","url":"https://google.com"}
]
]
reply_markup = {
"inline_keyboard": replyKeyboard
}
        ReplyKeyboardMarkup format (does not seem to work in groups):
replyKeyboard = [
[
{ "text": "命令菜单"},
{ "text": "一排之二"}
],
[
{ "text": "二排之一"},
{ "text": "二排之二"},
{ "text": "二排之三"}
]
]
reply_markup = {
"keyboard": replyKeyboard,
"resize_keyboard": bool("false"),
"one_time_keyboard": bool("false"),
"selective": bool("true")
}
        ReplyKeyboardRemove format:
reply_markup = {
"remove_keyboard": bool("true"),
"selective": bool("true")
}
"""
command = inspect.stack()[0].function
addr = command + "?callback_query_id=" + str(callback_query_id)
if text is not None:
addr += "&text=" + quote(str(text))
if show_alert == "true":
addr += "&show_alert=" + str(bool(show_alert))
if url is not None:
addr += "&url=" + str(url)
if cache_time != 0:
addr += "&cache_time=" + str(cache_time)
return self.request.post(addr)
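    # Usage sketch for callback queries (illustration only): attach an inline
    # keyboard to a message, then acknowledge the callback inside an update
    # handler; `callback_query_id` comes from the received update.
    #
    #     reply_markup = {"inline_keyboard": [[
    #         {"text": "menu", "callback_data": "/start"},
    #         {"text": "site", "url": "https://example.com"}
    #     ]]}
    #     bot.sendMessage(chat_id, "choose:", reply_markup=reply_markup)
    #     bot.answerCallbackQuery(callback_query_id, text="done",
    #                             show_alert="true")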
# Stickers
def sendSticker(self, chat_id, sticker, disable_notification=None,
reply_to_message_id=None, reply_markup=None,
allow_sending_without_reply=None):
"""
        Use this method to send a static .webp or animated .tgs sticker
"""
command = inspect.stack()[0].function
if sticker[:7] == "http://" or sticker[:7] == "https:/":
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&sticker=" + sticker
elif type(sticker) == bytes:
file_data = {"sticker": sticker}
addr = command + "?chat_id=" + str(chat_id)
elif type(sticker) == str and '.' not in sticker:
file_data = None
addr = command + "?chat_id=" + str(chat_id) + "&sticker=" + sticker
else:
file_data = {"sticker": open(sticker, 'rb')}
addr = command + "?chat_id=" + str(chat_id)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def getStickerSet(self, name):
"""
        Use this method to get a sticker set
"""
command = inspect.stack()[0].function
addr = command + "?name=" + str(name)
return self.request.post(addr)
def uploadStickerFile(self, user_id, name, title, emojis,
png_sticker=None, tgs_sticker=None, contains_masks=None,
mask_position=None):
"""
        Use this method to upload a .PNG sticker file
        for later use in the createNewStickerSet and addStickerToSet methods
        (the file can be used multiple times)
"""
command = inspect.stack()[0].function
user_id_str = str(user_id)
        if png_sticker is None:
            return False
        if png_sticker[:7] == "http://" or png_sticker[:7] == "https:/":
file_data = None
addr = command + "?user_id=" + user_id_str + "&png_sticker=" + png_sticker
elif type(png_sticker) == bytes:
file_data = {"png_sticker": png_sticker}
addr = command + "?user_id=" + user_id_str
elif type(png_sticker) == str and '.' not in png_sticker:
file_data = None
addr = command + "?user_id=" + user_id_str + "&png_sticker=" + png_sticker
else:
file_data = {"png_sticker": open(png_sticker, 'rb')}
addr = command + "?user_id=" + user_id_str
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def createNewStickerSet(self, user_id, name, title, emojis, png_sticker=None, tgs_sticker=None,
contains_masks=None, mask_position=None):
"""
        Use this method to create a new sticker set owned by a user
        The bot will be able to edit the sticker set thus created
        Exactly one of the fields png_sticker or tgs_sticker must be used
"""
command = inspect.stack()[0].function
addr = command + "?user_id=" + str(user_id)
addr += "&name=" + str(name)
addr += "&title=" + str(title)
addr += "&emojis=" + str(emojis)
if png_sticker is None and tgs_sticker is None:
return False
elif png_sticker is not None and tgs_sticker is not None:
return False
else:
if png_sticker is not None:
if png_sticker[:7] == "http://" or png_sticker[:7] == "https:/":
file_data = None
addr += "&png_sticker=" + png_sticker
elif type(png_sticker) == bytes:
file_data = {"png_sticker": png_sticker}
elif type(png_sticker) == str and '.' not in png_sticker:
file_data = None
addr += "&png_sticker=" + png_sticker
else:
file_data = {"png_sticker": open(png_sticker, 'rb')}
elif tgs_sticker is not None:
if tgs_sticker[:7] == "http://" or tgs_sticker[:7] == "https:/":
file_data = None
addr += "&tgs_sticker=" + tgs_sticker
elif type(tgs_sticker) == bytes:
file_data = {"tgs_sticker": tgs_sticker}
elif type(tgs_sticker) == str and '.' not in tgs_sticker:
file_data = None
addr += "&tgs_sticker=" + tgs_sticker
else:
file_data = {"tgs_sticker": open(tgs_sticker, 'rb')}
            if contains_masks is not None:
                addr += "&contains_masks=" + str(contains_masks)
            if mask_position is not None:
                addr += "&mask_position=" + json.dumps(mask_position)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
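    # Usage sketch for createNewStickerSet (illustration only): create a
    # one-sticker set from a local PNG; Telegram requires the set name to
    # end with "_by_<bot_username>", so the name below is a placeholder.
    #
    #     bot.createNewStickerSet(user_id, "mypack_by_examplebot",
    #                             "My Pack", "😀", png_sticker="smile.png")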
def addStickerToSet(self, user_id, name, emojis, png_sticker=None, tgs_sticker=None,
mask_position=None):
"""
        Use this method to add a new sticker to a set created by the bot
        Exactly one of the fields png_sticker or tgs_sticker must be used.
        Animated stickers can be added to animated sticker sets and only to them
        Animated sticker sets can have up to 50 stickers. Static sticker sets can have up to 120 stickers
"""
command = inspect.stack()[0].function
addr = command + "?user_id=" + str(user_id)
addr += "&name=" + str(name)
addr += "&emojis=" + str(emojis)
if png_sticker is None and tgs_sticker is None:
return False
elif png_sticker is not None and tgs_sticker is not None:
return False
else:
if png_sticker is not None:
if png_sticker[:7] == "http://" or png_sticker[:7] == "https:/":
file_data = None
addr += "&png_sticker=" + png_sticker
elif type(png_sticker) == bytes:
file_data = {"png_sticker": png_sticker}
elif type(png_sticker) == str and '.' not in png_sticker:
file_data = None
addr += "&png_sticker=" + png_sticker
else:
file_data = {"png_sticker": open(png_sticker, 'rb')}
elif tgs_sticker is not None:
if tgs_sticker[:7] == "http://" or tgs_sticker[:7] == "https:/":
file_data = None
addr += "&tgs_sticker=" + tgs_sticker
elif type(tgs_sticker) == bytes:
file_data = {"tgs_sticker": tgs_sticker}
elif type(tgs_sticker) == str and '.' not in tgs_sticker:
file_data = None
addr += "&tgs_sticker=" + tgs_sticker
else:
file_data = {"tgs_sticker": open(tgs_sticker, 'rb')}
if mask_position is not None:
addr += "&mask_position=" + json.dumps(mask_position)
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
def setStickerPositionInSet(self, sticker, position):
"""
        Use this method to move a sticker in a set created by the bot to a specific position
"""
command = inspect.stack()[0].function
addr = command + "?sticker=" + str(sticker)
addr += "&position=" + str(position)
return self.request.post(addr)
def deleteStickerFromSet(self, sticker):
"""
        Use this method to delete a sticker from a set created by the bot
"""
command = inspect.stack()[0].function
addr = command + "?sticker=" + str(sticker)
return self.request.post(addr)
def setStickerSetThumb(self, name, user_id, thumb=None):
"""
        Use this method to set the thumbnail of a sticker set
        Animated thumbnails can be set for animated sticker sets only
"""
command = inspect.stack()[0].function
addr = command + "?name=" + str(name)
addr += "&user_id=" + str(user_id)
        file_data = None
        if thumb is not None:
if thumb[:7] == "http://" or thumb[:7] == "https:/":
file_data = None
addr += "&thumb=" + thumb
elif type(thumb) == bytes:
file_data = {"thumb": thumb}
elif type(thumb) == str and '.' not in thumb:
file_data = None
addr += "&thumb=" + thumb
else:
file_data = {"thumb": open(thumb, 'rb')}
if file_data is None:
return self.request.post(addr)
else:
return self.request.postFile(addr, file_data)
# Payments
def sendInvoice(self, chat_id, title, description, payload, provider_token, start_parameter,
currency, prices, provider_data=None, photo_url=None,
photo_size=None, photo_width=None, photo_height=None,
need_name=None, need_phone_number=None, need_email=None,
need_shipping_address=None, send_phone_number_to_provider=None,
send_email_to_provider=None, is_flexible=None, disable_notification=None,
reply_to_message_id=None, reply_markup=None,
allow_sending_without_reply=None):
"""
        Use this method to send an invoice
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
addr += "&title=" + str(title)
addr += "&description=" + str(description)
        addr += "&payload=" + str(payload)
addr += "&provider_token=" + str(provider_token)
addr += "&start_parameter=" + str(start_parameter)
        addr += "&currency=" + str(currency)
addr += "&prices=" + json.dumps(prices)
if provider_data is not None:
addr += "&provider_data=" + str(provider_data)
if photo_url is not None:
addr += "&photo_url=" + str(photo_url)
if photo_size is not None:
addr += "&photo_size=" + str(photo_size)
if photo_width is not None:
addr += "&photo_width=" + str(photo_width)
if photo_height is not None:
addr += "&photo_height=" + str(photo_height)
if need_name is not None:
addr += "&need_name=" + str(need_name)
if need_phone_number is not None:
addr += "&need_phone_number=" + str(need_phone_number)
if need_email is not None:
addr += "&need_email=" + str(need_email)
if need_shipping_address is not None:
addr += "&need_shipping_address=" + str(need_shipping_address)
if send_phone_number_to_provider is not None:
addr += "&send_phone_number_to_provider=" + \
str(send_phone_number_to_provider)
if send_email_to_provider is not None:
addr += "&send_email_to_provider=" + str(send_email_to_provider)
if is_flexible is not None:
addr += "&is_flexible=" + str(is_flexible)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
return self.request.post(addr)
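    # Usage sketch for sendInvoice (illustration only; `provider_token` is
    # issued by a payment provider through @BotFather and is a placeholder
    # here). Amounts are given in the smallest units of the currency,
    # e.g. 1000 means 10.00 USD:
    #
    #     prices = [{"label": "subscription", "amount": 1000}]
    #     bot.sendInvoice(chat_id, "Pro plan", "one month of Pro", "order-1",
    #                     provider_token, "pro-plan", "USD", prices)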
def answerShippingQuery(self, shipping_query_id, ok, shipping_options=None, error_message=None):
"""
        Use this method to reply to a shipping query
"""
command = inspect.stack()[0].function
addr = command + "?shipping_query_id=" + str(shipping_query_id)
addr += "&ok=" + str(ok)
if shipping_options is not None:
addr += "&shipping_options=" + json.dumps(shipping_options)
if error_message is not None:
addr += "&error_message=" + str(error_message)
return self.request.post(addr)
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok, error_message=None):
"""
        Use this method to respond to a pre-checkout query
"""
command = inspect.stack()[0].function
addr = command + "?pre_checkout_query_id=" + str(pre_checkout_query_id)
addr += "&ok=" + str(ok)
if error_message is not None:
addr += "&error_message=" + str(error_message)
return self.request.post(addr)
# Telegram Passport
def setPassportDataErrors(self, user_id, errors):
"""
        Informs a user that some of the Telegram Passport elements they provided contain errors
        The user will not be able to re-submit their Passport until the errors are fixed
        (the contents of the fields for which errors were returned must change)
"""
command = inspect.stack()[0].function
addr = command + "?user_id=" + str(user_id)
addr += "&errors=" + json.dumps(errors)
return self.request.post(addr)
# Games
def sendGame(self, chat_id, game_short_name, disable_notification=None,
reply_to_message_id=None, reply_markup=None,
allow_sending_without_reply=None):
"""
        Use this method to send a game
"""
command = inspect.stack()[0].function
addr = command + "?chat_id=" + str(chat_id)
addr += "&game_short_name=" + str(game_short_name)
if disable_notification is not None:
addr += "&disable_notification=" + str(disable_notification)
if reply_to_message_id is not None:
addr += "&reply_to_message_id=" + str(reply_to_message_id)
if reply_markup is not None:
addr += "&reply_markup=" + json.dumps(reply_markup)
if allow_sending_without_reply is not None:
addr += "&allow_sending_without_reply=" + str(allow_sending_without_reply)
return self.request.post(addr)
def setGameScore(self, user_id, score, force=None, disable_edit_message=None,
chat_id=None, message_id=None, inline_message_id=None):
"""
        Use this method to set the score of the specified user in a game
        When inline_message_id is not specified, chat_id and message_id are required
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
addr += "&user_id=" + str(user_id)
addr += "&score=" + str(score)
if force is not None:
addr += "&force=" + str(force)
if disable_edit_message is not None:
addr += "&disable_edit_message=" + str(disable_edit_message)
return self.request.post(addr)
def getGameHighScores(self, user_id, chat_id=None, message_id=None, inline_message_id=None):
"""
        Use this method to get data for high score tables
        Returns the score of the specified user and several of their neighbors in the game
        When inline_message_id is not specified, chat_id and message_id are required
"""
command = inspect.stack()[0].function
if inline_message_id is None:
if message_id is None or chat_id is None:
return False
if inline_message_id is not None:
addr = command + "?inline_message_id=" + str(inline_message_id)
else:
addr = command + "?chat_id=" + str(chat_id)
addr += "&message_id=" + str(message_id)
addr += "&user_id=" + str(user_id)
return self.request.post(addr) | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/zgram.py | zgram.py |
import os
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from .polling import _runUpdates
from .webhook import _runWebhook
from .zgram import Bot
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
name = "zgram"
__all__ = ['Bot']
bot = Bot()
VERSION = bot.version
if bot._local_api_server != "False":
api_server = "Local"
else:
api_server = "Remote"
def main():
print(" * Self-checking...", end="\r")
req = requests.post(url=bot._url + "getWebhookInfo", verify=False)
if not req.json().get("ok"):
if (req.json().get("error_code") == 401 and
req.json().get("description") == "Unauthorized"):
            print("\nIf you have already logged the bot out of the cloud Bot API server, please wait at least 10 minutes and try again.")
else:
print("\nfailed to get running mode!")
os._exit(0)
status = req.json().get("result")
pending_update_count = status["pending_update_count"]
if bot._webhook:
protocol = "https://"
if bot._local_api_server != "False":
protocol = "http://"
url = protocol + str(bot._server_address + ":" + str(
bot._server_port) + "/bot" + str(bot._key))
if (bot._drop_pending_updates == True and pending_update_count != 0) \
or (status["url"] != url) or (status["has_custom_certificate"] != bot._self_signed)\
or status["max_connections"] != int(bot._pool_size):
if bot._self_signed:
status = bot.setWebhook(
url=url,
certificate=bot._cert_pub,
max_connections=bot._pool_size,
drop_pending_updates=bot._drop_pending_updates
)
else:
status = bot.setWebhook(
url=url,
max_connections=bot._pool_size,
drop_pending_updates=bot._drop_pending_updates
)
if not status:
print("\nfailed to set Webhook!")
os._exit(0)
print(" * The zgram starts running",
"\n * Version : v" + VERSION,
"\n * Mode : Webhook",
"\n * Thread : " + str(bot._pool_size),
"\n * Server : " + api_server + "\n")
_runWebhook(bot=bot,
host=bot._local_address,port=int(bot._local_port))
else:
if status["url"] != "" or status["has_custom_certificate"]:
status = bot.deleteWebhook()
if not status:
print("\nfailed to set getUpdates!")
os._exit(0)
print(" * The zgram starts running",
"\n * Version : v" + VERSION,
"\n * Mode : Polling",
"\n * Thread : " + str(bot._pool_size),
"\n * Server : " + api_server + "\n")
if bot._drop_pending_updates == True and \
pending_update_count != 0:
results = bot.getUpdates()
messages = bot._washUpdates(results)
_runUpdates(bot=bot) | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/__init__.py | __init__.py |
import os
import requests
import inspect
from .logger import _logger
from traceback import extract_stack
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
class _Request(object):
"""
    API request class
"""
def __init__(self, thread_pool_size, url, debug=False):
self.__url = url
self.__debug = debug
self.__session = self.__connection_session(
pool_connections=thread_pool_size,
pool_maxsize=thread_pool_size * 2
)
def __del__(self):
self.__session.close()
def __connection_session(self, pool_connections=10, pool_maxsize=10, max_retries=5):
"""
        Connection pool
"""
session = requests.Session()
session.verify = False
adapter = requests.adapters.HTTPAdapter(pool_connections=pool_connections,
pool_maxsize=pool_maxsize, max_retries=max_retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
def __debug_info(self, result):
"""
        Debug mode
"""
if self.__debug and not result.get("ok"):
            os.system("") # inexplicable but working fix for broken ANSI color output on Windows...
stack_info = extract_stack()
            if len(stack_info) > 8: # called from inside a plugin
_logger.debug("\033[1;31m" + \
"Request failed" + " - " + \
"From:" + stack_info[-3][2] + " - " + \
"Path:" + stack_info[5][0] + " - " + \
"Line:" + str(stack_info[5][1]) + " - " + \
"Method:" + stack_info[6][2] + " - " + \
"Result:" + str(result) + \
"\033[0m")
            elif len(stack_info) > 3: # called externally
_logger.debug("\033[1;31m" + \
"Request failed" + " - " + \
"From:" + stack_info[0][0] + " - " + \
"Path:" + stack_info[1][0] + " - " + \
"Line:" + str(stack_info[0][1]) + " - " + \
"Method:" + stack_info[1][2] + " - " + \
"Result:" + str(result) + \
"\033[0m")
def post(self, addr):
try:
with self.__session.post(self.__url + addr) as req:
self.__debug_info(req.json())
if req.json().get("ok"):
return req.json().get("result")
elif not req.json().get("ok"):
return req.json().get("ok")
except:
return False
def postFile(self, addr, file_data):
try:
with self.__session.post(self.__url + addr, files=file_data) as req:
self.__debug_info(req.json())
if req.json().get("ok"):
return req.json().get("result")
elif not req.json().get("ok"):
return req.json().get("ok")
except:
return False
def postJson(self, addr, json):
try:
            with self.__session.post(self.__url + addr, json=json) as req:
self.__debug_info(req.json())
if req.json().get("ok"):
return req.json().get("result")
elif not req.json().get("ok"):
return req.json().get("ok")
except:
return False
def get(self, addr):
try:
with self.__session.get(self.__url + addr) as req:
self.__debug_info(req.json())
if req.json().get("ok"):
return req.json().get("result")
elif not req.json().get("ok"):
return req.json().get("ok")
except:
return False | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/request.py | request.py |
import os
def About(bot, message):
chat_id = message["chat"]["id"]
message_id = message["message_id"]
text = message["text"]
bot_id = bot.bot_id
prefix = "about"
plugin_dir = bot.plugin_dir
VERSION = bot.version
if not os.path.exists(bot.path_converter(plugin_dir + "About/config.ini")):
first_btn = ["交流群组", "https://t.me/zgram_chat"]
last_btn = ["项目地址", "https://github.com/plutobell/zgram"]
else:
with open(bot.path_converter(plugin_dir + "About/config.ini"), 'r') as g:
first_btn = g.readline().strip().split(',')
last_btn = g.readline().strip().split(',')
if text[1:len(prefix)+1] == prefix:
inlineKeyboard = [
[
{"text": first_btn[0], "url": first_btn[1]},
{"text": last_btn[0], "url": last_btn[1]},
]
]
reply_markup = {
"inline_keyboard": inlineKeyboard
}
status = bot.sendChatAction(chat_id, "typing")
msg = "此 Bot 基于 <b>zgram</b> 框架 <b>v" + VERSION + "</b>\n\n" +\
"<b>zgram</b> 是基于 Telegram Bot API 的 Bot 框架,具有插件系统,扩展方便。\n\n"
req = bot.getUserProfilePhotos(user_id=str(bot_id), limit=1)
    if req and req.get("photos", "notphotos") != "notphotos":
bot_icon = req.get("photos")[0][0]["file_id"]
if type(bot_icon) == str and len(bot_icon) > 50:
photo = bot_icon
else:
with open(bot.path_converter(plugin_dir + "About/icon.png"), "rb") as p:
photo = p.read()
else:
with open(bot.path_converter(plugin_dir + "About/icon.png"), "rb") as p:
photo = p.read()
status = bot.sendPhoto(chat_id=chat_id, photo=photo, caption=msg, parse_mode="HTML", reply_to_message_id=message_id, reply_markup=reply_markup)
bot.message_deletor(15, chat_id, status["message_id"])
else:
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="指令格式错误,请检查!", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"]) | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/plugins/About/About.py | About.py |
import time
def Schedule(bot, message):
chat_id = message["chat"]["id"]
user_id = message["from"]["id"]
message_id = message["message_id"]
text = message["text"]
root_id = bot.root_id
gaps = {
"1s": 1,
"2s": 2,
"5s": 5,
"10s": 10,
"15s": 15,
"30s": 30,
"45s": 45,
"1m": 60,
"2m": 120,
"5m": 300,
"10m": 600,
"15m": 900,
"30m": 1800,
"45m": 2700,
"1h": 3600,
"2h": 7200,
"4h": 10800,
"6h": 21600,
"8h": 28800,
"10h": 36000,
"12h": 43200,
"1d": 86400,
"3d": 259200,
"5d": 432000,
"7d": 604800,
"10d": 864000,
"15d": 1296000,
"20d": 1728000,
"30d": 2592000
}
prefix = "/sched"
    command = { #command registry
"/schedadd": "add",
"/scheddel": "del",
"/schedfind": "find",
"/schedclear": "clear",
"/schedstatus": "status"
}
count = 0
for c in command.keys():
if c in str(text):
count += 1
if text.split(" ")[0] != prefix and prefix in text and str(user_id) != root_id:
status = bot.sendMessage(chat_id, text="<b>无权限</b>", parse_mode="HTML",
reply_to_message_id=message_id)
bot.message_deletor(15, status["chat"]["id"], status["message_id"])
return
if text[:len(prefix)] == prefix and count == 0:
msg = "<b>Schedule 插件功能</b>" + "\n\n" + \
"<b>/schedadd</b> 添加任务 格式:指令+空格+周期+消息" + "\n" + \
"<b>/scheddel</b> 移除任务 格式:指令+空格+标识" + "\n" + \
"<b>/schedfind</b> 查找任务 格式:指令+空格+标识" + "\n" + \
"<b>/schedclear</b> 移除所有任务" + "\n" + \
"<b>/schedstatus</b> 查看队列信息" + "\n\n" + \
"<i>支持的周期指令:1s 2s 5s 10s 15s 30s 45s | "+ \
"1m 2m 5m 10m 15m 30m 45m | " + \
"1h 2h 4h 6h 8h 10h 12h | " + \
"1d 3d 5d 7d 10d 15d 20d 30d" + "</i>"
status = bot.sendMessage(chat_id, text=msg, parse_mode="HTML",
reply_to_message_id=message_id)
bot.message_deletor(60, status["chat"]["id"], status["message_id"])
elif text[:len(prefix + "add")] == prefix + "add":
if len(text.split(" ")) == 3:
msg = ""
gap_key = str(text.split(" ")[1])
if gap_key not in gaps.keys():
msg = "<b>错误的周期,支持的周期指令:</b> \n\n" + \
"<b>1s 2s 5s 10s 15s 30s 45s \n" + \
"1m 2m 5m 10m 15m 30m 45m \n" + \
"1h 2h 4h 6h 8h 10h 12h \n" + \
"1d 3d 5d 7d 10d 15d 20d 30d" + "</b>"
status = bot.sendMessage(chat_id, text=msg, parse_mode="HTML",
reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
return
gap = gaps[gap_key]
gap_key = gap_key.replace("s", "秒").replace("m", "分钟").replace("h", "小时").replace("d", "天")
msg = str(text.split(" ")[2]) + "\n\n" + "<code>此消息为定时发送,周期" + str(gap_key) + "</code>"
ok, uid = bot.schedule.add(gap, event, (bot, message["chat"]["id"], msg, "HTML"))
timestamp = time.strftime('%Y/%m/%d %H:%M:%S',time.localtime(time.time()))
if ok:
msg = "<b>任务已加入队列</b>\n\n" + \
"周期: <code>" + gap_key + "</code>\n" + \
"目标: <code>" + str(chat_id) + "</code>\n" + \
"标识: <code>" + str(uid) + "</code>\n" + \
"时间: <code>" + str(timestamp) + "</code>\n\n" + \
"<code>此消息将在<b>60秒</b>后销毁,请尽快保存标识</code>\n"
else:
msg = ""
if uid == "Full":
msg = "<b>队列已满</b>"
else:
msg = "<b>遇到错误</b> \n\n <i>" + uid + "</i>"
status = bot.sendMessage(chat_id, text=msg, parse_mode="HTML",
reply_to_message_id=message_id)
bot.message_deletor(60, status["chat"]["id"], status["message_id"])
else:
status = bot.sendMessage(chat_id,
text="<b>指令格式错误 (e.g.: " + prefix + "add gap text)</b>",
parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
elif text[:len(prefix + "del")] == prefix + "del":
if len(text.split(" ")) == 2:
msg = ""
uid = str(text.split(" ")[1])
ok, uid = bot.schedule.delete(uid)
if ok:
msg = "<b>移除了任务 " + str(uid) + "</b>"
else:
if uid == "Empty":
msg = "<b>队列为空</b>"
elif uid == "NotFound":
msg = "<b>任务未找到</b>"
status = bot.sendMessage(chat_id, text=msg,
parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
else:
status = bot.sendMessage(chat_id,
text="<b>指令格式错误 (e.g.: " + prefix + "del uid)</b>",
parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
elif text[:len(prefix + "find")] == prefix + "find":
if len(text.split(" ")) == 2:
msg = ""
uid = str(text.split(" ")[1])
ok, uid = bot.schedule.find(uid)
if ok:
msg = "<b>任务存在于队列中</b>"
else:
if uid == "Empty":
msg = "<b>队列为空</b>"
elif uid == "NotFound":
msg = "<b>任务未找到</b>"
status = bot.sendMessage(chat_id, text=msg,
parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
else:
status = bot.sendMessage(chat_id,
text="<b>指令格式错误 (e.g.: " + prefix + "del uid)</b>",
parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
elif text[:len(prefix + "clear")] == prefix + "clear":
msg = ""
ok, msgg = bot.schedule.clear()
if ok:
msg = "<b>已清空队列</b>"
else:
if msgg == "Empty":
msg = "<b>队列为空</b>"
elif msgg != "Cleared":
msg = "<b>遇到错误</b> \n\n <i>" + msgg + "</i>"
status = bot.sendMessage(chat_id, text=msg,
parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
elif text[:len(prefix + "status")] == prefix + "status":
msg = ""
ok, result = bot.schedule.status()
if ok:
msg = "<code>使用: " + str(result["used"]) + "\n" + \
"空闲: " + str(result["free"]) + "\n" + \
"容量: " + str(result["size"]) + "</code>\n"
else:
msg = "<b>遇到错误</b> \n\n <i>" + result["exception"] + "</i>"
status = bot.sendMessage(chat_id, text=msg,
parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(30, status["chat"]["id"], status["message_id"])
def event(bot, chat_id, msg, parse_mode):
status = bot.sendMessage(chat_id=chat_id, text=msg, parse_mode="HTML") | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/plugins/Schedule/Schedule.py | Schedule.py |
import requests
import urllib.parse as ubp
requests.adapters.DEFAULT_RETRIES = 5
def Chat(bot, message):
plugin_dir = bot.plugin_dir
url = "http://api.qingyunke.com/api.php?key=free&appid=0&msg="
hello = ("你好", "nihao", "hello", "Hello",
"HELLO", "hi", "Hi", "HI",
"早上好", "上午好", "下午好", "晚上好", "中午好",
"good morning", "Good morning", "good afternoom",
"Good afternoom", "good evening", "Good evening")
if message["text"][1:] in hello:
status = bot.sendChatAction(message["chat"]["id"], "typing")
status = bot.sendVoice(message["chat"]["id"], voice=bot.path_converter(plugin_dir + "Chat/hello.ogg"),
reply_to_message_id=message["message_id"])
else:
try:
            with requests.post(url + ubp.quote(message["text"][1:])) as req: # URL-encode the message text
req.keep_alive = False
req.encoding = "utf-8"
                if req.status_code != requests.codes.ok:
status = bot.sendChatAction(message["chat"]["id"], "typing")
status = bot.sendMessage(chat_id=message["chat"]["id"], text="接口调用失败!",
parse_mode="HTML", reply_to_message_id=message["message_id"])
bot.message_deletor(15, status["chat"]["id"], status["message_id"])
else:
try:
msg = str(req.json().get("content").replace("{br}", "\n").replace("菲菲", "小埋"))
if "{face:" in msg:
msg = msg.split("}")[1]
except:
msg = "出错了."
status = bot.sendChatAction(message["chat"]["id"], "typing")
status = bot.sendMessage(message["chat"]["id"],text=msg,
parse_mode="HTML", reply_to_message_id=message["message_id"])
except Exception as e:
print(e)
def timer_func(bot, chat_id, message_id):
status = bot.deleteMessage(chat_id=chat_id, message_id=message_id) | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/plugins/Chat/Chat.py | Chat.py |
import os
from threading import Lock
lock = Lock()
def PluginCTL(bot, message):
message_id = message["message_id"]
chat_id = message["chat"]["id"]
user_id = message["from"]["id"]
text = message["text"]
prefix = "pluginctl"
plugin_dir = bot.plugin_dir
root_id = bot.root_id
plugin_bridge = bot.plugin_bridge
if not os.path.exists(bot.path_converter(plugin_dir + "PluginCTL/db/")):
os.mkdir(bot.path_converter(plugin_dir + "PluginCTL/db/"))
if message["chat"]["type"] != "private" and not os.path.exists(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db")):
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "w") as f:
pass
command = {
"/pluginctlshow": "show",
"/pluginctlon": "on",
"/pluginctloff": "off"
}
count = 0
for c in command.keys():
if c in str(text):
count += 1
if message["chat"]["type"] != "private":
admins = administrators(bot=bot, chat_id=chat_id)
if str(root_id) not in admins:
admins.append(str(root_id)) #root permission
    if message["chat"]["type"] == "private" and text[1:len(prefix)+1] == prefix: #check whether this is a private chat
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id, "抱歉,该指令不支持私人会话.", parse_mode="text", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
elif text[1:len(prefix)+1] == prefix and count == 0:
status = bot.sendChatAction(chat_id, "typing")
msg = "<b>PluginCTL 插件功能</b>\n\n" +\
"<b>/pluginctlshow</b> - 展示插件开启状态 \n" +\
"<b>/pluginctlon</b> - 启用插件。格式:/pluginctlon接要启用的插件名,以空格分隔 \n" +\
"<b>/pluginctloff</b> - 禁用插件。格式:/pluginctloff接要禁用的插件名,以空格分隔 \n" +\
"<b>/pluginctlon all</b> - 启用所有插件 \n" +\
"<b>/pluginctloff all</b> - 禁用所有插件,但必须的插件将被保留 \n" +\
"<b>\n同时操作多个插件请用英文逗号分隔</b>\n"
status = bot.sendMessage(chat_id=chat_id, text=msg, parse_mode="HTML", reply_to_message_id=message["message_id"])
bot.message_deletor(30, chat_id, status["message_id"])
elif "reply_markup" in message.keys():
click_user_id = message["click_user"]["id"]
from_user_id = message["reply_to_message"]["from"]["id"]
callback_query_data = message["callback_query_data"]
pluginctlsho_on_page = "/" + prefix + "showonpage"
pluginctlsho_off_page = "/" + prefix + "showoffpage"
plugin_dict = bot.plugin_bridge
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
plugin_list_on = {}
for plugin in plugin_bridge.keys():
if plugin not in plugin_list_off:
plugin_list_on[plugin] = plugin_bridge[plugin]
        for key, val in plugin_list_on.items(): #dict.keys() cannot be modified while iterating!
if val == "" or val == " ":
plugin_list_on[key] = "nil"
if callback_query_data == pluginctlsho_on_page:
inlineKeyboard = [
[
{"text": "禁用的", "callback_data": "/pluginctlshowoffpage"},
]
]
reply_markup = {
"inline_keyboard": inlineKeyboard
}
if click_user_id == from_user_id:
            for key, val in plugin_list_on.items(): #dict.keys() cannot be modified while iterating!
if val == "" or val == " ":
plugin_list_on[key] = "nil"
msg_on = "<b>启用的插件</b> \n\n"
for i, on in enumerate(plugin_list_on):
msg_on += " <b>[" + str(i+1) + "] " + str(on) + " " + str(plugin_list_on[on]) + "</b>\n"
msg_on += "\n<b>nil</b> 代表指令为空"
status = bot.editMessageText(chat_id=chat_id, message_id=message_id, text=msg_on + "\n", parse_mode="HTML", reply_markup=reply_markup)
status = bot.answerCallbackQuery(message["callback_query_id"])
else:
status = bot.answerCallbackQuery(message["callback_query_id"], text="点啥点,关你啥事?", show_alert=bool("true"))
elif callback_query_data == pluginctlsho_off_page:
inlineKeyboard = [
[
{"text": "启用的", "callback_data": "/pluginctlshowonpage"},
]
]
reply_markup = {
"inline_keyboard": inlineKeyboard
}
if click_user_id == from_user_id:
msg_off = "<b>禁用的插件</b> \n\n"
            for key, val in plugin_bridge.items(): #dict.keys() cannot be modified while iterating!
if val == "" or val == " ":
plugin_bridge[key] = "nil"
for i, pluo in enumerate(plugin_list_off):
if pluo == "" or pluo == " ":
del plugin_list_off[i]
if len(plugin_list_off) == 0:
msg_off += "无\n"
else:
for i, off in enumerate(plugin_list_off):
msg_off += " <b>[" + str(i+1) + "] " + str(off) + " " + str(plugin_bridge[off]) + "</b>\n"
msg_off += "\n<b>nil</b> 代表指令为空"
status = bot.editMessageText(chat_id=chat_id, message_id=message_id, text=msg_off + "\n", parse_mode="HTML", reply_markup=reply_markup)
status = bot.answerCallbackQuery(message["callback_query_id"])
else:
status = bot.answerCallbackQuery(message["callback_query_id"], text="点啥点,关你啥事?", show_alert=bool("true"))
elif count > 0:
if str(user_id) not in admins:
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="抱歉,您无权操作.", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
elif text[1:len(prefix + command["/pluginctlshow"])+1] == prefix + command["/pluginctlshow"]:
inlineKeyboard = [
[
{"text": "禁用的", "callback_data": "/pluginctlshowoffpage"},
]
]
reply_markup = {
"inline_keyboard": inlineKeyboard
}
plugin_dict = bot.plugin_bridge
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
plugin_list_on = {}
for plugin in plugin_bridge.keys():
if plugin not in plugin_list_off:
plugin_list_on[plugin] = plugin_bridge[plugin]
            for key, val in plugin_list_on.items(): #dict.keys() cannot be modified while iterating!
if val == "" or val == " ":
plugin_list_on[key] = "nil"
msg_on = "<b>启用的插件</b> \n\n"
for i, on in enumerate(plugin_list_on):
msg_on += " <b>[" + str(i+1) + "] " + str(on) + " " + str(plugin_list_on[on]) + "</b>\n"
msg_on += "\n<b>nil</b> 代表指令为空"
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text=msg_on + "\n", parse_mode="HTML", reply_to_message_id=message_id, reply_markup=reply_markup)
bot.message_deletor(60, chat_id, status["message_id"])
elif text[1:len(prefix + command["/pluginctlon"])+1] == prefix + command["/pluginctlon"]:
plugin_list = list(plugin_bridge.keys())
if len(text.split(' ')) == 2:
plug_set = text.split(' ')[1]
for p in plug_set.split(','):
if p == "nil":
p = ''
if p not in plugin_list:
if '' in plugin_list and p == ' ':
continue
if p == "all":
continue
msg = "插件 <b>" + str(p) + "</b> 不存在,请重试."
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text=msg, parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
return False
if plug_set == "all":
lock.acquire()
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "w") as f:
f.write('')
lock.release()
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="<b>已启用全部插件。</b>", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
return False
elif len(plug_set.split(',')) >= 2:
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
for i, plug_s in enumerate(plug_set.split(',')):
if plug_s in plugin_list_off:
for i, p in enumerate(plugin_list_off):
if p == plug_s:
del plugin_list_off[i]
lock.acquire()
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "w") as f:
f.write(','.join(plugin_list_off))
lock.release()
else:
plug_set = plug_set.strip()
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
if plug_set in plugin_list_off:
for i, p in enumerate(plugin_list_off):
if p == plug_set:
del plugin_list_off[i]
lock.acquire()
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "w") as f:
f.write(','.join(plugin_list_off))
lock.release()
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="<b>启用成功!</b>", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
else:
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="指令错误,请检查.", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
elif text[1:len(prefix + command["/pluginctloff"])+1] == prefix + command["/pluginctloff"]:
default_plugin = ["Menu", "About", "PluginCTL", "Uptime", "Schedule"]
plugin_list = list(plugin_bridge.keys())
if len(text.split(' ')) == 2:
plug_set = text.split(' ')[1]
for p in plug_set.split(','):
if p not in plugin_list:
if p == ' ' or p == '':
continue
if p == "all":
continue
msg = "插件 <b>" + str(p) + "</b> 不存在,请重试."
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text=msg, parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
return False
                if plug_set == "all":  # plug_set is always a str here
plugin_list_alloff = []
for i, p in enumerate(plugin_list):
if p == "" or p == " ":
p = "nil"
if p not in default_plugin:
plugin_list_alloff.append(p)
lock.acquire()
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "w") as f:
f.write(','.join(plugin_list_alloff))
lock.release()
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="<b>已禁用全部插件,\n但必须的插件仍被保留。</b>", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
return False
elif len(plug_set.split(',')) >= 2:
for i, p in enumerate(plug_set.split(',')):
if p in default_plugin:
status = bot.sendChatAction(chat_id, "typing")
msg = "插件 <b>" + str(p) + "</b> 不支持禁用."
status = bot.sendMessage(chat_id=chat_id, text=msg, parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
return False
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
for i, plug_s in enumerate(plug_set.split(',')):
if plug_s not in plugin_list_off:
plugin_list_off.append(plug_s)
lock.acquire()
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "w") as f:
f.write(','.join(plugin_list_off))
lock.release()
else:
plug_set = plug_set.strip()
for i, p in enumerate(plug_set.split(',')):
if p in default_plugin:
status = bot.sendChatAction(chat_id, "typing")
msg = "插件 <b>" + str(p) + "</b> 不支持禁用."
status = bot.sendMessage(chat_id=chat_id, text=msg, parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
return False
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
if plug_set not in plugin_list_off:
plugin_list_off.append(plug_set)
lock.acquire()
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "w") as f:
f.write(','.join(plugin_list_off))
lock.release()
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="<b>禁用成功!</b>", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
else:
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="指令错误,请检查.", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
else:
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text="指令错误,请检查.", parse_mode="HTML", reply_to_message_id=message_id)
bot.message_deletor(15, chat_id, status["message_id"])
def administrators(bot, chat_id):
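    """Return the IDs of all human (non-bot) admins of the chat, or False on failure."""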
admins = []
results = bot.getChatAdministrators(chat_id=chat_id)
    if results is not False:
for result in results:
            if not result["user"]["is_bot"]:
admins.append(str(result["user"]["id"]))
else:
admins = False
return admins | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/plugins/PluginCTL/PluginCTL.py | PluginCTL.py |
import os
def Menu(bot, message):
chat_id = message["chat"]["id"]
message_id = message["message_id"]
chat_type = message["chat"]["type"]
prefix = "start"
plugin_bridge = bot.plugin_bridge
plugin_dir = bot.plugin_dir
plugin_list = list(plugin_bridge.keys())
if chat_type != "private" and "/pluginctl" in plugin_bridge.values() and plugin_bridge["PluginCTL"] == "/pluginctl":
if os.path.exists(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db")):
with open(bot.path_converter(plugin_dir + "PluginCTL/db/" + str(chat_id) + ".db"), "r") as f:
plugin_setting = f.read().strip()
plugin_list_off = plugin_setting.split(',')
plugin_list_temp = []
for plugin in plugin_bridge.keys():
if plugin not in plugin_list_off:
plugin_list_temp.append(plugin)
plugin_list = plugin_list_temp
plugin_count = len(plugin_list)
page_size = 5
    page_total = int((plugin_count + page_size - 1) / page_size)  # total pages = (total items + page size - 1) / page size
page_callback_command = "/" + prefix + "page?page="
if not os.path.exists(bot.path_converter(plugin_dir + "Menu/config.ini")):
first_btn = ["交流群组", "https://t.me/zgram_chat"]
last_btn = ["项目地址", "https://github.com/plutobell/zgram"]
else:
with open(bot.path_converter(plugin_dir + "Menu/config.ini"), 'r') as g:
first_btn = g.readline().strip().split(',')
last_btn = g.readline().strip().split(',')
wait_time = plugin_count * 7
if "reply_markup" in message.keys():
click_user_id = message["click_user"]["id"]
from_user_id = message["reply_to_message"]["from"]["id"]
callback_query_data = message["callback_query_data"]
if callback_query_data[:len(page_callback_command)] == page_callback_command:
if click_user_id == from_user_id:
page = int(callback_query_data.split('=')[1])
page, menu_str = menu_text(bot, plugin_dir=plugin_dir, page=page, page_total=page_total, page_size=page_size, plugin_list=plugin_list)
previous_page = page - 1
if previous_page < 1:
previous_page = 1
next_page = page + 1
if next_page > page_total:
next_page = page_total
if page_total == 1:
inlineKeyboard = [
[
{"text": first_btn[0], "url": first_btn[1]},
{"text": last_btn[0], "url": last_btn[1]},
]
]
elif page == 1:
inlineKeyboard = [
[
{"text": first_btn[0], "url": first_btn[1]},
{"text": "下一页", "callback_data": page_callback_command + str(page+1)},
]
]
elif page == page_total:
inlineKeyboard = [
[
{"text": "上一页", "callback_data": page_callback_command + str(page-1)},
{"text": last_btn[0], "url": last_btn[1]},
]
]
else:
inlineKeyboard = [
[
{"text": "上一页", "callback_data": page_callback_command + str(previous_page)},
{"text": "下一页", "callback_data": page_callback_command + str(next_page)},
]
]
reply_markup = {
"inline_keyboard": inlineKeyboard
}
status = bot.editMessageText(chat_id=chat_id, message_id=message_id, text=menu_str, parse_mode="HTML", reply_markup=reply_markup)
status = bot.answerCallbackQuery(message["callback_query_id"])
else:
                status = bot.answerCallbackQuery(message["callback_query_id"], text="点啥点,关你啥事?", show_alert=True)
else:
page = 1
if page_total == 1:
inlineKeyboard = [
[
{"text": first_btn[0], "url": first_btn[1]},
{"text": last_btn[0], "url": last_btn[1]},
]
]
else:
inlineKeyboard = [
[
{"text": first_btn[0], "url": first_btn[1]},
{"text": "下一页", "callback_data": page_callback_command + str(page+1)},
]
]
reply_markup = {
"inline_keyboard": inlineKeyboard
}
page, menu_str = menu_text(bot=bot, plugin_dir=plugin_dir, page=page, page_total=page_total, page_size=page_size, plugin_list=plugin_list)
status = bot.sendChatAction(chat_id, "typing")
status = bot.sendMessage(chat_id=chat_id, text=menu_str, parse_mode="HTML", reply_to_message_id=message_id, reply_markup=reply_markup)
bot.message_deletor(wait_time, message["chat"]["id"], status["message_id"])
def menu_text(bot, plugin_dir, page, page_total, page_size, plugin_list):
VERSION = bot.version
if page < 1:
page = 1
elif page > page_total:
page = page_total
    if 1 <= page <= page_total:
menu_str = ""
plugin_range = range(page*page_size-page_size, page*page_size-1+1)
        for i, plugin in enumerate(plugin_list):  # indices covered: [page*page_size - page_size, page*page_size - 1]
if i in plugin_range:
with open(bot.path_converter(plugin_dir + plugin + r"/__init__.py"), encoding="utf-8") as f:
line_1 = ""
line_2 = ""
for i in range(2):
if i == 0:
line_1 = f.readline().strip()[1:]
elif i == 1:
line_2 = f.readline().strip()[1:]
menu_str += "<b>" + line_1 + "</b> - " + line_2 + "\n\n"
menu_str = "<b>插件列表 [" + str(page) + "/" + str(page_total) + "]</b>\n\n" + menu_str + "\n<code>v" + VERSION + "</code>"
return page, menu_str | zgram | /zgram-1.0.0.tar.gz/zgram-1.0.0/Zgram/plugins/Menu/Menu.py | Menu.py |
PostgreSQL failover managed by ZooKeeper
========================================
Leverage the co-ordination capability of ZooKeeper
(http://zookeeper.apache.org) to provide a robust implementation of automated
failover for PostgreSQL.
Basic Design
------------
All inter-server communication occurs via ZooKeeper; no direct SSH or other
server-to-server connections are required.
Two daemons make up the system: "deadman" and "sync". On the nodes which
actually run the PostgreSQL cluster, the "deadman" daemon is run to control
the PostgreSQL process, optionally alongside the "sync" daemon to update
PostgreSQL configuration files (e.g. pg\_hba.conf). On application servers
or proxies only the "sync" daemon is run.
Terminology
-----------
- database cluster or cluster:
http://www.postgresql.org/docs/9.4/static/creating-cluster.html
- znode: a ZooKeeper data node in the ZooKeeper database
- group, cluster group, database group: A group of master/replica providing one logical
database, made up of one or more database clusters.
ZooKeeper Directory Layout
--------------------------
In ZooKeeper, we have one directory which contains the information on
all the nodes in multiple master-replica database groups. It is a
directory with 4 sub-directory types:
/state/{DATABASE_GROUP_NAME}_{IPADDR}:{PORT}
    This contains all the information about a database cluster node,
healthy or not. It is updated frequently with data such as the WAL
replay status. It is an ephemeral node and will disappear if
the connection to ZooKeeper is lost.
Ephemeral, created/maintained by the "deadman" daemon.
/conn/{DATABASE_GROUP_NAME}_{IPADDR}:{PORT}
    This znode contains a subset of information from the state
znode. It is the static connection information/metadata about a single
healthy (i.e. connectable) cluster. If the node is not "healthy", this
entry will not exist. The information in this znode is not volatile and
is guaranteed not to change over the lifespan of the znode. Ephemeral,
created/maintained by the "deadman" daemon.
/lock/{DATABASE_GROUP_NAME}
    This contains the IPADDR:PORT of the current master for the
database group. Connection info should be looked up in the
"_conn_" node (if it exists).
    Created/maintained by the "deadman" daemon on the current master.
/static/{DATABASE_GROUP_NAME}-db-id
Contains the database identifier of the database group.
Most of the above znodes contain a JSON encoded
dictionary/object/hashmap.
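
For example, a client could read one of these znodes with kazoo and decode
the JSON payload like this (the ZooKeeper address, group name and node
address below are purely illustrative, not part of Zgres):

```python
import json

from kazoo.client import KazooClient

zk = KazooClient(hosts="127.0.0.1:2181")
zk.start()

# read the connection metadata for one healthy cluster in the group
data, stat = zk.get("/conn/mygroup_10.0.0.1:5432")
conn_info = json.loads(data.decode("utf-8"))

zk.stop()
```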
Sync Daemon
-----------
This daemon runs on any node which wishes to connect to a database group
and maintains the local machine's configuration files. For example, it
can rewrite a pgbouncer configuration if the master of one database
group fails over. It can also start a new pgbouncer daemon if a new
database group is created. Another example is dynamically changing
HAProxy weight according to the node location (e.g. availability zone)
or replication lag.
Actually applying the configuration changes is the job of plugins; the
sync daemon will not apply any changes by itself. Plugins can be
specified in 2 ways:
* Using setuptools entry points to subscribe in-process to the
changes. This allows subscribers to subscribe to events from
either state-, master- or healthy- znodes.
* Provide an executable which will be called with the path to a
JSON encoded file containing the information from the healthy-
and master- znodes. This is provided by the zgres-apply package
which plugs into zgres-sync using the previous plugin method.
This plugin does not receive state- events for performance
reasons.
These plugins MUST be idempotent; they will be called repeatedly with
the same data.
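
A minimal sketch of an idempotent in-process plugin is shown below; the
entry point group name `zgres.sync` and the handler signature are
assumptions for illustration, not the actual Zgres API:

```python
def rewrite_pgbouncer(event, data):
    """Rewrite the local pgbouncer config from znode data.

    Idempotent: called repeatedly with the same data, it must only
    touch the config (and reload pgbouncer) when something changed.
    """
    ...

# registered in setup.py, e.g.:
# setup(
#     ...,
#     entry_points={"zgres.sync": ["pgbouncer = myplugin:rewrite_pgbouncer"]},
# )
```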
Deadman Daemon
--------------
This daemon controls one PostgreSQL database cluster and registers it in
zookeeper (creating/maintaining the state-, conn- and master-
znodes). It must run on the same machine as the database cluster.
It is responsible for promoting or shutting down it's postgresql
database cluster.
Currently, remastering and starting PostgreSQL must be handled
externally, before the deadman daemon is started.
Plugins for the deadman daemon should be able to do 2 things (see the
sketch after this list):
* Provide extra metadata (e.g. availability zone or replication lag)
* Veto the cluster being up (aside from a builtin SELECT 1)
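
A minimal sketch of the two hooks, using hypothetical names; only the
`SELECT 1` health check mirrors what is described above:

```python
import psycopg2

def extra_metadata():
    # e.g. report the availability zone (illustrative value)
    return {"availability_zone": "eu-west-1a"}

def veto_cluster_up(dsn):
    """Return True to veto advertising this cluster as up."""
    try:
        with psycopg2.connect(dsn) as conn:
            with conn.cursor() as cur:
                cur.execute("SELECT 1")
        return False
    except psycopg2.Error:
        return True
```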
Challenges
----------
* multiple replicas: How to fail over to the "best" replica.
* multiple replicas: How to connect replicas to the new master.
* Would it be a good idea to know where the basebackups are and initialize new replicas on startup?
* PLUGINS PLUGINS PLUGINS:
- Provide a lot of plugins builtin, allow them to be seen and enabled via the
"config" in zookeeper? EEEK: dynamic reconfiguration of daemons?
- What happens if a few nodes don't have some plugins?
- Configuration on a "cluster group" level
The Good
--------
* Relatively simple configuration. No ssh config.
* Fully distributed.
Implementation Thoughts
-----------------------
* Implement daemons in Python, log to stdout and stderr. Have them be
run via systemd, configured to restart on failure. Fail noisily!
Dependencies
------------
* systemd
* kazoo - ZooKeeper client
* psycopg2 - connections to PostgreSQL
Acknowledgment
--------------
Zgres is heavily influenced by HandyRep
(https://github.com/pgexperts/handyrep) and Patroni
(https://github.com/zalando/patroni). Many thanks to the developers of
those for some great ideas.
| zgres | /zgres-0.4.tar.gz/zgres-0.4/README.md | README.md |
__author__ = ('Douglas Creager <[email protected]>',
'Michal Nazarewicz <[email protected]>')
__license__ = 'This file is placed into the public domain.'
__maintainer__ = 'Michal Nazarewicz'
__email__ = '[email protected]'
__all__ = ('getVersion',)
import re
import subprocess
import sys
RELEASE_VERSION_FILE = 'RELEASE-VERSION'
# http://www.python.org/dev/peps/pep-0386/
_PEP386_SHORT_VERSION_RE = r'\d+(?:\.\d+)+(?:(?:[abc]|rc)\d+(?:\.\d+)*)?'
_PEP386_VERSION_RE = r'^%s(?:\.post\d+)?(?:\.dev\d+)?$' % (
_PEP386_SHORT_VERSION_RE)
_GIT_DESCRIPTION_RE = r'^release/(?P<ver>%s)-(?P<commits>\d+)-g(?P<sha>[\da-f]+)$' % (
_PEP386_SHORT_VERSION_RE)
def readGitVersion():
try:
proc = subprocess.Popen(('git', 'describe', '--long',
'--match', 'release/[0-9.]*'),
stdout=subprocess.PIPE)
data, _ = proc.communicate()
data = data.decode('utf-8')
if proc.returncode:
return None
ver = data.splitlines()[0].strip()
    except Exception:
return None
if not ver:
return None
m = re.search(_GIT_DESCRIPTION_RE, ver)
if not m:
sys.stderr.write('version: git description (%s) is invalid, '
'ignoring\n' % ver)
return None
commits = int(m.group('commits'))
if not commits:
return m.group('ver')
else:
return '%s.post%d.dev%d' % (
m.group('ver'), commits, int(m.group('sha'), 16))
def readReleaseVersion():
try:
fd = open(RELEASE_VERSION_FILE)
try:
ver = fd.readline().strip()
finally:
fd.close()
if not re.search(_PEP386_VERSION_RE, ver):
sys.stderr.write('version: release version (%s) is invalid, '
'will use it anyway\n' % ver)
return ver
    except Exception:
return None
def writeReleaseVersion(version):
fd = open(RELEASE_VERSION_FILE, 'w')
fd.write('%s\n' % version)
fd.close()
def getVersion():
release_version = readReleaseVersion()
version = readGitVersion() or release_version
if not version:
raise ValueError('Cannot find the version number')
if version != release_version:
writeReleaseVersion(version)
return version
if __name__ == '__main__':
print(getVersion()) | zgres | /zgres-0.4.tar.gz/zgres-0.4/version.py | version.py |
<h1 align="center"> ZgRoBot </h1>
<b align="center">
[](https://github.com/pylover7/ZgRobot/actions)
[](https://codecov.io/gh/pylover7/ZgRobot)
[](https://github.com/pylover7/ZgRobot/releases)
[](https://github.com/pylover7/ZgRobot/releases)
[](https://github.com/pylover7/ZgRobot/blob/master/LICENSE)
[](https://zgrobot.readthedocs.io/zh/latest/?badge=latest)
[](https://pypi.org/project/zgrobot/)
[](https://github.com/pylover7/ZgRobot/commits/feature-update_docs)
[](https://wakatime.com/@1d39df6a-cef0-41f7-a903-ef4b9dd13fb0)
[](https://pylover7-upgraded-space-chainsaw-pwgxxw54w6ph6jj.github.dev/)
</b>
`ZgRoBot` is a backend development framework for WeChat Official Accounts, based on [`WeRoBot`](https://github.com/offu/WeRoBot) and released under the MIT license.
> Take me straight to the [documentation](https://zgrobot.readthedocs.io/zh/stable/)!
## Installation
### Recommended installation
Installing with `pip` is recommended:
```shell
pip install zgrobot
```
### Other installation methods
1. Manually download the package
   - Download link 1: [release](https://github.com/pylover7/ZgRobot/releases)
   - Download link 2: [Lanzou Cloud repository](https://shuoshuo.lanzoui.com/b016uiu7i), [Lanzou Cloud mirror](https://shuoshuo.lanzoux.com/b016uiu7i) (password: 1n86)
2. Install locally
```shell
pip install zgrobot-XXX.tar.gz
```
### Updating
```shell
pip install --upgrade zgrobot
```
## Usage
### Hello World
A very simple `Hello World` WeChat Official Account that replies `Hello World` to every text message it receives:
```python
import zgrobot
robot = zgrobot.ZgRoBot(token='tokenhere')
@robot.text
def hello_world():
return 'Hello World!'
robot.run()
```
### Advanced usage
See the [documentation](https://zgrobot.readthedocs.io/zh/stable/) for details.
## Status

## Star History
[](https://star-history.com/#pylover7/ZgRobot&Date)
| zgrobot | /zgrobot-2.0.5.tar.gz/zgrobot-2.0.5/README.md | README.md |
# zgtf
gtf conversion utlity
# Installation
```bash
# clone the repo
git clone https://github.com/zavolanlab/zgtf.git
# create a virtual environment
python3 -m venv venv
# activate the virtual environment
source venv/bin/activate
# install zgtf scripts
pip install .
```
# Run
Convert a GTF file to BED12
```bash
gtf2bed12 --gtf <INPUT.gtf> --bed12 <OUTPUT.bed> --verbose
```
```
arguments:
-h, --help show this help message and exit
--gtf FILE Annotation file (gtf format)
--bed12 FILE Output file (bed12 format)
--transcript_type TRANSCRIPT_TYPE
Transcript type [Default: protein_coding]
-v, --verbose Verbose
```
# Docker
Pull image
```bash
docker pull zavolab/zgtf
```
Run
```bash
docker run -it zavolab/zgtf gtf2bed12 --help
```
# Singularity
Pull image
```bash
singularity pull docker://zavolab/zgtf
```
Run
```bash
singularity exec zgtf_latest.sif gtf2bed12 --help
``` | zgtf | /zgtf-0.1.2.tar.gz/zgtf-0.1.2/README.md | README.md |
# Zgulde's Python Utilities
A collection of miscellaneous helper functions and classes that I find useful.
Maybe you will too!
To install/upgrade:
```
python -m pip install --upgrade zgulde
```
## Pandas Extensions
The most polished of all of these modules is `extend_pandas`. It provides some
extensions to [pandas](http://pandas.pydata.org/) that I've found useful.
Usage:
```python
# from within a python script / jupyter notebook, etc
import zgulde.extend_pandas
# now you can use all the additional methods
```
[Further documentation can be found here](https://zgulde.github.io/zgulde-python/), or by looking at the built-in
help:
```python
import zgulde.extend_pandas
help(zgulde.extend_pandas)
```
## Other Modules
Are in various stages of completeness, and documentation for them (if present)
can be found within docstrings in the module.
| zgulde | /zgulde-0.0.41.tar.gz/zgulde-0.0.41/README.md | README.md |
import logging
from typing import Any, Dict, Optional
from django.utils.translation import gettext_lazy as _
import jwt
from rest_framework import exceptions
from .models import ApplicationCredentials
logger = logging.getLogger(__name__)
ALG = "HS256"
class ZGWAuth:
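    """
    Validate a ZGW-style JWT in two passes: first decode it without signature
    verification to read the ``client_id`` claim, look up the matching secret
    in the database, then decode it again with signature verification.
    """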
def __init__(self, encoded: str):
self.encoded = encoded
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, self.payload)
@property
def payload(self) -> Optional[Dict[str, Any]]:
if self.encoded is None:
return None
if not hasattr(self, "_payload"):
# decode the JWT and validate it
# jwt check
try:
payload = jwt.decode(
self.encoded,
options={"verify_signature": False},
algorithms=[ALG],
)
except jwt.DecodeError:
logger.info("Invalid JWT encountered")
raise exceptions.AuthenticationFailed(
_(
"JWT could not be decoded. Possibly you made a copy-paste mistake."
),
code="jwt-decode-error",
)
# get client_id
try:
client_id = payload["client_id"]
except KeyError:
raise exceptions.AuthenticationFailed(
_("`client_id` claim is missing in the JWT."),
code="missing-client-identifier",
)
# find client_id in DB and retrieve its secret
try:
jwt_secret = ApplicationCredentials.objects.exclude(secret="").get(
client_id=client_id
)
except ApplicationCredentials.DoesNotExist:
raise exceptions.AuthenticationFailed(
_("Client identifier does not exist"),
code="invalid-client-identifier",
)
else:
key = jwt_secret.secret
# check signature of the token
try:
payload = jwt.decode(
self.encoded,
key,
algorithms=[ALG],
)
except jwt.InvalidSignatureError:
logger.exception("Invalid signature - possible payload tampering?")
raise exceptions.AuthenticationFailed(
_("Client credentials are invalid."), code="invalid-jwt-signature"
)
self._payload = payload
return self._payload | zgw-auth-backend | /zgw_auth_backend-1.0.4-py3-none-any.whl/zgw_auth_backend/zgw.py | zgw.py |
import logging
from django.contrib.auth import get_user_model
from django.utils.translation import gettext_lazy as _
from rest_framework import exceptions
from rest_framework.authentication import BaseAuthentication, get_authorization_header
from rest_framework.request import Request
from .zgw import ZGWAuth
logger = logging.getLogger(__name__)
class ZGWAuthentication(BaseAuthentication):
www_authenticate_realm = "api"
def authenticate(self, request: Request):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b"bearer":
return None
if len(auth) == 1:
msg = _("Invalid bearer header. No credentials provided.")
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _(
"Invalid bearer header. Credentials string should not contain spaces."
)
raise exceptions.AuthenticationFailed(msg)
auth = ZGWAuth(auth[1].decode("utf-8"))
user_id = auth.payload.get("user_id")
if not user_id:
msg = _("Invalid 'user_id' claim. The 'user_id' should not be empty.")
raise exceptions.AuthenticationFailed(msg)
email = auth.payload.get("email", "")
return self.authenticate_user_id(user_id, email)
def authenticate_user_id(self, username: str, email: str):
UserModel = get_user_model()
fields = {UserModel.USERNAME_FIELD: username}
user, created = UserModel._default_manager.get_or_create(**fields)
if created:
msg = "Created user object for username %s" % username
logger.info(msg)
if email:
email_field = UserModel.get_email_field_name()
email_value = getattr(user, email_field)
if not email_value or email_value != email:
setattr(user, email_field, email)
user.save()
msg = "Set email to %s of user with username %s" % (email, username)
logger.info(msg)
return (user, None)
def authenticate_header(self, request):
return 'Bearer realm="%s"' % self.www_authenticate_realm | zgw-auth-backend | /zgw_auth_backend-1.0.4-py3-none-any.whl/zgw_auth_backend/authentication.py | authentication.py |
.. zgw_consumers documentation master file, created by startproject.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to ZGW Consumers' documentation!
========================================
:Version: 0.26.2
:Source: https://github.com/maykinmedia/zgw-consumers
:Keywords: OpenAPI, Zaakgericht Werken, Common Ground, NLX
:PythonVersion: 3.7, 3.8, 3.9
|build-status| |coverage| |linting| |docs|
|python-versions| |django-versions| |pypi-version|
Manage the external APIs your application consumes.
.. contents::
.. section-numbering::
Features
========
* Store services with their configuration in the database
* Integrate with OpenAPI 3.0 specifications
* NLX support
* Declare data/domain objects as modern Python dataclasses
Installation
============
Requirements
------------
* Python 3.7 or newer
* setuptools 30.3.0 or newer
* Django 3.2 or newer
Install
-------
1. Install from PyPI
.. code-block:: bash
pip install zgw-consumers
2. Add ``zgw_consumers`` to the ``INSTALLED_APPS`` setting.
3. Optionally override ``ZGW_CONSUMERS_CLIENT_CLASS`` to a custom client class.
4. Optionally specify ``ZGW_CONSUMERS_OAS_CACHE`` to point to a separate Django cache.
   Defaults to ``django.core.cache.DEFAULT_CACHE_ALIAS``, which is ``default`` in
   practice. For performance reasons we highly recommend using a real cache backend
   like Redis or Memcached.
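
A minimal sketch of these optional settings (the values shown are assumed
defaults, for illustration only):

.. code-block:: python

    # settings.py
    ZGW_CONSUMERS_CLIENT_CLASS = "zgw_consumers.client.ZGWClient"
    ZGW_CONSUMERS_OAS_CACHE = "default"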
Usage
=====
In the Django admin, you can create ``Service`` instances to define your external APIs.
**Client**
To get a client for a given resource, you can use:
.. code-block:: python
from zgw_consumers.models import Service
client = Service.get_client(some_url)
Or, to just retrieve the auth header(s):
.. code-block:: python
from zgw_consumers.models import Service
auth = Service.get_auth_header(some_url)
**Data model**
Use ``zgw_consumers.api_models.base.factory`` to turn raw JSON responses into instances
of domain models:
.. code-block:: python
from zgw_consumers.api_models.base import factory
from zgw_consumers.api_models.zaken import Zaak
results = client.list("zaak")["results"]
return factory(Zaak, results)
It works for both collections and scalar values, and takes care of the camel-case to
snake case conversion.
You can also define your own data models, take a look at the ``zgw_consumers.api_models``
package for inspiration.
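
For instance, a minimal custom model could look like this (``MyResource``
and its fields are purely illustrative):

.. code-block:: python

    from dataclasses import dataclass

    from zgw_consumers.api_models.base import ZGWModel, factory


    @dataclass
    class MyResource(ZGWModel):
        url: str
        naam: str

    instance = factory(MyResource, {"url": "https://example.com/api/1", "naam": "demo"})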
.. |build-status| image:: https://github.com/maykinmedia/zgw-consumers/workflows/Run%20CI/badge.svg
:target: https://github.com/maykinmedia/zgw-consumers/actions?query=workflow%3A%22Run+CI%22
:alt: Run CI
.. |linting| image:: https://github.com/maykinmedia/zgw-consumers/workflows/Code%20quality%20checks/badge.svg
:target: https://github.com/maykinmedia/zgw-consumers/actions?query=workflow%3A%22Code+quality+checks%22
:alt: Code linting
.. |coverage| image:: https://codecov.io/gh/maykinmedia/zgw-consumers/branch/main/graph/badge.svg
:target: https://codecov.io/gh/maykinmedia/zgw-consumers
:alt: Coverage status
.. |docs| image:: https://readthedocs.org/projects/zgw-consumers/badge/?version=latest
:target: https://zgw-consumers.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. |python-versions| image:: https://img.shields.io/pypi/pyversions/zgw_consumers.svg
.. |django-versions| image:: https://img.shields.io/pypi/djversions/zgw_consumers.svg
.. |pypi-version| image:: https://img.shields.io/pypi/v/zgw_consumers.svg
:target: https://pypi.org/project/zgw_consumers/
| zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/README.rst | README.rst |
Changes
=======
0.26.2 (2023-05-25)
-------------------
Bugfix release
Removed the ``lru_cache`` of ``zgw_consumers.admin_fields.get_zaaktypen`` to prevent
stale data being returned on configuration changes or changes in remote API data.
0.26.1 (2023-04-06)
-------------------
Fixed a data migration crash after introducing the ``Service.uuid`` field.
0.26.0 (2023-04-04)
-------------------
Feature/support release
* Catch HTTP 4xx and 5xx errors when using ZaaktypeField in the admin and display a
meaningful message to the end user
* Added ``Service.uuid`` model field
* Confirmed support for Django 4.2 and Python 3.11
0.25.0 (2023-02-27)
-------------------
Small compatibility release
* Hardened ``get_paginated_results()`` function to accept missing 'next'-link
0.24.0 (2023-02-16)
-------------------
Small maintenance release
* Replaced django-choices with models.TextChoices
* Confirmed support for Django 4.1
* Formatted code with latest black version
0.23.2 (2022-12-06)
-------------------
* Fixed bug in ``get_paginated_results`` function
* Fixed bug in compatibility layer for zds-client v1/v2
0.23.1 (2022-11-16)
-------------------
Fixed missing prefix in default constraint name
0.23.0 (2022-11-15)
-------------------
Feature release
* ``ServiceUrlField`` now creates check constraints to guarantee data consistency
0.22.0 (2022-10-28)
-------------------
Feature release
* Added support for zds-client 2.0
0.21.2 (2022-09-07)
-------------------
Fixed the API models to be more compliant with the ZGW API standards.
(Most) fields that are not required in the API schema can now effectively be omitted
from the response and still work with the API models.
0.21.1 (2022-09-07)
-------------------
* Fixed the usage of ServiceUrlField in nested queries.
0.21.0 (2022-08-31)
-------------------
💥 Breaking changes release!
TLS certificate management has been split off into the django-simple-certmanager_
library, which is now a dependency of this project. You should update the following
references in your own code:
* ``zgw_consumers.constants.CertificateTypes`` -> ``simple_certmanager.constants.CertificateTypes``
* ``zgw_consumers.models.Certificate`` -> ``simple_certmanager.models.Certificate``
The ``Certificate`` model is identical to the one shipped in zgw-consumers before
0.21.0. As a convenience, ``zgw_consumers.Certificate`` is still provided, which is a
proxy model to ``simple_certmanager.Certificate``.
**Other changes**
* Dropped support for Django 2.2. Only Django 3.2 and upwards are supported.
* The minimum version of gemma-zds-client_ has been bumped to the 1.0.x series
.. _django-simple-certmanager: https://pypi.org/project/django-simple-certmanager/
.. _gemma-zds-client: https://pypi.org/project/gemma-zds-client/
0.20.0 (2022-08-22)
-------------------
* Added database field ServiceUrlField
0.19.0 (2022-07-22)
-------------------
* Added management command to dump certificates to .zip archive
* Added docs (published on readthedocs.io)
* Updated package meta information
0.18.2 (2022-06-13)
-------------------
* Fixed admin crash when certificate label is empty
0.18.1 (2022-05-17)
-------------------
* Added Dutch translations
* Always display a label for certificates
* [#39] Hardened certificate admin changelist to not crash on missing physical files
* [#34] Test certificates are removed from filesystem when certificate records are deleted
* Expose type hints
| zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/CHANGELOG.rst | CHANGELOG.rst |
from typing import List, Optional, Type
from urllib.parse import parse_qs, urlparse
from .api_models.base import ZGWModel, factory
from .api_models.catalogi import Catalogus, InformatieObjectType
from .client import Client
from .concurrent import parallel
from .constants import APITypes
from .models import Service
def get_paginated_results(
client: Client,
resource: str,
minimum: Optional[int] = None,
test_func: Optional[callable] = None,
*args,
**kwargs
) -> list:
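    """
    Fetch all pages of a paginated list endpoint and return the combined results.

    If ``minimum`` is given, fetching stops as soon as at least that many
    results have been collected; ``test_func`` optionally filters each result.
    """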
request_kwargs = kwargs.get("request_kwargs", {})
request_params = request_kwargs.get("params", {})
results = []
response = client.list(resource, *args, **kwargs)
def _get_results(response):
_results = response["results"]
if test_func:
_results = [result for result in _results if test_func(result)]
return _results
results += _get_results(response)
if minimum and len(results) >= minimum:
return results
while response.get("next"):
next_url = urlparse(response["next"])
query = parse_qs(next_url.query)
new_page = int(query["page"][0])
request_params["page"] = [new_page]
request_kwargs["params"] = request_params
kwargs["request_kwargs"] = request_kwargs
response = client.list(resource, *args, **kwargs)
results += _get_results(response)
if minimum and len(results) >= minimum:
return results
return results
def _get_ztc_clients():
services = Service.objects.filter(api_type=APITypes.ztc)
clients = [service.build_client() for service in services]
return clients
def _fetch_list(
resource: str, clients: List[Client], model: Type[ZGWModel]
) -> List[ZGWModel]:
def _fetch(client: Client):
results = get_paginated_results(client, resource)
return results
with parallel() as executor:
resp_data = executor.map(_fetch, clients)
flattened = sum(resp_data, [])
return factory(model, flattened)
def get_catalogi(clients: List[Client] = None):
if clients is None:
clients = _get_ztc_clients()
return _fetch_list("catalogus", clients, Catalogus)
def get_informatieobjecttypen(
clients: List[Client] = None,
) -> List[InformatieObjectType]:
"""
Retrieve all informatieobjecttypen for all catalogi.
"""
if clients is None:
clients = _get_ztc_clients()
catalogi = {cat.url: cat for cat in get_catalogi(clients=clients)}
iots = _fetch_list("informatieobjecttype", clients, InformatieObjectType)
# resolve relations
for iot in iots:
iot.catalogus = catalogi[iot.catalogus]
return iots | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/service.py | service.py |
import logging
from typing import Any, Dict, List, Tuple
from urllib.parse import parse_qs, urlparse
from django import forms
from django.contrib import messages
from django.contrib.admin import widgets
from django.db.models import Field
from django.http import HttpRequest
from django.utils.translation import gettext_lazy as _
from requests.exceptions import HTTPError
from zds_client.client import ClientError
from .constants import APITypes
from .models.services import NLXConfig, Service
from .nlx import ServiceType, get_nlx_services
from .utils import cache_on_request
logger = logging.getLogger(__name__)
# TODO: parallelize
def get_zaaktypen() -> Dict[Service, List[Dict[str, Any]]]:
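    """Fetch all zaaktypen from every configured ZTC service, keyed by service."""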
services = Service.objects.filter(api_type=APITypes.ztc)
zaaktypen_per_service = {}
for service in services:
client = service.build_client()
logger.debug("Fetching zaaktype list for service %r", service)
zaaktypen_per_service[service] = []
response = client.list("zaaktype")
zaaktypen_per_service[service] += response["results"]
while response["next"]:
next_url = urlparse(response["next"])
query = parse_qs(next_url.query)
            new_page = int(query["page"][0])  # the "next" link already points at the next page
query["page"] = [new_page]
response = client.list(
"zaaktype",
query_params=query,
)
zaaktypen_per_service[service] += response["results"]
return zaaktypen_per_service
def get_zaaktype_field(db_field: Field, request: HttpRequest, **kwargs):
try:
zaaktypen = get_zaaktypen()
except ClientError as exc:
error_message = exc.args[0]
messages.error(
request,
_(
"Failed to retrieve available zaaktypen "
"(got {http_status} - {detail}). "
"The cause of this exception was: {cause}"
).format(
http_status=error_message["status"],
detail=error_message["detail"],
cause=exc.__cause__,
),
)
choices = []
except HTTPError as exc:
error_message = exc.args[0]
choices = []
messages.error(request, error_message)
else:
def _get_choice(zaaktype: dict) -> Tuple[str, str]:
return (
zaaktype["url"],
f"{zaaktype['identificatie']} - {zaaktype['omschrijving']}",
)
choices = [
(
f"Service: {service.label}",
[_get_choice(zaaktype) for zaaktype in _zaaktypen],
)
for service, _zaaktypen in zaaktypen.items()
]
return forms.ChoiceField(
label=db_field.verbose_name.capitalize(),
widget=widgets.AdminRadioSelect(),
choices=choices,
required=False,
help_text=db_field.help_text,
)
def get_nlx_field(db_field: Field, request: HttpRequest, **kwargs):
with cache_on_request(request, "_nlx_services", get_nlx_services) as cached:
try:
nlx_services = cached.value
except Exception:
logger.warning("Failed fetching the NLX services", exc_info=True)
nlx_services = []
nlx_outway = NLXConfig.get_solo().outway
def _get_choice(service: ServiceType) -> Tuple[str, str]:
org_id = service["organization"]["serial_number"]
name = service["name"]
url = f"{nlx_outway}{org_id}/{name}/"
return (url, name)
choices = [
(
f"{organization['name']} (ID: {organization['serial_number']})",
[_get_choice(service) for service in services],
)
for organization, services in nlx_services
]
choices.insert(0, (_("No NLX"), [("", "---------")]))
return forms.ChoiceField(
label=db_field.verbose_name.capitalize(),
choices=choices,
required=False,
help_text=db_field.help_text,
) | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/admin_fields.py | admin_fields.py |
import logging
import warnings
from dataclasses import dataclass
from typing import IO, Any, Dict, List, Optional, Union
from urllib.parse import urljoin
from django.utils.module_loading import import_string
import yaml
import zds_client
from zds_client import Client
from zds_client.oas import schema_fetcher
from .settings import get_setting
logger = logging.getLogger(__name__)
IS_OLD_ZDS_CLIENT = zds_client.__version__ < "2.0.0"
Object = Dict[str, Any]
def get_client_class() -> type:
client_class = get_setting("ZGW_CONSUMERS_CLIENT_CLASS")
Client = import_string(client_class)
return Client
def load_schema_file(file: IO):
spec = yaml.safe_load(file)
return spec
class ZGWClient(Client):
def __init__(
self,
*args,
auth_value: Optional[Dict[str, str]] = None,
schema_url: str = "",
schema_file: IO = None,
client_certificate_path=None,
client_private_key_path=None,
server_certificate_path=None,
**kwargs,
):
super().__init__(*args, **kwargs)
self.auth_value = auth_value
self.schema_url = schema_url
self.schema_file = schema_file
self.client_certificate_path = client_certificate_path
self.client_private_key_path = client_private_key_path
self.server_certificate_path = server_certificate_path
def fetch_schema(self) -> None:
"""support custom OAS resolution"""
if self.schema_file:
logger.info("Loaded schema from file '%s'", self.schema_file)
self._schema = load_schema_file(self.schema_file)
else:
url = self.schema_url or urljoin(self.base_url, "schema/openapi.yaml")
logger.info("Fetching schema at '%s'", url)
self._schema = schema_fetcher.fetch(url, {"v": "3"})
def pre_request(
self, method: str, url: str, kwargs: Optional[dict] = None, **old_kwargs
):
"""
Add authorization header to requests for APIs without jwt.
"""
kwargs = kwargs or {}
if old_kwargs:
warnings.warn(
"Keyword argument unpacking is removed in zds-client 2.0.",
DeprecationWarning,
)
kwargs.update(old_kwargs)
if not self.auth and self.auth_value:
headers = kwargs.get("headers", {})
headers.update(self.auth_value)
if IS_OLD_ZDS_CLIENT:
super_kwargs = kwargs
else:
super_kwargs = {"kwargs": kwargs}
return super().pre_request(method, url, **super_kwargs)
@property
def auth_header(self) -> dict:
if self.auth:
return self.auth.credentials()
return self.auth_value or {}
def request(
self,
path: str,
operation: str,
method="GET",
expected_status=200,
request_kwargs: Optional[dict] = None,
**kwargs,
) -> Union[List[Object], Object]:
if self.server_certificate_path:
kwargs.update({"verify": self.server_certificate_path})
if self.client_certificate_path:
if self.client_private_key_path:
kwargs.update(
{
"cert": (
self.client_certificate_path,
self.client_private_key_path,
)
}
)
else:
kwargs.update({"cert": self.client_certificate_path})
return super().request(
path, operation, method, expected_status, request_kwargs, **kwargs
)
class UnknownService(Exception):
pass | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/client.py | client.py |
from itertools import groupby
from typing import Any, Dict, Iterable, List, Optional, Tuple, Union
import requests
from zds_client.client import Object
from .models import NLXConfig, Service
def _rewrite_url(value: str, rewrites: Iterable) -> Optional[str]:
for start, replacement in rewrites:
if not value.startswith(start):
continue
return value.replace(start, replacement, 1)
return None
class Rewriter:
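    """
    Rewrite URLs between canonical API roots and their NLX outway equivalents,
    in either direction, recursing through nested lists and dicts.
    """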
def __init__(self):
self.rewrites = Service.objects.exclude(nlx="").values_list("api_root", "nlx")
@property
def reverse_rewrites(self):
return [(to_value, from_value) for from_value, to_value in self.rewrites]
def forwards(self, data: Union[list, dict]):
"""
Rewrite URLs from from_value to to_value.
"""
self._rewrite(data, self.rewrites)
def backwards(self, data: Union[list, dict]):
"""
Rewrite URLs from to_value to from_value.
"""
self._rewrite(data, self.reverse_rewrites)
def _rewrite(self, data: Union[list, dict], rewrites: Iterable) -> None:
if isinstance(data, list):
new_items = []
for item in data:
if isinstance(item, str):
new_value = _rewrite_url(item, rewrites)
if new_value:
new_items.append(new_value)
else:
new_items.append(item)
else:
self._rewrite(item, rewrites=rewrites)
new_items.append(item)
# replace list elements
assert len(new_items) == len(data)
for i in range(len(data)):
data[i] = new_items[i]
return
if not isinstance(data, dict):
return
for key, value in data.items():
if isinstance(value, (dict, list)):
self._rewrite(value, rewrites=rewrites)
continue
elif not isinstance(value, str):
continue
assert isinstance(value, str)
rewritten = _rewrite_url(value, rewrites)
if rewritten is not None:
data[key] = rewritten
class NLXClientMixin:
"""
Enable URL rewriting for zds_client.Client clients.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.rewriter = Rewriter()
# def pre_request(self, method: str, url: str, **kwargs) -> Any:
# """
# Rewrite NLX urls in the request body and params.
# From NLX -> canonical.
# """
# json = kwargs.get("json")
# if json:
# self.rewriter.backwards(json)
# params = kwargs.get("params")
# if params:
# self.rewriter.backwards(params)
# return super().pre_request(method, url, **kwargs)
def request(
self, path: str, operation: str, method="GET", expected_status=200, **kwargs
) -> Union[List[Object], Object]:
"""
Make the actual HTTP request.
"""
# intercept canonical URLs and rewrite to NLX
_paths = [path]
self.rewriter.forwards(_paths)
path = _paths[0]
return super().request(
path, operation, method=method, expected_status=expected_status, **kwargs
)
def post_response(
self, pre_id: Any, response_data: Optional[Union[dict, list]] = None
) -> None:
"""
Rewrite from NLX -> canonical.
"""
if response_data:
self.rewriter.backwards(response_data)
super().post_response(pre_id, response_data)
Organization = Dict[str, str]
ServiceType = Dict[str, str]
def get_nlx_services() -> List[Tuple[Organization, List[ServiceType]]]:
config = NLXConfig.get_solo()
if not config.outway or not config.directory_url:
return []
directory = config.directory_url
url = f"{directory}api/directory/list-services"
cert = (
(config.certificate.path, config.certificate_key.path)
if (config.certificate and config.certificate_key)
else None
)
response = requests.get(url, cert=cert)
response.raise_for_status()
services = response.json()["services"]
services.sort(key=lambda s: (s["organization"]["serial_number"], s["name"]))
services_per_organization = [
(k, list(v)) for k, v in groupby(services, key=lambda s: s["organization"])
]
return services_per_organization | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/nlx.py | nlx.py |
import django.db.models.deletion
from django.db import migrations, models
import privates.fields
import privates.storages
class Migration(migrations.Migration):
dependencies = [
("zgw_consumers", "0014_auto_20211217_1557"),
]
operations = [
migrations.CreateModel(
name="Certificate",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"label",
models.CharField(
help_text="Recognisable label for the certificate",
max_length=100,
verbose_name="label",
),
),
(
"type",
models.CharField(
choices=[
("key_pair", "Key-pair"),
("cert_only", "Certificate only"),
],
help_text="Is this only a certificate or is there an associated private key?",
max_length=20,
verbose_name="type",
),
),
(
"public_certificate",
privates.fields.PrivateMediaFileField(
help_text="The content of the certificate",
storage=privates.storages.PrivateMediaFileSystemStorage(),
upload_to="ssl_certs_keys/%Y/%m/%d",
verbose_name="public certificate",
),
),
(
"private_key",
privates.fields.PrivateMediaFileField(
blank=True,
help_text="The content of the private key",
storage=privates.storages.PrivateMediaFileSystemStorage(),
upload_to="ssl_certs_keys/%Y/%m/%d",
verbose_name="private key",
),
),
],
options={
"verbose_name": "certificate",
"verbose_name_plural": "certificates",
},
),
migrations.AddField(
model_name="service",
name="client_certificate",
field=models.ForeignKey(
blank=True,
help_text="The SSL/TLS certificate of the client",
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="service_client",
to="zgw_consumers.certificate",
),
),
migrations.AddField(
model_name="service",
name="server_certificate",
field=models.ForeignKey(
blank=True,
help_text="The SSL/TLS certificate of the server",
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="service_server",
to="zgw_consumers.certificate",
),
),
] | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/migrations/0015_auto_20220307_1522.py | 0015_auto_20220307_1522.py |
import django.db.models.deletion
from django.db import migrations, models
def get_operation(forwards: bool = True):
from_app = "zgw_consumers" if forwards else "simple_certmanager"
to_app = "simple_certmanager" if forwards else "zgw_consumers"
def migrate_data(apps, _):
NewCertificate = apps.get_model(to_app, "Certificate")
OldCertificate = apps.get_model(from_app, "Certificate")
old_certs = OldCertificate.objects.all()
for old_cert in old_certs:
new_cert = NewCertificate(
id=old_cert.id,
label=old_cert.label,
type=old_cert.type,
public_certificate=old_cert.public_certificate,
private_key=old_cert.private_key,
)
new_cert.save()
return migrate_data
class Migration(migrations.Migration):
dependencies = [
("simple_certmanager", "0001_initial"),
("zgw_consumers", "0015_auto_20220307_1522"),
]
operations = [
migrations.RunPython(get_operation(forwards=True), migrations.RunPython.noop),
migrations.AlterField(
model_name="service",
name="client_certificate",
field=models.ForeignKey(
blank=True,
help_text="The SSL/TLS certificate of the client",
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="service_client",
to="simple_certmanager.certificate",
),
),
migrations.AlterField(
model_name="service",
name="server_certificate",
field=models.ForeignKey(
blank=True,
help_text="The SSL/TLS certificate of the server",
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="service_server",
to="simple_certmanager.certificate",
),
),
migrations.RunPython(migrations.RunPython.noop, get_operation(forwards=False)),
migrations.DeleteModel(
name="Certificate",
),
migrations.CreateModel(
name="Certificate",
fields=[],
options={
"proxy": True,
"indexes": [],
"constraints": [],
},
bases=("simple_certmanager.certificate",),
),
] | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/migrations/0016_auto_20220818_1412.py | 0016_auto_20220818_1412.py |
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name="Service",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("label", models.CharField(max_length=100, verbose_name="label")),
(
"api_type",
models.CharField(
choices=[
("ac", "AC (Authorizations)"),
("nrc", "NRC (Notifications)"),
("zrc", "ZRC (Zaken)"),
("ztc", "ZTC (Zaaktypen)"),
("drc", "DRC (Informatieobjecten"),
("brc", "BRC (Besluiten)"),
("orc", "ORC (Overige)"),
],
max_length=20,
verbose_name="type",
),
),
(
"api_root",
models.CharField(
max_length=255, unique=True, verbose_name="api root url"
),
),
(
"extra",
django.contrib.postgres.fields.jsonb.JSONField(
default=dict,
help_text="Extra configuration that's service-specific",
verbose_name="extra configuration",
),
),
("client_id", models.CharField(blank=True, max_length=255)),
("secret", models.CharField(blank=True, max_length=255)),
],
options={"verbose_name": "service", "verbose_name_plural": "services"},
)
] | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/migrations/0001_initial.py | 0001_initial.py |
import copy
from collections import OrderedDict
from datetime import date, datetime, time
from decimal import Decimal
from uuid import UUID
from rest_framework import fields, serializers
from .utils import extract_model_field_type, get_field_kwargs
class APIModelSerializer(serializers.Serializer):
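    """
    A ``Serializer`` that derives its fields from the type annotations of the
    (data)class set as ``Meta.model``, mapping Python types to DRF fields via
    ``serializer_field_mapping``.
    """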
serializer_field_mapping = {
str: fields.CharField,
int: fields.IntegerField,
float: fields.FloatField,
Decimal: fields.DecimalField,
date: fields.DateField,
datetime: fields.DateTimeField,
time: fields.TimeField,
bool: fields.BooleanField,
UUID: fields.UUIDField,
}
def get_fields(self):
assert hasattr(
self, "Meta"
), 'Class {serializer_class} missing "Meta" attribute'.format(
serializer_class=self.__class__.__name__
)
assert hasattr(
self.Meta, "model"
), 'Class {serializer_class} missing "Meta.model" attribute'.format(
serializer_class=self.__class__.__name__
)
declared_fields = copy.deepcopy(self._declared_fields)
model = self.Meta.model
depth = getattr(self.Meta, "depth", 0)
if depth is not None:
assert depth >= 0, "'depth' may not be negative."
assert depth <= 10, "'depth' may not be greater than 10."
field_names = self.get_field_names(declared_fields)
extra_kwargs = self.get_extra_kwargs()
# Determine the fields that should be included on the serializer.
fields = OrderedDict()
for field_name in field_names:
# If the field is explicitly declared on the class then use that.
if field_name in declared_fields:
fields[field_name] = declared_fields[field_name]
continue
extra_field_kwargs = extra_kwargs.get(field_name, {})
source = extra_field_kwargs.get("source", "*")
if source == "*":
source = field_name
# Determine the serializer field class and keyword arguments.
field_class, field_kwargs = self.build_field(source, model)
# Include any kwargs defined in `Meta.extra_kwargs`
field_kwargs = self.include_extra_kwargs(field_kwargs, extra_field_kwargs)
# Create the serializer field.
fields[field_name] = field_class(**field_kwargs)
return fields
def get_field_names(self, declared_fields):
fields = self.Meta.fields
# Ensure that all declared fields have also been included in the
# `Meta.fields` option.
# Do not require any fields that are declared in a parent class,
# in order to allow serializer subclasses to only include
# a subset of fields.
required_field_names = set(declared_fields)
for cls in self.__class__.__bases__:
required_field_names -= set(getattr(cls, "_declared_fields", []))
for field_name in required_field_names:
assert field_name in fields, (
"The field '{field_name}' was declared on serializer "
"{serializer_class}, but has not been included in the "
"'fields' option.".format(
field_name=field_name, serializer_class=self.__class__.__name__
)
)
return fields
def get_extra_kwargs(self):
"""
Return a dictionary mapping field names to a dictionary of
additional keyword arguments.
"""
extra_kwargs = copy.deepcopy(getattr(self.Meta, "extra_kwargs", {}))
read_only_fields = getattr(self.Meta, "read_only_fields", None)
if read_only_fields is not None:
if not isinstance(read_only_fields, (list, tuple)):
raise TypeError(
"The `read_only_fields` option must be a list or tuple. "
"Got %s." % type(read_only_fields).__name__
)
for field_name in read_only_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs["read_only"] = True
extra_kwargs[field_name] = kwargs
else:
# Guard against the possible misspelling `readonly_fields` (used
# by the Django admin and others).
assert not hasattr(self.Meta, "readonly_fields"), (
"Serializer `%s.%s` has field `readonly_fields`; "
"the correct spelling for the option is `read_only_fields`."
% (self.__class__.__module__, self.__class__.__name__)
)
return extra_kwargs
def include_extra_kwargs(self, kwargs, extra_kwargs):
"""
Include any 'extra_kwargs' that have been included for this field,
possibly removing any incompatible existing keyword arguments.
"""
if extra_kwargs.get("read_only", False):
for attr in [
"required",
"default",
"allow_blank",
"allow_null",
"min_length",
"max_length",
"min_value",
"max_value",
"validators",
"queryset",
]:
kwargs.pop(attr, None)
if extra_kwargs.get("default") and kwargs.get("required") is False:
kwargs.pop("required")
if extra_kwargs.get("read_only", kwargs.get("read_only", False)):
extra_kwargs.pop(
"required", None
) # Read only fields should always omit the 'required' argument.
kwargs.update(extra_kwargs)
return kwargs
def build_field(self, field_name, model_class):
model_field_type = extract_model_field_type(model_class, field_name)
return self.build_standard_field(field_name, model_field_type)
def build_standard_field(self, field_name, model_field_type):
"""
Create regular model fields.
"""
field_mapping = self.serializer_field_mapping
field_class = field_mapping[model_field_type]
field_kwargs = get_field_kwargs(field_name, model_field_type)
if "choices" in field_kwargs:
# Fields with choices get coerced into `ChoiceField`
# instead of using their regular typed field.
field_class = self.serializer_choice_field
# Some model fields may introduce kwargs that would not be valid
# for the choice field. We need to strip these out.
# Eg. models.DecimalField(max_digits=3, decimal_places=1, choices=DECIMAL_CHOICES)
valid_kwargs = {
"read_only",
"write_only",
"required",
"default",
"initial",
"source",
"label",
"help_text",
"style",
"error_messages",
"validators",
"allow_null",
"allow_blank",
"choices",
}
for key in list(field_kwargs):
if key not in valid_kwargs:
field_kwargs.pop(key)
if not issubclass(field_class, fields.CharField) and not issubclass(
field_class, fields.ChoiceField
):
# `allow_blank` is only valid for textual fields.
field_kwargs.pop("allow_blank", None)
return field_class, field_kwargs | zgw-consumers | /zgw-consumers-0.26.2.tar.gz/zgw-consumers-0.26.2/zgw_consumers/drf/serializers.py | serializers.py |
from typing import Optional, Tuple
from django.db.models.fields.related_lookups import get_normalized_value
from django.db.models.lookups import Exact as _Exact, In as _In
from zgw_consumers.models import Service
from .fields import ServiceUrlField
def decompose_value(value: str) -> Tuple[Optional[Service], Optional[str]]:
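    """Split an absolute URL into its registered ``Service`` and the relative part."""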
service = Service.get_service(value)
if not service:
return None, None
relative_val = value[len(service.api_root) :]
return service, relative_val
class ServiceUrlFieldMixin:
def split_lhs(self, compiler, connection) -> Tuple[str, tuple, str, tuple]:
target = self.lhs.target
alias = self.lhs.alias
base_lhs = target._base_field.get_col(alias)
relative_lhs = target._relative_field.get_col(alias)
base_lhs_sql, base_lhs_params = self.process_lhs(
compiler, connection, lhs=base_lhs
)
relative_lhs_sql, relative_lhs_params = self.process_lhs(
compiler, connection, lhs=relative_lhs
)
return base_lhs_sql, base_lhs_params, relative_lhs_sql, relative_lhs_params
def get_prep_lookup(self) -> list:
if not self.rhs_is_direct_value():
return super().get_prep_lookup()
target = self.lhs.target
alias = target.model._meta.db_table
base_lhs, relative_lhs = [
field.get_col(alias, output_field=field)
for field in [target._base_field, target._relative_field]
]
value = self.rhs if self.get_db_prep_lookup_value_is_iterable else [self.rhs]
prepared_values = []
for rhs in value:
base_value, relative_value = decompose_value(rhs)
# convert model instances to int for FK fields
base_normalized_value = get_normalized_value(base_value, base_lhs)[0]
relative_normalized_value = get_normalized_value(
relative_value, relative_lhs
)[0]
prepared_values.append(
[
target._base_field.get_prep_value(base_normalized_value),
target._relative_field.get_prep_value(relative_normalized_value),
]
)
return (
prepared_values[0]
if not self.get_db_prep_lookup_value_is_iterable
else prepared_values
)
def get_db_prep_lookup(self, value, connection):
# For relational fields, use the 'target_field' attribute of the
# output_field.
target = self.lhs.target
sql = "%s"
params = (
[
[
target._base_field.get_db_prep_value(
v[0], connection, prepared=True
),
target._relative_field.get_db_prep_value(
v[1], connection, prepared=True
),
]
for v in value
]
if self.get_db_prep_lookup_value_is_iterable
else [
target._base_field.get_db_prep_value(
value[0], connection, prepared=True
),
target._relative_field.get_db_prep_value(
value[1], connection, prepared=True
),
]
)
return sql, params
@ServiceUrlField.register_lookup
class Exact(ServiceUrlFieldMixin, _Exact):
def as_sql(self, compiler, connection):
# process lhs
(
base_lhs_sql,
base_lhs_params,
relative_lhs_sql,
relative_lhs_params,
) = self.split_lhs(compiler, connection)
# process rhs
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
rhs_sql = self.get_rhs_op(connection, rhs_sql)
# combine
params = rhs_params
sql = f"{base_lhs_sql} {rhs_sql} AND {relative_lhs_sql} {rhs_sql}"
return sql, params
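# Sketch of the SQL produced by the Exact lookup above (illustrative only;
# actual quoting and aliasing depend on the backend), assuming a
# ServiceUrlField backed by columns "base_id" and "relative_url":
#
#   "table"."base_id" = %s AND "table"."relative_url" = %s
#
# with params [<service pk>, "zaken/123"].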
@ServiceUrlField.register_lookup
class In(ServiceUrlFieldMixin, _In):
"""
This realization will add additional DB query for every item in rhs list
Possible optimization is to cache Service.get_service(value)
Other solution would be not to decompose rhs value, but to combine lhs fields
But it will require additional join, which will complicate the implementation
The concatenation can slow down the DB query even more since the indexes are
usually not used with it
"""
def as_sql(self, compiler, connection):
# TODO: support connection.ops.max_in_list_size()
# process lhs
(
base_lhs_sql,
base_lhs_params,
relative_lhs_sql,
relative_lhs_params,
) = self.split_lhs(compiler, connection)
# process rhs
_, rhs_params = self.process_rhs(compiler, connection)
rhs_sql = "IN (" + ", ".join(["(%s, %s)"] * len(rhs_params)) + ")"
# combine
params = sum(rhs_params, [])
sql = f"({base_lhs_sql}, {relative_lhs_sql}) {rhs_sql}"
        return sql, params

# --- end of file: zgw_consumers/models/lookups.py (zgw-consumers 0.26.2) ---
import socket
import uuid
import warnings
from typing import Optional
from urllib.parse import urlparse, urlsplit, urlunsplit
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.functions import Length
from django.utils.translation import gettext_lazy as _
from privates.fields import PrivateMediaFileField
from simple_certmanager.models import Certificate
from solo.models import SingletonModel
from zds_client import ClientAuth
from zgw_consumers import settings as zgw_settings
from ..client import ZGWClient, get_client_class
from ..constants import APITypes, AuthTypes, NLXDirectories
from ..query import ServiceManager
from .abstract import RestAPIService
class Service(RestAPIService):
uuid = models.UUIDField(_("UUID"), default=uuid.uuid4)
api_type = models.CharField(_("type"), max_length=20, choices=APITypes.choices)
api_root = models.CharField(_("api root url"), max_length=255, unique=True)
# credentials for the API
client_id = models.CharField(max_length=255, blank=True)
secret = models.CharField(max_length=255, blank=True)
auth_type = models.CharField(
_("authorization type"),
max_length=20,
choices=AuthTypes.choices,
default=AuthTypes.zgw,
)
header_key = models.CharField(_("header key"), max_length=100, blank=True)
header_value = models.CharField(_("header value"), max_length=255, blank=True)
nlx = models.URLField(
_("NLX url"), max_length=1000, blank=True, help_text=_("NLX (outway) address")
)
user_id = models.CharField(
_("user ID"),
max_length=255,
blank=True,
help_text=_(
"User ID to use for the audit trail. Although these external API credentials are typically used by"
"this API itself instead of a user, the user ID is required."
),
)
user_representation = models.CharField(
_("user representation"),
max_length=255,
blank=True,
help_text=_("Human readable representation of the user."),
)
client_certificate = models.ForeignKey(
Certificate,
blank=True,
null=True,
help_text=_("The SSL/TLS certificate of the client"),
on_delete=models.PROTECT,
related_name="service_client",
)
server_certificate = models.ForeignKey(
Certificate,
blank=True,
null=True,
help_text=_("The SSL/TLS certificate of the server"),
on_delete=models.PROTECT,
related_name="service_server",
)
objects = ServiceManager()
class Meta:
verbose_name = _("service")
verbose_name_plural = _("services")
def __str__(self):
return f"[{self.get_api_type_display()}] {self.label}"
def save(self, *args, **kwargs):
if not self.api_root.endswith("/"):
self.api_root = f"{self.api_root}/"
if self.nlx and not self.nlx.endswith("/"):
self.nlx = f"{self.nlx}/"
super().save(*args, **kwargs)
def clean(self):
super().clean()
# validate header_key and header_value
if self.header_key and not self.header_value:
raise ValidationError(
{
"header_value": _(
"If header_key is set, header_value must also be set"
)
}
)
if not self.header_key and self.header_value:
raise ValidationError(
{"header_key": _("If header_value is set, header_key must also be set")}
)
if self.oas and self.oas_file:
raise ValidationError(
{
"oas": _("Set either oas or oas_file, not both"),
"oas_file": _("Set either oas or oas_file, not both"),
}
)
elif not self.oas and not self.oas_file:
raise ValidationError(
{
"oas": _("Set either oas or oas_file"),
"oas_file": _("Set either oas or oas_file"),
}
)
def build_client(self, **claims):
"""
Build an API client from the service configuration.
"""
_uuid = uuid.uuid4()
api_root = self.api_root
if self.nlx:
api_root = api_root.replace(self.api_root, self.nlx, 1)
Client = get_client_class()
# legacy flow
if hasattr(Client, "from_url"):
warnings.warn(
"Support for zds-client < 2.0 is deprecated", DeprecationWarning
)
dummy_detail_url = f"{api_root}dummy/{_uuid}"
client = Client.from_url(dummy_detail_url)
client.schema_url = self.oas
client.schema_file = self.oas_file
else: # 2.0.0+
client = Client(api_root, schema_url=self.oas, schema_file=self.oas_file)
if self.server_certificate:
client.server_certificate_path = (
self.server_certificate.public_certificate.path
)
if self.client_certificate:
client.client_certificate_path = (
self.client_certificate.public_certificate.path
)
if self.client_certificate.private_key:
client.client_private_key_path = (
self.client_certificate.private_key.path
)
if self.auth_type == AuthTypes.zgw:
client.auth = ClientAuth(
client_id=self.client_id,
secret=self.secret,
user_id=self.user_id,
user_representation=self.user_representation,
**claims,
)
elif self.auth_type == AuthTypes.api_key:
client.auth_value = {self.header_key: self.header_value}
return client
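    # Hedged usage sketch (values are hypothetical; the exact client API
    # depends on the installed zds-client version):
    #
    #   service = Service.objects.get(api_root="https://api.example.com/zrc/v1/")
    #   client = service.build_client()
    #   client.auth_header  # ready-to-use Authorization header, if configured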
@classmethod
def get_service(cls, url: str) -> Optional["Service"]:
split_url = urlsplit(url)
scheme_and_domain = urlunsplit(split_url[:2] + ("", "", ""))
candidates = (
cls.objects.filter(api_root__startswith=scheme_and_domain)
.annotate(api_root_length=Length("api_root"))
.order_by("-api_root_length")
)
        # pick the service with the longest api_root that is a prefix of the url
for candidate in candidates.iterator():
if url.startswith(candidate.api_root):
return candidate
return None
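    # Illustration of the longest-prefix match above (hypothetical data): with
    # services rooted at "https://x.example/api/v1/" and
    # "https://x.example/api/v1/sub/", calling
    # get_service("https://x.example/api/v1/sub/y") returns the second
    # service, because candidates are ordered by descending api_root length.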
@classmethod
def get_client(cls, url: str, **kwargs) -> Optional[ZGWClient]:
service = cls.get_service(url)
if not service:
return None
return service.build_client(**kwargs)
@classmethod
def get_auth_header(cls, url: str, **kwargs) -> Optional[dict]:
client = cls.get_client(url, **kwargs)
if not client:
return None
return client.auth_header
class NLXConfig(SingletonModel):
directory = models.CharField(
_("NLX directory"), max_length=50, choices=NLXDirectories.choices, blank=True
)
outway = models.URLField(
_("NLX outway address"),
blank=True,
help_text=_("Example: http://my-outway.nlx:8080"),
)
certificate = PrivateMediaFileField(
upload_to="zgw-consumers/nlx/",
blank=True,
help_text=_(
"Your organization TLS certificate for the NLX network. This is used to "
"fetch the list of available services from the NLX directory API."
),
)
certificate_key = PrivateMediaFileField(
upload_to="zgw-consumers/nlx/",
help_text=_(
"Your organization TLS private key for the NLX network. This is used to "
"fetch the list of available services from the NLX directory API."
),
blank=True,
)
class Meta:
verbose_name = _("NLX configuration")
@property
def directory_url(self) -> str:
nlx_directory_urls = zgw_settings.get_setting("NLX_DIRECTORY_URLS")
return nlx_directory_urls.get(self.directory, "")
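    # Sketch (the mapping below is an assumption, not necessarily the shipped
    # default): with the NLX_DIRECTORY_URLS setting resolving to
    # {"demo": "https://directory.demo.nlx.io/"} and directory == "demo",
    # directory_url returns "https://directory.demo.nlx.io/".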
def save(self, *args, **kwargs):
if self.outway and not self.outway.endswith("/"):
self.outway = f"{self.outway}/"
super().save(*args, **kwargs)
def clean(self):
super().clean()
if not self.outway:
return
        # try to establish a TCP connection to the outway host and port
parsed = urlparse(self.outway)
default_port = 80 if parsed.scheme == "http" else 443
port = parsed.port or default_port
nlx_outway_timeout = zgw_settings.get_setting("NLX_OUTWAY_TIMEOUT")
with socket.socket() as s:
s.settimeout(nlx_outway_timeout)
try:
s.connect((parsed.hostname, port))
            except OSError:  # includes ConnectionRefusedError
raise ValidationError(
_("Connection refused. Please provide a correct address.")
                )

# --- end of file: zgw_consumers/models/services.py (zgw-consumers 0.26.2) ---
from typing import Optional
from urllib.parse import urljoin
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db.models import CharField, CheckConstraint, Field, ForeignKey, Model, Q
from django.db.models.base import Options
class ServiceUrlDescriptor:
def __init__(self, field):
self.field = field
    def get_base_url(self, base_val) -> Optional[str]:
        return getattr(base_val, "api_root", None)
def get_base_val(self, detail_url: str):
from zgw_consumers.models import Service
return Service.get_service(detail_url)
def __get__(self, instance: Model, cls=None) -> Optional[str]:
if instance is None:
return None
base_val = getattr(instance, self.field.base_field)
base_url = self.get_base_url(base_val)
relative_val = getattr(instance, self.field.relative_field)
# todo cache value
return urljoin(base_url, relative_val)
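    # Sketch of the descriptor round-trip (hypothetical values): with
    # _zaak_base pointing at a Service whose api_root is
    # "https://api.example.com/zrc/v1/" and _zaak_relative == "zaken/123",
    # reading the field recomposes "https://api.example.com/zrc/v1/zaken/123".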
def __set__(self, instance: Model, value: Optional[str]):
if value is None and not self.field.null:
raise ValueError(
"A 'None'-value is not allowed. Make the field "
"nullable if empty values should be supported."
)
base_val = None
relative_val = None
if value:
if not isinstance(value, str):
raise TypeError("Only string values are supported")
base_val = self.get_base_val(value)
if not base_val:
                raise ValueError(
                    "The base part of the url %s does not match any "
                    "configured Service" % value
                )
relative_val = value[len(self.get_base_url(base_val)) :]
setattr(instance, self.field.base_field, base_val)
setattr(instance, self.field.relative_field, relative_val)
# todo cache value
class ServiceUrlField(Field):
"""
    Composite field that stores the base and relative parts of a url separately.

    This field is intended to be used with the `zgw_consumers.Service` model.
"""
# field flags
name = None
concrete = False
column = None
db_column = None
descriptor_class = ServiceUrlDescriptor
def __init__(self, base_field: str, relative_field: str, **kwargs):
"""
:param str base_field: name of ForeignKey field to the Service model
used for the base part of the url
:param str relative_field: name of CharField which consists of
the relative part of the url
"""
self.base_field = base_field
self.relative_field = relative_field
super().__init__(**kwargs)
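    # Minimal usage sketch (model and field names are illustrative, not part
    # of this module):
    #
    #   class Case(models.Model):
    #       _zaak_base = models.ForeignKey(
    #           "zgw_consumers.Service", null=True, blank=True,
    #           on_delete=models.PROTECT,
    #       )
    #       _zaak_relative = models.CharField(max_length=255, blank=True)
    #       zaak = ServiceUrlField(
    #           base_field="_zaak_base", relative_field="_zaak_relative",
    #           null=True, blank=True,
    #       )
    #
    # Assigning `case.zaak = "https://api.example.com/zrc/v1/zaken/123"` fills
    # both underlying columns via the descriptor above.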
def contribute_to_class(self, cls, name, private_only=False):
self.name = name
self.model = cls
cls._meta.add_field(self, private=private_only)
self._add_check_constraint(cls._meta)
setattr(cls, name, self.descriptor_class(self))
def _add_check_constraint(
self,
options: Options,
name: str = "{prefix}{base_field}_and_{relative_field}_filled",
) -> None:
"""
        Create the check constraint and add it to the model if it's not present yet.

        The constraint enforces that ``base_field`` and ``relative_field`` are
        either both empty or both filled.
"""
# during migrations, the FK fields are added later, causing the constraint SQL
# building to blow up. We can ignore this at that time.
if self.model.__module__ == "__fake__":
return
empty_base_field = Q(**{f"{self.base_field}__isnull": True})
empty_relative_field = Q(**{f"{self.relative_field}__isnull": True}) | Q(
**{self.relative_field: ""}
)
both_empty = empty_base_field & empty_relative_field
both_filled = ~empty_base_field & ~empty_relative_field
constraint_name = name.format(
prefix=f"{options.app_label}_{options.model_name}_",
base_field=self.base_field,
relative_field=self.relative_field,
)
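        # Example of a resulting name (app/model/field names hypothetical):
        # "myapp_case_zaak_base_and_zaak_relative_filled".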
if self.null:
constraint = CheckConstraint(
name=constraint_name, check=both_empty | both_filled
)
else:
constraint = CheckConstraint(name=constraint_name, check=both_filled)
options.constraints.append(constraint)
# ensure this can be picked up by migrations by making it "explicitly defined"
if "constraints" not in options.original_attrs:
options.original_attrs["constraints"] = options.constraints
@property
def attname(self) -> str:
return self.name
def get_attname_column(self):
return self.attname, None
def deconstruct(self):
path = "%s.%s" % (self.__class__.__module__, self.__class__.__qualname__)
keywords = {
"base_field": self.base_field,
"relative_field": self.relative_field,
"blank": self.blank,
"null": self.null,
}
return self.name, path, [], keywords
@property
def _base_field(self) -> ForeignKey:
return self.model._meta.get_field(self.base_field)
@property
def _relative_field(self) -> CharField:
return self.model._meta.get_field(self.relative_field)
def check(self, **kwargs):
return [
*self._check_field_name(),
*self._check_base_field(),
*self._check_relative_field(),
]
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
"Field names must not end with an underscore.",
obj=self,
id="fields.E001",
)
]
else:
return []
def _check_base_field(self):
"""
Check if 'base_field' exists and if it is a FK to Service model
"""
try:
field = self.model._meta.get_field(self.base_field)
except FieldDoesNotExist:
return [
checks.Error(
"The ServiceUrlField base_field references the nonexistent field '%s'."
% self.base_field,
obj=self,
id="zgw_consumers.E001",
)
]
else:
from zgw_consumers.models import Service
if not isinstance(field, ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey."
% (self.model._meta.object_name, self.base_field),
obj=self,
id="zgw_consumers.E002",
)
]
elif field.remote_field.model != Service:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'zgw_consumers.Service'."
% (self.model._meta.object_name, self.base_field),
obj=self,
id="zgw_consumers.E003",
)
]
else:
return []
def _check_relative_field(self):
"""
Check if 'relative_field' exists
"""
try:
self.model._meta.get_field(self.relative_field)
except FieldDoesNotExist:
return [
checks.Error(
"The ServiceUrlField relative_field references the nonexistent field '%s'."
% self.relative_field,
obj=self,
id="zgw_consumers.E004",
)
]
else:
            return []

# --- end of file: zgw_consumers/models/fields.py (zgw-consumers 0.26.2) ---