Dataset columns: rem (string, 1 to 322k characters), add (string, 0 to 2.05M characters), context (string, 4 to 228k characters), meta (string, 156 to 215 characters). Each row below gives the removed line(s) (rem), the added line(s) (add), the enclosing source context, and the meta field (commit hash plus source file path).
try: return self._call('abortVersion', src, self._serial)
try: oids=self._call('abortVersion', src, self._serial) invalidate=self._cache.invalidate for oid in oids: invalidate(oid, src) return oids
def abortVersion(self, src, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) self._lock_acquire() try: return self._call('abortVersion', src, self._serial) finally: self._lock_release()
c89a419ee8f07b8a33e4840923d2c422b67576aa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c89a419ee8f07b8a33e4840923d2c422b67576aa/ClientStorage.py
try: return self._call('commitVersion', src, dest, self._serial)
try: oids=self._call('commitVersion', src, dest, self._serial) invalidate=self._cache.invalidate if dest: for oid in oids: invalidate(oid, src) else: for oid in oids: invalidate(oid, dest) return oids
def commitVersion(self, src, dest, transaction): if transaction is not self._transaction: raise POSException.StorageTransactionError(self, transaction) self._lock_acquire() try: return self._call('commitVersion', src, dest, self._serial) finally: self._lock_release()
c89a419ee8f07b8a33e4840923d2c422b67576aa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c89a419ee8f07b8a33e4840923d2c422b67576aa/ClientStorage.py
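The two rows above share one pattern: the return value of the remote call stops being passed straight through and is instead iterated so each affected oid can be invalidated in the client cache. A minimal sketch of that shape, with hypothetical call and cache objects standing in for ZEO's internals:

# Illustrative only; "call" and "cache" are stand-ins, not the ClientStorage API.
def abort_version(call, cache, src, serial):
    oids = call("abortVersion", src, serial)
    for oid in oids:
        cache.invalidate(oid, src)   # drop the now-stale cache entry per oid
    return oids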
if p is not None: return p
if p: return p
def load(self, oid, version, _stuff=None): self._lock_acquire() try: p = self._cache.load(oid, version) if p is not None: return p p, s, v, pv, sv = self._call('zeoLoad', oid) self._cache.store(oid, p, s, v, pv, sv) if not v or not version or version != v: return p, s return pv, sv finally: self._lock_release()
c89a419ee8f07b8a33e4840923d2c422b67576aa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c89a419ee8f07b8a33e4840923d2c422b67576aa/ClientStorage.py
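The load rows above hinge on the difference between an is-not-None guard and a plain truthiness guard on a cache lookup. A toy illustration of how the two guards treat an empty cached value; ToyCache and fetch_from_server are made-up names, not ZEO code:

# Illustrative only: a minimal cache where None means "not cached".
class ToyCache:
    def __init__(self):
        self._data = {}
    def load(self, oid):
        return self._data.get(oid)        # None on a miss
    def store(self, oid, value):
        self._data[oid] = value

def fetch_from_server(oid):
    return "payload-for-%s" % oid         # stand-in for the real server call

def load_strict(cache, oid):
    p = cache.load(oid)
    if p is not None:                     # empty-but-cached values are returned as-is
        return p
    p = fetch_from_server(oid)
    cache.store(oid, p)
    return p

def load_truthy(cache, oid):
    p = cache.load(oid)
    if p:                                 # empty or false-y cached values trigger a refetch
        return p
    p = fetch_from_server(oid)
    cache.store(oid, p)
    return p

cache = ToyCache()
cache.store("oid1", "")                   # cached, but false-y
print(load_strict(cache, "oid1"))         # '' served from the cache
print(load_truthy(cache, "oid1"))         # refetched from the server

The only behavioural difference is on false-y cached values; both guards behave the same for a genuine miss, where the lookup returns None.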
return p, s
if s: return p, s raise KeyError, oid
def load(self, oid, version, _stuff=None): self._lock_acquire() try: p = self._cache.load(oid, version) if p is not None: return p p, s, v, pv, sv = self._call('zeoLoad', oid) self._cache.store(oid, p, s, v, pv, sv) if not v or not version or version != v: return p, s return pv, sv finally: self._lock_release()
c89a419ee8f07b8a33e4840923d2c422b67576aa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c89a419ee8f07b8a33e4840923d2c422b67576aa/ClientStorage.py
def f(con, detail=detail, rc=sys.getrefcount):
conn_no = [0] def f(con, detail=detail, rc=sys.getrefcount, conn_no=conn_no): conn_no[0] = conn_no[0] + 1 cn = conn_no[0]
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__'])
fb7b9b4dff99b8d898e08be3452fe6ab91e029f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fb7b9b4dff99b8d898e08be3452fe6ab91e029f9/DB.py
id=oid
id=''
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__'])
fb7b9b4dff99b8d898e08be3452fe6ab91e029f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fb7b9b4dff99b8d898e08be3452fe6ab91e029f9/DB.py
id="%s (%s)" % (oid, d['id'])
id=d['id']
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__'])
fb7b9b4dff99b8d898e08be3452fe6ab91e029f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fb7b9b4dff99b8d898e08be3452fe6ab91e029f9/DB.py
id="%s (%s)" % (oid, d['__name__'])
id=d['__name__']
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__'])
fb7b9b4dff99b8d898e08be3452fe6ab91e029f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fb7b9b4dff99b8d898e08be3452fe6ab91e029f9/DB.py
'oid': id,
'conn_no': cn, 'oid': oid, 'id': id,
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__'])
fb7b9b4dff99b8d898e08be3452fe6ab91e029f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fb7b9b4dff99b8d898e08be3452fe6ab91e029f9/DB.py
'references': con.references(oid),
'state': ob._p_changed,
def f(con, detail=detail, rc=sys.getrefcount): for oid, ob in con._cache.items(): id=oid if hasattr(ob,'__dict__'): d=ob.__dict__ if d.has_key('id'): id="%s (%s)" % (oid, d['id']) elif d.has_key('__name__'): id="%s (%s)" % (oid, d['__name__'])
fb7b9b4dff99b8d898e08be3452fe6ab91e029f9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fb7b9b4dff99b8d898e08be3452fe6ab91e029f9/DB.py
for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/zdaemon", "src/zdaemon/tests", "src/zLOG",
extensions = ["*.conf", "*.xml", "*.txt", "*.sh"] for dir in [ "ZConfig/components/basic", "ZConfig/components/logger", "ZConfig/tests/input", "ZConfig/tests/library", "ZConfig/tests/library/thing", "ZConfig/tests/library/thing/extras", "ZConfig/tests/library/widget", "ZEO", "ZODB", "zdaemon", "zdaemon/tests", "zLOG",
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/zdaemon", "src/zdaemon/tests", "src/zLOG", ]: inputdir = convert_path(inputdir) outputdir = os.path.join(outputbase, inputdir) if not os.path.exists(outputdir): dir_util.mkpath(outputdir) for pattern in ("*.conf", "*.xml", "*.txt", "*.sh"): for fn in glob.glob(os.path.join(inputdir, pattern)): cmd.copy_file(fn, os.path.join(outputbase, fn))
6d6128d31f86a5245aa9eec629d15dd78eff0bc9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d6128d31f86a5245aa9eec629d15dd78eff0bc9/setup.py
inputdir = convert_path(inputdir) outputdir = os.path.join(outputbase, inputdir)
dir = convert_path(dir) inputdir = os.path.join("src", dir) outputdir = os.path.join(outputbase, dir)
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/zdaemon", "src/zdaemon/tests", "src/zLOG", ]: inputdir = convert_path(inputdir) outputdir = os.path.join(outputbase, inputdir) if not os.path.exists(outputdir): dir_util.mkpath(outputdir) for pattern in ("*.conf", "*.xml", "*.txt", "*.sh"): for fn in glob.glob(os.path.join(inputdir, pattern)): cmd.copy_file(fn, os.path.join(outputbase, fn))
6d6128d31f86a5245aa9eec629d15dd78eff0bc9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d6128d31f86a5245aa9eec629d15dd78eff0bc9/setup.py
for pattern in ("*.conf", "*.xml", "*.txt", "*.sh"):
for pattern in extensions:
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/zdaemon", "src/zdaemon/tests", "src/zLOG", ]: inputdir = convert_path(inputdir) outputdir = os.path.join(outputbase, inputdir) if not os.path.exists(outputdir): dir_util.mkpath(outputdir) for pattern in ("*.conf", "*.xml", "*.txt", "*.sh"): for fn in glob.glob(os.path.join(inputdir, pattern)): cmd.copy_file(fn, os.path.join(outputbase, fn))
6d6128d31f86a5245aa9eec629d15dd78eff0bc9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d6128d31f86a5245aa9eec629d15dd78eff0bc9/setup.py
cmd.copy_file(fn, os.path.join(outputbase, fn))
dest = os.path.join(outputbase, fn[4:]) cmd.copy_file(fn, dest)
def copy_other_files(cmd, outputbase): for inputdir in [ "src/ZConfig/components/basic", "src/ZConfig/components/logger", "src/ZConfig/tests/input", "src/ZConfig/tests/library", "src/ZConfig/tests/library/thing", "src/ZConfig/tests/library/thing/extras", "src/ZConfig/tests/library/widget", "src/ZEO", "src/ZODB", "src/zdaemon", "src/zdaemon/tests", "src/zLOG", ]: inputdir = convert_path(inputdir) outputdir = os.path.join(outputbase, inputdir) if not os.path.exists(outputdir): dir_util.mkpath(outputdir) for pattern in ("*.conf", "*.xml", "*.txt", "*.sh"): for fn in glob.glob(os.path.join(inputdir, pattern)): cmd.copy_file(fn, os.path.join(outputbase, fn))
6d6128d31f86a5245aa9eec629d15dd78eff0bc9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d6128d31f86a5245aa9eec629d15dd78eff0bc9/setup.py
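The setup.py rows above move from copying each matched file to the same relative path to stripping the leading src/ component first. A small sketch of that copy pattern under assumed directory names and patterns (the real lists are in the context cell); copy_other_files here is a simplified stand-in:

# Illustrative sketch: glob files under src/<dir> and mirror them under an
# output base without the leading "src/" component.
import glob
import os
import shutil

def copy_other_files(outputbase, dirs=("ZODB", "ZEO"), patterns=("*.txt", "*.xml")):
    for d in dirs:
        inputdir = os.path.join("src", d)
        outputdir = os.path.join(outputbase, d)
        os.makedirs(outputdir, exist_ok=True)
        for pattern in patterns:
            for fn in glob.glob(os.path.join(inputdir, pattern)):
                rel = os.path.relpath(fn, "src")     # drop the "src/" prefix
                shutil.copy(fn, os.path.join(outputbase, rel))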
for wrap in wrappers.keys(): wrap.close() wrappers[wrap] = wrap return wrappers
for w in wrappers.keys(): w.close() return {wrap: wrap}
def _create_wrappers(self): # Create socket wrappers wrappers = {} # keys are active wrappers for domain, addr in self.addrlist: wrap = ConnectWrapper(domain, addr, self.mgr, self.client) wrap.connect_procedure() if wrap.state == "notified": for wrap in wrappers.keys(): wrap.close() wrappers[wrap] = wrap return wrappers if wrap.state != "closed": wrappers[wrap] = wrap return wrappers
4e36c87748801e5bc79717038ade3b21fa3c17f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/4e36c87748801e5bc79717038ade3b21fa3c17f4/client.py
try: from posix import fsync except: fsync=None from types import StringType
from zLOG import LOG, BLATHER, WARNING, ERROR, PANIC, register_subsystem register_subsystem('ZODB FS')
def fsIndex(): return {}
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
LOG('ZODB FS',WARNING, "%s warn: %s\n" % (packed_version, (message % data)))
LOG('ZODB FS', WARNING, "%s warn: %s\n" % (packed_version, (message % data)))
def warn(message, *data): LOG('ZODB FS',WARNING, "%s warn: %s\n" % (packed_version, (message % data)))
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
LOG('ZODB FS',ERROR,"%s ERROR: %s\n" % (packed_version, (message % data)))
LOG('ZODB FS', ERROR, "%s ERROR: %s\n" % (packed_version, (message % data)))
def error(message, *data): LOG('ZODB FS',ERROR,"%s ERROR: %s\n" % (packed_version, (message % data)))
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, (message % data)))
LOG('ZODB FS', PANIC, "%s ERROR: %s\n" % (packed_version, (message % data)))
def nearPanic(message, *data): LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, (message % data)))
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
message=message%data LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, message))
message = message % data LOG('ZODB FS', PANIC, "%s ERROR: %s\n" % (packed_version, message))
def panic(message, *data): message=message%data LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, message)) raise CorruptedTransactionError, message
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
_packt=z64
_packt = z64
def panic(message, *data): message=message%data LOG('ZODB FS',PANIC,"%s ERROR: %s\n" % (packed_version, message)) raise CorruptedTransactionError, message
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
if not os.path.exists(file_name): create = 1
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None):
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
raise ValueError, "can\'t create a read-only file"
raise ValueError, "can't create a read-only file"
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None):
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
if create:
self._file = None if not create: try: self._file = open(file_name, read_only and 'rb' or 'r+b') except IOError, exc: if exc.errno == errno.EFBIG: raise if exc.errno == errno.ENOENT: create = 1 if os.path.exists(file_name): raise else: create = 1 if self._file is None and create:
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None):
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
else: self._file = open(file_name, read_only and 'rb' or 'r+b')
def __init__(self, file_name, create=0, read_only=0, stop=None, quota=None):
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
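The FileStorage rows above replace an unconditional create branch with open-the-existing-file-first logic that only falls back to creating when the open fails with ENOENT. A compact sketch of that pattern; open_or_create is a made-up helper, not the FileStorage constructor:

# Illustrative only: open for update if the file exists, create it otherwise.
import errno

def open_or_create(file_name, read_only=False):
    mode = "rb" if read_only else "r+b"
    try:
        return open(file_name, mode)
    except IOError as exc:
        if exc.errno != errno.ENOENT:
            raise                          # real error (permissions, EFBIG, ...)
        if read_only:
            raise ValueError("can't create a read-only file")
        return open(file_name, "w+b")      # create a fresh read/write file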
self._packt=stop
self._packt = stop
def pack(self, t, referencesf): """Copy data from the current database file to a packed file Non-current records from transactions with time-stamp strings less than packtss are ommitted. As are all undone records. Also, data back pointers that point before packtss are resolved and the associated data are copied, since the old records are not copied. """
ad85dfb9cd6957f764453d68c5aa78663f064355 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ad85dfb9cd6957f764453d68c5aa78663f064355/FileStorage.py
o=objects[-1]
o=objects.pop()
def commit(self, subtransaction=None): 'Finalize the transaction'
bdec0b677a66a4a5ed79390b29cfb5b70866184e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/bdec0b677a66a4a5ed79390b29cfb5b70866184e/Transaction.py
del objects[-1]
def commit(self, subtransaction=None): 'Finalize the transaction'
bdec0b677a66a4a5ed79390b29cfb5b70866184e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/bdec0b677a66a4a5ed79390b29cfb5b70866184e/Transaction.py
size = len(rec.data) + len(rec.version)
if rec.data is None: size = len(rec.version) else: size = len(rec.data) + len(rec.version)
def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._pos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid), str(TimeStamp(trans.tid))) print >> file, "\toffset=%d status=%s user=%s description=%s" % \ (trans._tpos, `trans.status`, trans.user, trans.description) j = 0 tsize = 0 for rec in trans: if rec.data is None: fullclass = "undo or abort of object creation" else: modname, classname = get_pickle_metadata(rec.data) dig = md5.new(rec.data).hexdigest() fullclass = "%s.%s" % (modname, classname) # special case for testing purposes if fullclass == "ZODB.tests.MinPO.MinPO": obj = zodb_unpickle(rec.data) fullclass = "%s %s" % (fullclass, obj.value) if rec.version: version = "version=%s " % rec.version else: version = '' if rec.data_txn: # XXX It would be nice to print the transaction number # (i) but it would be too expensive to keep track of. bp = "bp=%016x" % u64(rec.data_txn) else: bp = "" if rec.data_txn: size = 8 + len(rec.version) else: size = len(rec.data) + len(rec.version) if rec.version: size += DATA_VERSION_HDR_LEN else: size += DATA_HDR_LEN tsize += size print >> file, " data #%05d oid=%016x %sclass=%s size=%d %s" % \ (j, u64(rec.oid), version, fullclass, size, bp) j += 1 print >> file i += 1 iter.close()
014108d512dda1904b4bac900de7dfde118b86ec /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/014108d512dda1904b4bac900de7dfde118b86ec/fsdump.py
pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: if not h.pnv:
self._lock_acquire() try: pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: if not h.pnv: return None pos = h.pnv continue if h.tid < tid: break pos = h.prev end_tid = h.tid if not pos:
def loadBefore(self, oid, tid): pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: # Just follow the pnv pointer to the previous # non-version data. if not h.pnv: # Object was created in version. There is no # before data to find. return None pos = h.pnv # The end_tid for the non-version data is not affected # by versioned data records. continue
c711cf5314bebb7efd9bd4dd0877909ed1576df0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c711cf5314bebb7efd9bd4dd0877909ed1576df0/FileStorage.py
pos = h.pnv continue if h.tid < tid: break pos = h.prev end_tid = h.tid if not pos: return None if h.back: data, _, _, _ = self._loadBack_impl(oid, h.back) return data, h.tid, end_tid else: return self._file.read(h.plen), h.tid, end_tid
if h.back: data, _, _, _ = self._loadBack_impl(oid, h.back) return data, h.tid, end_tid else: return self._file.read(h.plen), h.tid, end_tid finally: self._lock_release()
def loadBefore(self, oid, tid): pos = self._lookup_pos(oid) end_tid = None while True: h = self._read_data_header(pos, oid) if h.version: # Just follow the pnv pointer to the previous # non-version data. if not h.pnv: # Object was created in version. There is no # before data to find. return None pos = h.pnv # The end_tid for the non-version data is not affected # by versioned data records. continue
c711cf5314bebb7efd9bd4dd0877909ed1576df0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c711cf5314bebb7efd9bd4dd0877909ed1576df0/FileStorage.py
errors = {} skipped = 0
global skipped
def __iter__(self): errors = {} skipped = 0 for record in self._txn: record.tid = record.serial # transform the data record format # (including persistent references) sio = StringIO(record.data) up = Unpickler(sio) up.persistent_load = PersistentIdentifier try: classmeta = up.load() state = up.load() except ImportError, v: v = str(v) if v not in errors: if not errors: sys.stderr.write("Pickling import errors:\n") sys.stderr.write('\t'+v+'\n') errors[v] = True
9acb99e8bfe8a01f01b96120c22a640a24bda927 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/9acb99e8bfe8a01f01b96120c22a640a24bda927/conversion.py
if errors: sys.stderr.write(error_explanation) sys.stderr.write("%s database records skipped\n" % skipped)
def __iter__(self): errors = {} skipped = 0 for record in self._txn: record.tid = record.serial # transform the data record format # (including persistent references) sio = StringIO(record.data) up = Unpickler(sio) up.persistent_load = PersistentIdentifier try: classmeta = up.load() state = up.load() except ImportError, v: v = str(v) if v not in errors: if not errors: sys.stderr.write("Pickling import errors:\n") sys.stderr.write('\t'+v+'\n') errors[v] = True
9acb99e8bfe8a01f01b96120c22a640a24bda927 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/9acb99e8bfe8a01f01b96120c22a640a24bda927/conversion.py
raise CorruptedDataError, h
raise CorruptedDataError, pos
def modifiedInVersion(self, oid): self._lock_acquire() try: pos=self._index[oid] file=self._file seek=file.seek seek(pos) doid,serial,prev,tloc,vlen = unpack(">8s8s8s8sH", file.read(34)) if doid != oid: raise CorruptedDataError, h if vlen: seek(24,1) # skip plen, pnv, and pv return file.read(vlen) return '' finally: self._lock_release()
eeca2c9fc81aa63bf63c4f40aa83e523be27c1f7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/eeca2c9fc81aa63bf63c4f40aa83e523be27c1f7/FileStorage.py
def checkBuggyResolve(self):
def checkBuggyResolve1(self):
def checkBuggyResolve(self): obj = PCounter3() obj.inc()
32bad8bef01ed2f567334e4b8a2e75c484f15f0c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/32bad8bef01ed2f567334e4b8a2e75c484f15f0c/ConflictResolution.py
def checkBuggyResolve2(self): obj = PCounter4() obj.inc() oid = self._storage.new_oid() revid1 = self._dostoreNP(oid, data=zodb_pickle(obj)) obj.inc() obj.inc() revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj)) self.assertRaises(TypeError, self._dostoreNP, oid, revid=revid1, data=zodb_pickle(obj)) def checkUndoConflictResolution(self): obj = PCounter() obj.inc() oid = self._storage.new_oid() revid_a = self._dostore(oid, data=obj) obj.inc() revid_b = self._dostore(oid, revid=revid_a, data=obj) obj.inc() revid_c = self._dostore(oid, revid=revid_b, data=obj) info = self._storage.undoInfo() tid = info[1]['id'] self._storage.tpc_begin(self._transaction) self._storage.transactionalUndo(tid, self._transaction) self._storage.tpc_finish(self._transaction) def checkUndoUnresolvable(self): obj = PCounter2() obj.inc() oid = self._storage.new_oid() revid_a = self._dostore(oid, data=obj) obj.inc() revid_b = self._dostore(oid, revid=revid_a, data=obj) obj.inc() revid_c = self._dostore(oid, revid=revid_b, data=obj) info = self._storage.undoInfo() tid = info[1]['id'] self._storage.tpc_begin(self._transaction) self.assertRaises(UndoError, self._storage.transactionalUndo, tid, self._transaction) self._storage.tpc_abort(self._transaction)
def checkBuggyResolve(self): obj = PCounter3() obj.inc()
32bad8bef01ed2f567334e4b8a2e75c484f15f0c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/32bad8bef01ed2f567334e4b8a2e75c484f15f0c/ConflictResolution.py
>>> x.__getnewargs__() ()
def test_basic_pickling(): """ >>> x = Simple('x', aaa=1, bbb='foo') >>> x.__getnewargs__() () >>> print_dict(x.__getstate__()) {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'} >>> f, (c,), state = x.__reduce__() >>> f.__name__ '__newobj__' >>> f.__module__ 'copy_reg' >>> c.__name__ 'Simple' >>> print_dict(state) {'__name__': 'x', 'aaa': 1, 'bbb': 'foo'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x, 2)) == x 1 >>> x.__setstate__({'z': 1}) >>> x.__dict__ {'z': 1} """
e2c134566a030f7217b479d26dd9f54de89fa361 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e2c134566a030f7217b479d26dd9f54de89fa361/test_ExtensionClass.py
>>> x.__getnewargs__() ()
def test_pickling_w_slots_only(): """ >>> x = SubSlotted('x', 'y', 'z') >>> x.__getnewargs__() () >>> d, s = x.__getstate__() >>> d >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x, 2)) == x 1 >>> x.s4 = 'spam' >>> d, s = x.__getstate__() >>> d >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x, 2)) == x 1 """
e2c134566a030f7217b479d26dd9f54de89fa361 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e2c134566a030f7217b479d26dd9f54de89fa361/test_ExtensionClass.py
>>> x.__getnewargs__() ()
def test_pickling_w_slots(): """ >>> x = SubSubSlotted('x', 'y', 'z', aaa=1, bbb='foo') >>> x.__getnewargs__() () >>> d, s = x.__getstate__() >>> print_dict(d) {'aaa': 1, 'bbb': 'foo'} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x, 2)) == x 1 >>> x.s4 = 'spam' >>> d, s = x.__getstate__() >>> print_dict(d) {'aaa': 1, 'bbb': 'foo'} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x, 2)) == x 1 """
e2c134566a030f7217b479d26dd9f54de89fa361 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e2c134566a030f7217b479d26dd9f54de89fa361/test_ExtensionClass.py
>>> x.__getnewargs__() ()
def test_pickling_w_slots_w_empty_dict(): """ >>> x = SubSubSlotted('x', 'y', 'z') >>> x.__getnewargs__() () >>> d, s = x.__getstate__() >>> print_dict(d) {} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x, 2)) == x 1 >>> x.s4 = 'spam' >>> d, s = x.__getstate__() >>> print_dict(d) {} >>> print_dict(s) {'s1': 'x', 's2': 'y', 's3': 'z', 's4': 'spam'} >>> pickle.loads(pickle.dumps(x)) == x 1 >>> pickle.loads(pickle.dumps(x, 0)) == x 1 >>> pickle.loads(pickle.dumps(x, 1)) == x 1 >>> pickle.loads(pickle.dumps(x, 2)) == x 1 """
e2c134566a030f7217b479d26dd9f54de89fa361 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e2c134566a030f7217b479d26dd9f54de89fa361/test_ExtensionClass.py
def log2(type, msg, subsys="ClientStorage:%d" % os.getpid()):
def log2(type, msg, subsys="ZCS:%d" % os.getpid()):
def log2(type, msg, subsys="ClientStorage:%d" % os.getpid()): LOG(subsys, type, msg)
8467908f82d317eaa890906426fbf5b6998d5a2b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/8467908f82d317eaa890906426fbf5b6998d5a2b/ClientStorage.py
elif o=='-M': minimize=1
elif o=='-M': minimize=1
def main(args): opts, args = getopt.getopt(args, 'zd:n:Ds:L') z=s=None data=sys.argv[0] nrep=5 minimize=0 for o, v in opts: if o=='-n': nrep=string.atoi(v) elif o=='-d': data=v elif o=='-s': s=v elif o=='-z': global zlib import zlib z=compress elif o=='-M': minimize=1 elif o=='-D': global debug os.environ['STUPID_LOG_FILE']='' os.environ['STUPID_LOG_SEVERITY']='-999' __builtins__.__debug__=1 if s: s=__import__(s, globals(), globals(), ('__doc__',)) s=s.Storage else: s=ZODB.FileStorage.FileStorage('zeo_speed.fs', create=1) data=open(data).read() db=ZODB.DB(s, # disable cache deactivation cache_size=4000, cache_deactivate_after=6000,) results={} for j in range(nrep): for r in 1, 10, 100, 1000: t=time.time() jar=db.open() get_transaction().begin() rt=jar.root() key='s%s' % r if rt.has_key(key): p=rt[key] else: rt[key]=p=P() for i in range(r): if z is not None: d=z(data) else: d=data v=getattr(p, str(i), P()) v.d=d setattr(p,str(i),v) get_transaction().commit() jar.close() sys.stderr.write("%s %s %s\n" % (j, r, time.time()-t)) sys.stdout.flush() rt=d=p=v=None # release all references if minimize: time.sleep(3) jar.cacheMinimize(3)
5f8a9a786393f09873fe8d4f497b215e1b8deb62 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/5f8a9a786393f09873fe8d4f497b215e1b8deb62/speed.py
__version__='$Revision: 1.16 $'[11:-2]
__version__='$Revision: 1.17 $'[11:-2]
def info(RESPONSE): RESPONSE['Content-type']= 'text/plain'
d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7/DemoStorage.py
if last < 0: last = first - last + 1
def undoLog(self, first, last, filter=None): # I think this is wrong given the handling of first and last # in FileStorage. self._lock_acquire() try: # XXX Shouldn't this be sorted? transactions = self._data.items() pos = len(transactions) r = [] i = 0 while i < last and pos: pos = pos - 1 if i < first: i = i + 1 continue tid, (p, u, d, e, t) = transactions[pos] if p: continue d = {'id': base64.encodestring(tid)[:-1], 'time': TimeStamp(tid).timeTime(), 'user_name': u, 'description': d} if e: d.update(loads(e))
d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7/DemoStorage.py
_data[tid]=1, u, d, e, tuple(o)
_data[tid] = 1, u, d, e, tuple(o)
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex.
d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7/DemoStorage.py
for tid in deleted: del _data[tid]
for tid in deleted: del _data[tid]
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex.
d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7/DemoStorage.py
r[2]=None if r[3]: r[3][1][2]=None pindex=None
r[2] = None if r[3] and r[3][1]: r[3][1][2] = None
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex.
d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7/DemoStorage.py
finally: self._lock_release()
finally: self._lock_release()
def pack(self, t, referencesf): # Packing is hard, at least when undo is supported. # Even for a simple storage like this one, packing # is pretty complex.
d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d0e472eb0b7f45af8edb9bbd980296d9aa23b5a7/DemoStorage.py
'info': c._debug_info,
'info': d,
def connectionDebugInfo(self): r=[] pools,pooll=self._pools t=time() for version, (pool, allocated, lock) in pools.items(): for c in allocated: o=c._opened r.append({ 'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)), 'info': c._debug_info, 'version': version, }) return r
ba1332935b8679720f3ba8be4d15aa33d1875808 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ba1332935b8679720f3ba8be4d15aa33d1875808/DB.py
name='', client=None, var=None,
name='', client=None, debug=0, var=None,
def __init__(self, addr, storage='1', cache_size=20000000, name='', client=None, var=None, min_disconnect_poll=5, max_disconnect_poll=300, wait=0, read_only=0, read_only_fallback=0):
f8411024b047be56a119c3b6f827f9a505337f1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8411024b047be56a119c3b6f827f9a505337f1a/ClientStorage.py
wait=0, read_only=0, read_only_fallback=0):
wait_for_server_on_startup=None, wait=None, read_only=0, read_only_fallback=0):
def __init__(self, addr, storage='1', cache_size=20000000, name='', client=None, var=None, min_disconnect_poll=5, max_disconnect_poll=300, wait=0, read_only=0, read_only_fallback=0):
f8411024b047be56a119c3b6f827f9a505337f1a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8411024b047be56a119c3b6f827f9a505337f1a/ClientStorage.py
ZEOStorageClass = None
ZEOStorageClass = ZEOStorage
def log(message, level=zLOG.INFO, label=None, error=None): """Internal helper to log a message using zLOG.""" zLOG.LOG(label or _label, level, message, error=error)
32bc881f308a45b6dbdcbdfecc2c1005801a356b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/32bc881f308a45b6dbdcbdfecc2c1005801a356b/StorageServer.py
class ZEOStorage: """Proxy to underlying storage for a single remote client.""" ClientStorageStubClass = ClientStub.ClientStorage def __init__(self, server, read_only=0): self.server = server self.timeout = None self.connection = None self.client = None self.storage = None self.storage_id = "uninitialized" self.transaction = None self.read_only = read_only self.locked = 0 self.log_label = _label def notifyConnected(self, conn): self.connection = conn self.client = self.ClientStorageStubClass(conn) addr = conn.addr if isinstance(addr, type("")): label = addr else: host, port = addr label = str(host) + ":" + str(port) self.log_label = _label + "/" + label def notifyDisconnected(self): if self.transaction is not None: self.log("disconnected during transaction %s" % self.transaction) self._abort() else: self.log("disconnected") def __repr__(self): tid = self.transaction and repr(self.transaction.id) if self.storage: stid = (self.storage._transaction and repr(self.storage._transaction.id)) else: stid = None name = self.__class__.__name__ return "<%s %X trans=%s s_trans=%s>" % (name, id(self), tid, stid) def log(self, msg, level=zLOG.INFO, error=None): zLOG.LOG(self.log_label, level, msg, error=error) def setup_delegation(self): """Delegate several methods to the storage""" self.versionEmpty = self.storage.versionEmpty self.versions = self.storage.versions self.history = self.storage.history self.load = self.storage.load self.loadSerial = self.storage.loadSerial self.modifiedInVersion = self.storage.modifiedInVersion try: fn = self.storage.getExtensionMethods except AttributeError: pass else: for name in fn().keys(): if not hasattr(self,name): setattr(self, name, getattr(self.storage, name)) self.lastTransaction = self.storage.lastTransaction def check_tid(self, tid, exc=None): if self.read_only: raise ReadOnlyError() caller = sys._getframe().f_back.f_code.co_name if self.transaction is None: self.log("no current transaction: %s()" % caller, zLOG.PROBLEM) if exc is not None: raise exc(None, tid) else: return 0 if self.transaction.id != tid: self.log("%s(%s) invalid; current transaction = %s" % (caller, repr(tid), repr(self.transaction.id)), zLOG.PROBLEM) if exc is not None: raise exc(self.transaction.id, tid) else: return 0 return 1 def register(self, storage_id, read_only): """Select the storage that this client will use This method must be the first one called by the client. 
""" if self.storage is not None: self.log("duplicate register() call") raise ValueError, "duplicate register() call" storage = self.server.storages.get(storage_id) if storage is None: self.log("unknown storage_id: %s" % storage_id) raise ValueError, "unknown storage: %s" % storage_id if not read_only and (self.read_only or storage.isReadOnly()): raise ReadOnlyError() self.read_only = self.read_only or read_only self.storage_id = storage_id self.storage = storage self.setup_delegation() self.timeout = self.server.register_connection(storage_id, self) def get_info(self): return {'length': len(self.storage), 'size': self.storage.getSize(), 'name': self.storage.getName(), 'supportsUndo': self.storage.supportsUndo(), 'supportsVersions': self.storage.supportsVersions(), 'supportsTransactionalUndo': self.storage.supportsTransactionalUndo(), 'extensionMethods': self.getExtensionMethods(), } def get_size_info(self): return {'length': len(self.storage), 'size': self.storage.getSize(), } def getExtensionMethods(self): try: e = self.storage.getExtensionMethods except AttributeError: return {} else: return e() def zeoLoad(self, oid): v = self.storage.modifiedInVersion(oid) if v: pv, sv = self.storage.load(oid, v) else: pv = sv = None try: p, s = self.storage.load(oid, '') except KeyError: if sv: p = s = None else: raise return p, s, v, pv, sv def getInvalidations(self, tid): invtid, invlist = self.server.get_invalidations(tid) if invtid is None: return None self.log("Return %d invalidations up to tid %s" % (len(invlist), u64(invtid))) return invtid, invlist def zeoVerify(self, oid, s, sv): try: os = self.storage.getSerial(oid) except KeyError: self.client.invalidateVerify((oid, '')) else: if sv: if sv != os: self.client.invalidateVerify((oid, '')) else: if s != os: self.client.invalidateVerify((oid, '')) def endZeoVerify(self): self.client.endVerify() def pack(self, time, wait=1): if wait: return run_in_thread(self._pack_impl, time) else: t = threading.Thread(target=self._pack_impl, args=(time,)) t.start() return None def _pack_impl(self, time): self.log("pack(time=%s) started..." 
% repr(time)) self.storage.pack(time, referencesf) self.log("pack(time=%s) complete" % repr(time)) self.server.invalidate(0, self.storage_id, None, (), self.get_size_info()) def new_oids(self, n=100): """Return a sequence of n new oids, where n defaults to 100""" if self.read_only: raise ReadOnlyError() if n <= 0: n = 1 return [self.storage.new_oid() for i in range(n)] def undo(self, transaction_id): if self.read_only: raise ReadOnlyError() oids = self.storage.undo(transaction_id) if oids: self.server.invalidate(self, self.storage_id, None, map(lambda oid: (oid, ''), oids)) return oids return () def undoInfo(self, first, last, spec): return run_in_thread(self.storage.undoInfo, first, last, spec) def undoLog(self, first, last): return run_in_thread(self.storage.undoLog, first, last) def tpc_begin(self, id, user, description, ext, tid, status): if self.read_only: raise ReadOnlyError() if self.transaction is not None: if self.transaction.id == id: self.log("duplicate tpc_begin(%s)" % repr(id)) return else: raise StorageTransactionError("Multiple simultaneous tpc_begin" " requests from one client.") self.transaction = t = Transaction() t.id = id t.user = user t.description = description t._extension = ext self.serials = [] self.invalidated = [] self.txnlog = CommitLog() self.tid = tid self.status = status def tpc_finish(self, id): if not self.check_tid(id): return assert self.locked self.storage.tpc_finish(self.transaction) tid = self.storage.lastTransaction() if self.invalidated: self.server.invalidate(self, self.storage_id, tid, self.invalidated, self.get_size_info()) self.transaction = None self.locked = 0 self.timeout.end(self) self._handle_waiting() return tid def tpc_abort(self, id): if not self.check_tid(id): return if self.locked: self.storage.tpc_abort(self.transaction) self.transaction = None self.locked = 0 self.timeout.end(self) self._handle_waiting() def _abort(self): if not self.locked: waiting = self.storage._waiting for i in range(len(waiting)): d, z = waiting[i] if z is self: del waiting[i] self.log("Closed connection removed from waiting list." " Clients waiting: %d." 
% len(waiting)) break if self.transaction: self.tpc_abort(self.transaction.id) def storea(self, oid, serial, data, version, id): self.check_tid(id, exc=StorageTransactionError) self.txnlog.store(oid, serial, data, version) def vote(self, id): self.check_tid(id, exc=StorageTransactionError) if self.locked: return self._vote() else: return self._wait(lambda: self._vote()) def abortVersion(self, src, id): self.check_tid(id, exc=StorageTransactionError) if self.locked: return self._abortVersion(src) else: return self._wait(lambda: self._abortVersion(src)) def commitVersion(self, src, dest, id): self.check_tid(id, exc=StorageTransactionError) if self.locked: return self._commitVersion(src, dest) else: return self._wait(lambda: self._commitVersion(src, dest)) def transactionalUndo(self, trans_id, id): self.check_tid(id, exc=StorageTransactionError) if self.locked: return self._transactionalUndo(trans_id) else: return self._wait(lambda: self._transactionalUndo(trans_id)) def _tpc_begin(self, txn, tid, status): self.locked = 1 self.storage.tpc_begin(txn, tid, status) self.timeout.begin(self) def _store(self, oid, serial, data, version): try: newserial = self.storage.store(oid, serial, data, version, self.transaction) except (SystemExit, KeyboardInterrupt): raise except Exception, err: if not isinstance(err, TransactionError): exc_info = sys.exc_info() self.log("store error: %s, %s" % exc_info[:2], zLOG.ERROR, error=exc_info) del exc_info pickler = cPickle.Pickler() pickler.fast = 1 try: pickler.dump(err, 1) except: msg = "Couldn't pickle storage exception: %s" % repr(err) self.log(msg, zLOG.ERROR) err = StorageServerError(msg) newserial = err else: if serial != "\0\0\0\0\0\0\0\0": self.invalidated.append((oid, version)) self.serials.append((oid, newserial)) def _vote(self): self.client.serialnos(self.serials) return self.storage.tpc_vote(self.transaction) def _abortVersion(self, src): oids = self.storage.abortVersion(src, self.transaction) inv = [(oid, src) for oid in oids] self.invalidated.extend(inv) return oids def _commitVersion(self, src, dest): oids = self.storage.commitVersion(src, dest, self.transaction) inv = [(oid, dest) for oid in oids] self.invalidated.extend(inv) if dest: inv = [(oid, src) for oid in oids] self.invalidated.extend(inv) return oids def _transactionalUndo(self, trans_id): oids = self.storage.transactionalUndo(trans_id, self.transaction) inv = [(oid, None) for oid in oids] self.invalidated.extend(inv) return oids def _wait(self, thunk): self._thunk = thunk if self.storage._transaction: d = Delay() self.storage._waiting.append((d, self)) self.log("Transaction blocked waiting for storage. " "Clients waiting: %d." % len(self.storage._waiting)) return d else: self.log("Transaction acquired storage lock.", zLOG.BLATHER) return self._restart() def _restart(self, delay=None): self._tpc_begin(self.transaction, self.tid, self.status) loads, loader = self.txnlog.get_loader() for i in range(loads): self._store(*loader.load()) resp = self._thunk() if delay is not None: delay.reply(resp) else: return resp def _handle_waiting(self): while self.storage._waiting: delay, zeo_storage = self.storage._waiting.pop(0) if self._restart_other(zeo_storage, delay): if self.storage._waiting: n = len(self.storage._waiting) self.log("Blocked transaction restarted. 
" "Clients waiting: %d" % n) else: self.log("Blocked transaction restarted.") return def _restart_other(self, zeo_storage, delay): try: zeo_storage._restart(delay) except: self.log("Unexpected error handling waiting transaction", level=zLOG.WARNING, error=sys.exc_info()) zeo_storage.connection.close() return 0 else: return 1
def close_conn(self, conn): """Internal: remove the given connection from self.connections.
32bc881f308a45b6dbdcbdfecc2c1005801a356b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/32bc881f308a45b6dbdcbdfecc2c1005801a356b/StorageServer.py
StorageServer.ZEOStorageClass = ZEOStorage
def run(self): try: result = self._method(*self._args) except (SystemExit, KeyboardInterrupt): raise except Exception: self.delay.error(sys.exc_info()) else: self.delay.reply(result)
32bc881f308a45b6dbdcbdfecc2c1005801a356b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/32bc881f308a45b6dbdcbdfecc2c1005801a356b/StorageServer.py
return None, None
return []
def getpids(self): if not os.path.exists(self.env.zeo_pid): # If there's no pid file, assume the server isn't running return None, None return map(int, open(self.env.zeo_pid).read().split())
c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c/testStart.py
ppid, pid = self.getpids() if ppid is None: return self.kill(pids=[pid])
self.kill(pids=self.getpids())
def stop_server(self): ppid, pid = self.getpids() if ppid is None: return self.kill(pids=[pid])
c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c/testStart.py
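The testStart rows above change how pids are read from the pid file and handed to kill. A minimal, generic version of that pid-file handling; the function names mirror the test helpers but the body, path argument, and signal choice are illustrative only:

# Illustrative sketch: read whitespace-separated pids from a file and signal each.
import os
import signal

def getpids(pidfile):
    if not os.path.exists(pidfile):
        return []                          # no pid file: assume the server is not running
    with open(pidfile) as f:
        return [int(p) for p in f.read().split()]

def stop_server(pidfile):
    for pid in getpids(pidfile):
        os.kill(pid, signal.SIGTERM)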
def testNoPort(self):
def testErrNoPort(self):
def testNoPort(self): outp = self.system("-s") self.assert_(outp.find("No port specified") != -1)
c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c/testStart.py
ppid, pid = self.getpids() os.kill(pid, signal.SIGHUP)
self.kill(signal.SIGUSR2, pids=self.getpids())
def testLogRestart(self): port = 9090 logfile1 = tempfile.mktemp(suffix="log") logfile2 = tempfile.mktemp(suffix="log") os.environ["STUPID_LOG_FILE"] = logfile1 os.environ["EVENT_LOG_FILE"] = logfile1
c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c5f32a03f4cc1973fe24cbb4fb7fe39ac21fa72c/testStart.py
raise ConflictError, oid
raise ConflictError, "transaction already invalidated"
def tpc_begin(self, transaction, sub=None): if self._invalid(None): # Some nitwit invalidated everything! raise ConflictError, oid self._invalidating=[]
14ebbc297165d4b3f298a95161d5f00be5c99f7c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/14ebbc297165d4b3f298a95161d5f00be5c99f7c/Connection.py
if key[:3]=='_p_':
k=key[:3] if k=='_p_' or k=='_v_':
def __setattr__(self,key,value): ' ' if key[:3]=='_p_': self.__dict__[key]=value return
eb86ea4b33c89a52efca0653f0cf9fc43616355f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/eb86ea4b33c89a52efca0653f0cf9fc43616355f/Persistence.py
-h adddress -- host address to listen on
-h address -- host address to listen on
def main(argv): me = argv[0] sys.path.insert(0, directory(me, 2)) global LOG, INFO, ERROR from zLOG import LOG, INFO, WARNING, ERROR, PANIC from ZEO.util import Environment env = Environment(me) # XXX hack for profiling support global unix, storages, asyncore args = [] last = '' for a in argv[1:]: if (a[:1] != '-' and a.find('=') > 0 and last != '-S'): # lame, sorry a = a.split("=") os.environ[a[0]] = "=".join(a[1:]) continue args.append(a) last = a usage="""%s [options] [filename] where options are: -D -- Run in debug mode -d -- Set STUPID_LOG_SEVERITY to -300 -U -- Unix-domain socket file to listen on -u username or uid number The username to run the ZEO server as. You may want to run the ZEO server as 'nobody' or some other user with limited resouces. The only works under Unix, and if ZServer is started by root. -p port -- port to listen on -h adddress -- host address to listen on -s -- Don't use zdeamon -S storage_name=module_path:attr_name -- A storage specification where: storage_name -- is the storage name used in the ZEO protocol. This is the name that you give as the optional 'storage' keyword argument to the ClientStorage constructor. module_path -- This is the path to a Python module that defines the storage object(s) to be served. The module path should omit the prefix (e.g. '.py'). attr_name -- This is the name to which the storage object is assigned in the module. -P file -- Run under profile and dump output to file. Implies the -s flag. if no file name is specified, then %s is used. """ % (me, env.fs) try: opts, args = getopt.getopt(args, 'p:Dh:U:sS:u:P:d') except getopt.error, msg: print usage print msg sys.exit(1) port = None debug = 0 host = '' unix = None Z = 1 UID = 'nobody' prof = None detailed = 0 fs = None for o, v in opts: if o =='-p': port = int(v) elif o =='-h': host = v elif o =='-U': unix = v elif o =='-u': UID = v elif o =='-D': debug = 1 elif o =='-d': detailed = 1 elif o =='-s': Z = 0 elif o =='-P': prof = v if prof: Z = 0 if port is None and unix is None: print usage print 'No port specified.' sys.exit(1) if args: if len(args) > 1: print usage print 'Unrecognized arguments: ', " ".join(args[1:]) sys.exit(1) fs = args[0] if debug: os.environ['Z_DEBUG_MODE'] = '1' if detailed: os.environ['STUPID_LOG_SEVERITY'] = '-300' rotate_logs() # reinitialize zLOG set_uid(UID) if Z: try: import posix except: pass else: import zdaemon zdaemon.run(sys.argv, env.zeo_pid) try: if Z: # Change current directory (for core dumps etc.) try: os.chdir(env.var) except os.error: LOG('ZEO/start.py', WARNING, "Couldn't chdir to %s" % env.var) else: LOG('ZEO/start.py', INFO, "Changed directory to %s" % env.var) import ZEO.StorageServer, asyncore storages = {} for o, v in opts: if o == '-S': n, m = v.split("=", 1) if m.find(":") >= 0: # we got an attribute name m, a = m.split(':') else: # attribute name must be same as storage name a=n storages[n]=get_storage(m,a) if not storages: from ZODB.FileStorage import FileStorage storages['1'] = FileStorage(fs or env.fs) # Try to set up a signal handler setup_signals(storages) items = storages.items() items.sort() for kv in items: LOG('ZEO/start.py', INFO, 'Serving %s:\t%s' % kv) if not unix: unix = host, port ZEO.StorageServer.StorageServer(unix, storages) if not Z: try: pid = os.getpid() except: pass # getpid not supported else: f = open(env.zeo_pid, 'w') f.write("%s\n" % pid) f.close() except: # Log startup exception and tell zdaemon not to restart us. 
info = sys.exc_info() try: LOG("ZEO/start.py", PANIC, "Startup exception", error=info) except: pass import traceback traceback.print_exception(*info) sys.exit(0) try: try: ThreadedAsync.loop() finally: if os.path.isfile(env.zeo_pid): os.unlink(env.zeo_pid) except SystemExit: raise except: info = sys.exc_info() try: LOG("ZEO/start.py", PANIC, "Unexpected error", error=info) except: pass import traceback traceback.print_exception(*info) sys.exit(1)
d6fd39fad08b0e8898ea86cecd01b6d83ba8c2bc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d6fd39fad08b0e8898ea86cecd01b6d83ba8c2bc/start.py
ThreadedAsync.loop()
ThreadedAsync.LoopCallback.loop()
def main(argv): me = argv[0] sys.path.insert(0, directory(me, 2)) global LOG, INFO, ERROR from zLOG import LOG, INFO, WARNING, ERROR, PANIC from ZEO.util import Environment env = Environment(me) # XXX hack for profiling support global unix, storages, asyncore args = [] last = '' for a in argv[1:]: if (a[:1] != '-' and a.find('=') > 0 and last != '-S'): # lame, sorry a = a.split("=") os.environ[a[0]] = "=".join(a[1:]) continue args.append(a) last = a usage="""%s [options] [filename] where options are: -D -- Run in debug mode -d -- Set STUPID_LOG_SEVERITY to -300 -U -- Unix-domain socket file to listen on -u username or uid number The username to run the ZEO server as. You may want to run the ZEO server as 'nobody' or some other user with limited resouces. The only works under Unix, and if ZServer is started by root. -p port -- port to listen on -h adddress -- host address to listen on -s -- Don't use zdeamon -S storage_name=module_path:attr_name -- A storage specification where: storage_name -- is the storage name used in the ZEO protocol. This is the name that you give as the optional 'storage' keyword argument to the ClientStorage constructor. module_path -- This is the path to a Python module that defines the storage object(s) to be served. The module path should omit the prefix (e.g. '.py'). attr_name -- This is the name to which the storage object is assigned in the module. -P file -- Run under profile and dump output to file. Implies the -s flag. if no file name is specified, then %s is used. """ % (me, env.fs) try: opts, args = getopt.getopt(args, 'p:Dh:U:sS:u:P:d') except getopt.error, msg: print usage print msg sys.exit(1) port = None debug = 0 host = '' unix = None Z = 1 UID = 'nobody' prof = None detailed = 0 fs = None for o, v in opts: if o =='-p': port = int(v) elif o =='-h': host = v elif o =='-U': unix = v elif o =='-u': UID = v elif o =='-D': debug = 1 elif o =='-d': detailed = 1 elif o =='-s': Z = 0 elif o =='-P': prof = v if prof: Z = 0 if port is None and unix is None: print usage print 'No port specified.' sys.exit(1) if args: if len(args) > 1: print usage print 'Unrecognized arguments: ', " ".join(args[1:]) sys.exit(1) fs = args[0] if debug: os.environ['Z_DEBUG_MODE'] = '1' if detailed: os.environ['STUPID_LOG_SEVERITY'] = '-300' rotate_logs() # reinitialize zLOG set_uid(UID) if Z: try: import posix except: pass else: import zdaemon zdaemon.run(sys.argv, env.zeo_pid) try: if Z: # Change current directory (for core dumps etc.) try: os.chdir(env.var) except os.error: LOG('ZEO/start.py', WARNING, "Couldn't chdir to %s" % env.var) else: LOG('ZEO/start.py', INFO, "Changed directory to %s" % env.var) import ZEO.StorageServer, asyncore storages = {} for o, v in opts: if o == '-S': n, m = v.split("=", 1) if m.find(":") >= 0: # we got an attribute name m, a = m.split(':') else: # attribute name must be same as storage name a=n storages[n]=get_storage(m,a) if not storages: from ZODB.FileStorage import FileStorage storages['1'] = FileStorage(fs or env.fs) # Try to set up a signal handler setup_signals(storages) items = storages.items() items.sort() for kv in items: LOG('ZEO/start.py', INFO, 'Serving %s:\t%s' % kv) if not unix: unix = host, port ZEO.StorageServer.StorageServer(unix, storages) if not Z: try: pid = os.getpid() except: pass # getpid not supported else: f = open(env.zeo_pid, 'w') f.write("%s\n" % pid) f.close() except: # Log startup exception and tell zdaemon not to restart us. 
info = sys.exc_info() try: LOG("ZEO/start.py", PANIC, "Startup exception", error=info) except: pass import traceback traceback.print_exception(*info) sys.exit(0) try: try: ThreadedAsync.loop() finally: if os.path.isfile(env.zeo_pid): os.unlink(env.zeo_pid) except SystemExit: raise except: info = sys.exc_info() try: LOG("ZEO/start.py", PANIC, "Unexpected error", error=info) except: pass import traceback traceback.print_exception(*info) sys.exit(1)
d6fd39fad08b0e8898ea86cecd01b6d83ba8c2bc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/d6fd39fad08b0e8898ea86cecd01b6d83ba8c2bc/start.py
opts, args = getopt.getopt(sys.argv[1:], "s:")
opts, args = getopt.getopt(sys.argv[1:], "ls:")
def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file try: f = open(filename, "rb") except IOError, msg: print "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = ZEOCacheSimulation(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior while 1: # Read a record r = f.read(24) if len(r) < 24: break # Decode it ts, code, oid, serial = struct.unpack(">ii8s8s", r) dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
try: f = open(filename, "rb") except IOError, msg: print "can't open %s: %s" % (filename, msg) return 1
if filename.endswith(".gz"): try: import gzip except ImportError: print >>sys.stderr, "can't read gzipped files (no module gzip)" return 1 try: f = gzip.open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 elif filename == "-": f = sys.stdin else: try: f = open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1
def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file try: f = open(filename, "rb") except IOError, msg: print "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = ZEOCacheSimulation(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior while 1: # Read a record r = f.read(24) if len(r) < 24: break # Decode it ts, code, oid, serial = struct.unpack(">ii8s8s", r) dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
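The simul.py row above extends the plain open() call so the trace can also be a gzipped file or "-" for standard input. Roughly the same pattern in modern Python; open_trace is a made-up helper name:

# Illustrative only: choose the right binary stream for a trace argument.
import gzip
import sys

def open_trace(filename):
    if filename.endswith(".gz"):
        return gzip.open(filename, "rb")
    if filename == "-":
        return sys.stdin.buffer            # binary stdin
    return open(filename, "rb")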
sim = ZEOCacheSimulation(cachelimit)
sim = simclass(cachelimit)
def main(): # Parse options cachelimit = 20*1000*1000 try: opts, args = getopt.getopt(sys.argv[1:], "s:") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-s': cachelimit = int(float(a) * 1e6) if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file try: f = open(filename, "rb") except IOError, msg: print "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = ZEOCacheSimulation(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior while 1: # Read a record r = f.read(24) if len(r) < 24: break # Decode it ts, code, oid, serial = struct.unpack(">ii8s8s", r) dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
def __init__(self):
def __init__(self, cachelimit):
    self.cachelimit = cachelimit
def __init__(self):
    # Initialize global statistics
    self.epoch = None
    self.total_loads = 0
    self.total_hits = 0       # Subclass must increment
    self.total_invals = 0
    self.total_writes = 0
    # Reset per-run statistics and set up simulation data
    self.restart()
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
self.invals += 1
self.total_invals += 1
def event(self, ts, dlen, _version, code, _current, oid, _serial):
    # Record first and last timestamp seen
    if self.ts0 is None:
        self.ts0 = ts
        if self.epoch is None:
            self.epoch = ts
    self.ts1 = ts
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
Simulation.__init__(self)
self.filelimit = cachelimit / 2
Simulation.__init__(self, cachelimit)
def __init__(self, cachelimit):
    # Initialize base class
    Simulation.__init__(self)
    # Store simulation parameters
    self.filelimit = cachelimit / 2
    # Initialize additional global statistics
    self.total_flips = 0
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
if self.filesize[self.current] + size > self.filelimit:
if self.filesize[self.current] + size > self.cachelimit / 2:
def write(self, oid, size):
    # Fudge because size is rounded up to multiples of 256.  (31
    # is header overhead per cache record; 127 is to compensate
    # for rounding up to multiples of 256.)
    size = size + 31 - 127
    if self.filesize[self.current] + size > self.filelimit:
        # Cache flip
        self.flips += 1
        self.total_flips += 1
        self.current = 1 - self.current
        self.filesize[self.current] = 4
        self.fileoids[self.current] = {}
    self.filesize[self.current] += size
    self.fileoids[self.current][oid] = 1
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
format = "%12s %9s %8s %8s %6s %6s %5s %6s"
format = "%12s %9s %8s %8s %6s %6s %6s %6s"
def inval(self, oid):
    if self.fileoids[self.current].get(oid):
        del self.fileoids[self.current][oid]
    elif self.fileoids[1 - self.current].get(oid):
        del self.fileoids[1 - self.current][oid]
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
if self.loads: self.report()
self.report()
def finish(self):
    if self.loads:
        self.report()
    if self.total_loads:
        print (self.format + " OVERALL") % (
            time.ctime(self.epoch)[4:-8],
            duration(self.ts1 - self.epoch),
            self.total_loads,
            self.total_hits,
            self.total_invals,
            self.total_writes,
            self.total_flips,
            hitrate(self.total_loads, self.total_hits))
7c6cd709b29c98e3a0df9e9501bbc9857e03429a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/7c6cd709b29c98e3a0df9e9501bbc9857e03429a/simul.py
def pack(self, t, wait=0):
def pack(self, t, wait=None):
    if wait is not None:
        wait = MTDelay()
def pack(self, t, wait=0):
    t = threading.Thread(target=self._pack, args=(t, wait))
    t.start()
ea06d610f173112e41866673927585a6d26f436b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ea06d610f173112e41866673927585a6d26f436b/StorageServer.py
def _pack(self, t, wait=0):
    if wait is not None:
        return wait

def _pack(self, t, delay):
def pack(self, t, wait=0):
    t = threading.Thread(target=self._pack, args=(t, wait))
    t.start()
ea06d610f173112e41866673927585a6d26f436b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ea06d610f173112e41866673927585a6d26f436b/StorageServer.py
if wait:
if delay is not None:
def _pack(self, t, wait=0):
    try:
        self.__storage.pack(t, referencesf)
    except:
        self._log('Pack failed for %s' % self.__storage_id,
                  zLOG.ERROR,
                  error=sys.exc_info())
        if wait:
            raise
    else:
        # XXX Why doesn't we broadcast on wait?
        if not wait:
            # Broadcast new size statistics
            self.server.invalidate(0, self.__storage_id, (),
                                   self.get_size_info())
ea06d610f173112e41866673927585a6d26f436b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ea06d610f173112e41866673927585a6d26f436b/StorageServer.py
if not wait:
if delay is None:
def _pack(self, t, wait=0):
    try:
        self.__storage.pack(t, referencesf)
    except:
        self._log('Pack failed for %s' % self.__storage_id,
                  zLOG.ERROR,
                  error=sys.exc_info())
        if wait:
            raise
    else:
        # XXX Why doesn't we broadcast on wait?
        if not wait:
            # Broadcast new size statistics
            self.server.invalidate(0, self.__storage_id, (),
                                   self.get_size_info())
ea06d610f173112e41866673927585a6d26f436b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ea06d610f173112e41866673927585a6d26f436b/StorageServer.py
if not self._handle_waiting():
    self._transaction = None
    self.strategy = None
self._transaction = None
self.strategy = None
self._handle_waiting()
def tpc_finish(self, id):
    if not self._check_tid(id):
        return
    invalidated = self.strategy.tpc_finish()
    if invalidated:
        self.server.invalidate(self, self.__storage_id,
                               invalidated, self.get_size_info())
    if not self._handle_waiting():
        self._transaction = None
        self.strategy = None
1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9/StorageServer.py
if not self._handle_waiting():
    self._transaction = None
    self.strategy = None
self._transaction = None
self.strategy = None
self._handle_waiting()
def tpc_abort(self, id):
    if not self._check_tid(id):
        return
    self.strategy.tpc_abort()
    if not self._handle_waiting():
        self._transaction = None
        self.strategy = None
1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9/StorageServer.py
"%d clients waiting." % len(self.__storage._waiting))
"Clients waiting: %d." % len(self.__storage._waiting))
def wait(self):
    if self.__storage._transaction:
        d = Delay()
        self.__storage._waiting.append((d, self))
        self._log("Transaction blocked waiting for storage. "
                  "%d clients waiting." % len(self.__storage._waiting))
        return d
    else:
        self.restart()
1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9/StorageServer.py
        break
if self.__storage._waiting:
    n = len(self.__storage._waiting)
    self._log("Blocked transaction restarted. "
              "%d clients waiting." % n)
else:
    self._log("Blocked transaction restarted.")
if self.__storage._waiting:
    n = len(self.__storage._waiting)
    self._log("Blocked transaction restarted. "
              "Clients waiting: %d" % n)
else:
    self._log("Blocked transaction restarted.")
return
def _handle_waiting(self):
    while self.__storage._waiting:
        delay, zeo_storage = self.__storage._waiting.pop(0)
        if self._restart(zeo_storage, delay):
            break
    if self.__storage._waiting:
        n = len(self.__storage._waiting)
        self._log("Blocked transaction restarted. "
                  "%d clients waiting." % n)
    else:
        self._log("Blocked transaction restarted.")
1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f9a2369082ff0d17f8ce1430fe22ba1a52aebd9/StorageServer.py
if hasattr(zLOG.log_write, 'reinitialize'):
    zLOG.log_write.reinitialize()
else:
    zLOG._stupid_dest=None
init = getattr(zLOG, 'initialize', None)
if init is not None:
    init()
def rotate_logs():
    import zLOG
    if hasattr(zLOG.log_write, 'reinitialize'):
        zLOG.log_write.reinitialize()
    else:
        # Hm, lets at least try to take care of the stupid logger:
        zLOG._stupid_dest=None
97336dde0e21bfff0a2ab8e9388ecc83dcc99234 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/97336dde0e21bfff0a2ab8e9388ecc83dcc99234/start.py
buf = ("------\n" "%s %s %s %s\n%s" % (log_time(), severity_string(severity), subsystem, summary, detail)) else: buf = ("------\n" "%s %s %s %s" % (log_time(), severity_string(severity), subsystem, summary)) print >> _log_dest, buf
buf.append(str(detail))
def log(self, subsystem, severity, summary, detail, error):
    if _log_dest is None or severity < _log_level:
        return
b3a1c826c2afe594a2aaf9df7f2ee5090c5f83c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/b3a1c826c2afe594a2aaf9df7f2ee5090c5f83c4/MinimalLogger.py
    print >> _log_dest, ''.join(lines)
except:
    print >> _log_dest, "%s: %s" % error[:2]
    buf.append(''.join(lines))
except '':
    buf.append("%s: %s" % error[:2])
buf.append("")
_log_dest.write("\n".join(buf))
def log(self, subsystem, severity, summary, detail, error):
    if _log_dest is None or severity < _log_level:
        return
b3a1c826c2afe594a2aaf9df7f2ee5090c5f83c4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/b3a1c826c2afe594a2aaf9df7f2ee5090c5f83c4/MinimalLogger.py
if self.__onCommitAction is not None:
if self.__onCommitActions is not None:
def tpc_abort(self, transaction):
    if self.__onCommitAction is not None:
        del self.__onCommitActions
    self._storage.tpc_abort(transaction)
    cache=self._cache
    cache.invalidate(self._invalidated)
    cache.invalidate(self._invalidating)
    self._invalidate_creating()
53bc991f9693a4ab9a32366419dde963d069876f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/53bc991f9693a4ab9a32366419dde963d069876f/Connection.py
conn.setDebugInfo(REQUEST.get('PATH_INFO','(No path info)'))
conn.setDebugInfo(REQUEST.environ, REQUEST.other)
def __bobo_traverse__(self, REQUEST=None, name=None):
    db, aname, version_support = self._stuff
    if version_support is not None and REQUEST is not None:
        version=REQUEST.get(version_support,'')
    else:
        version=''
    conn=db.open(version)
653877e5666945f6e89b9f8913ba1b6c860f7b1e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/653877e5666945f6e89b9f8913ba1b6c860f7b1e/ZApplication.py
'tpc_finish', 'undo', 'undoLog', 'undoInfo', 'versionEmpty',
'tpc_finish', 'undo', 'undoLog', 'undoInfo', 'versionEmpty', 'versions',
def log_info(self, message, type='info'):
    if type=='error':
        type=ERROR
    else:
        type=INFO
    LOG('ZEO Server', type, message)
e117f8e08fcbc85b8d3b9fc755438bd8300f2065 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e117f8e08fcbc85b8d3b9fc755438bd8300f2065/StorageServer.py
if len(m) > 60: m=m[:60]+' ...'
if len(m) > 90: m=m[:90]+' ...'
def message_input(self, message,
                  dump=dump, Unpickler=Unpickler, StringIO=StringIO,
                  None=None):
    if __debug__:
        m=`message`
        if len(m) > 60: m=m[:60]+' ...'
        blather('message_input', m)
e117f8e08fcbc85b8d3b9fc755438bd8300f2065 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e117f8e08fcbc85b8d3b9fc755438bd8300f2065/StorageServer.py
r=dump(r,1)
try: r=dump(r,1)
except:
    r=StorageServerError("Couldn't pickle result %s" % `r`)
    dump('',1)
    r=dump(r,1)
    rt='E'
def message_input(self, message,
                  dump=dump, Unpickler=Unpickler, StringIO=StringIO,
                  None=None):
    if __debug__:
        m=`message`
        if len(m) > 60: m=m[:60]+' ...'
        blather('message_input', m)
e117f8e08fcbc85b8d3b9fc755438bd8300f2065 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e117f8e08fcbc85b8d3b9fc755438bd8300f2065/StorageServer.py
p, s = storage.load(oid,'')
try:
    p, s = storage.load(oid,'')
except KeyError:
    if sv:
        p=s=None
    else:
        raise
def zeoLoad(self, oid):
    storage=self.__storage
    v=storage.modifiedInVersion(oid)
    if v:
        pv, sv = storage.load(oid, v)
    else:
        pv=sv=None
    p, s = storage.load(oid,'')
    return p, s, v, pv, sv
e117f8e08fcbc85b8d3b9fc755438bd8300f2065 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e117f8e08fcbc85b8d3b9fc755438bd8300f2065/StorageServer.py
__version__='$Revision: 1.14 $'[11:-2]
__version__='$Revision: 1.15 $'[11:-2]
def info(RESPONSE): RESPONSE['Content-type']= 'text/plain'
cdd848ac02b18d359dbc179e24ffbbf43ce9c1bd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/cdd848ac02b18d359dbc179e24ffbbf43ce9c1bd/DemoStorage.py
oids.append(oid)
def abortVersion(self, src, transaction):
    if transaction is not self._transaction:
        raise POSException.StorageTransactionError(self, transaction)
    if not src:
        raise POSException.VersionCommitError("Invalid version")
cdd848ac02b18d359dbc179e24ffbbf43ce9c1bd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/cdd848ac02b18d359dbc179e24ffbbf43ce9c1bd/DemoStorage.py
new_con = self._db.databases[database_name].open()
new_con = self._db.databases[database_name].open(
    transaction_manager=self.transaction_manager,
    mvcc=self._mvcc, version=self._version, synch=self._synch,
    )
def get_connection(self, database_name):
    """Return a Connection for the named database."""
    connection = self.connections.get(database_name)
    if connection is None:
        new_con = self._db.databases[database_name].open()
        self.connections.update(new_con.connections)
        new_con.connections = self.connections
        connection = new_con
    return connection
68ef2b040b5891e15b9463954a8574b69550af80 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/68ef2b040b5891e15b9463954a8574b69550af80/Connection.py
file.seek(p+16)
file.seek(pos-p+8)
def _redundant_pack(self, file, pos):
    file.seek(pos-8)
    p=u64(file.read(8))
    file.seek(p+16)
    return file.read(1) not in ' u'
fc8470e295102d697d2ea0593c7a7bd2132c087f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fc8470e295102d697d2ea0593c7a7bd2132c087f/FileStorage.py
'The database has already been packed to a later time')
'The database has already been packed to a later time\n' 'or no changes have been made since the last pack')
def pack(self, t, referencesf):
    """Copy data from the current database file to a packed file

    Non-current records from transactions with time-stamp strings less
    than packtss are ommitted. As are all undone records.

    Also, data back pointers that point before packtss are resolved and
    the associated data are copied, since the old records are not copied.
    """
fc8470e295102d697d2ea0593c7a7bd2132c087f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fc8470e295102d697d2ea0593c7a7bd2132c087f/FileStorage.py
if connection.has_key(version) and not temporary:
if connections.has_key(version) and not temporary:
def open(self, version='', transaction=None, temporary=0, force=None,
         waitflag=1):
    """Return a object space (AKA connection) to work in
682cd342075f643e431f31cd3f1717e767e96e0d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/682cd342075f643e431f31cd3f1717e767e96e0d/DB.py
return t
return t, [1, 3, 5, 7, 11]
def _build_degenerate_tree(self):
    # Build the buckets and chain them together.
    bucket11 = IISet([11])
6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18/testBTrees.py
t = self._build_degenerate_tree()
self.assertEqual(len(t), 5)
self.assertEqual(list(t.keys()), [1, 3, 5, 7, 11])
t, keys = self._build_degenerate_tree()
self.assertEqual(len(t), len(keys))
self.assertEqual(list(t.keys()), keys)
def testDegenerateBasicOps(self):
    t = self._build_degenerate_tree()
    self.assertEqual(len(t), 5)
    self.assertEqual(list(t.keys()), [1, 3, 5, 7, 11])
    # has_key actually returns the depth of a bucket.
    self.assertEqual(t.has_key(1), 4)
    self.assertEqual(t.has_key(3), 4)
    self.assertEqual(t.has_key(5), 6)
    self.assertEqual(t.has_key(7), 5)
    self.assertEqual(t.has_key(11), 5)
    for i in 0, 2, 4, 6, 8, 9, 10, 12:
        self.assertEqual(t.has_key(i), 0)
6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18/testBTrees.py
self.assertEqual(list(tree.keys()), keys)
sorted_keys = keys[:]
sorted_keys.sort()
self.assertEqual(list(tree.keys()), sorted_keys)
def _checkRanges(self, tree, keys):
    self.assertEqual(len(tree), len(keys))
    self.assertEqual(list(tree.keys()), keys)
    for k in keys:
        self.assert_(tree.has_key(k))
    if keys:
        lokey = min(keys)
        hikey = max(keys)
        self.assertEqual(lokey, tree.minKey())
        self.assertEqual(hikey, tree.maxKey())
    else:
        lokey = hikey = 42
6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18/testBTrees.py
lokey = min(keys)
hikey = max(keys)
lokey = sorted_keys[0]
hikey = sorted_keys[-1]
def _checkRanges(self, tree, keys):
    self.assertEqual(len(tree), len(keys))
    self.assertEqual(list(tree.keys()), keys)
    for k in keys:
        self.assert_(tree.has_key(k))
    if keys:
        lokey = min(keys)
        hikey = max(keys)
        self.assertEqual(lokey, tree.minKey())
        self.assertEqual(hikey, tree.maxKey())
    else:
        lokey = hikey = 42
6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18/testBTrees.py
t = self._build_degenerate_tree()
self._checkRanges(t, [1, 3, 5, 7, 11])
t, keys = self._build_degenerate_tree()
self._checkRanges(t, keys)

def XXXtestDeletes(self):
    t, keys = self._build_degenerate_tree()
    for oneperm in permutations(keys):
        t, keys = self._build_degenerate_tree()
        for key in oneperm:
            t.remove(key)
            keys.remove(key)
            self._checkRanges(t, keys)
    del t
def testRanges(self):
    t = self._build_degenerate_tree()
    self._checkRanges(t, [1, 3, 5, 7, 11])
6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18/testBTrees.py
def main(): TextTestRunner().run(test_suite())
6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6d37f216bcd1982dfe1fc67ff9df9c9cfcf30f18/testBTrees.py