| rem (string, lengths 1-322k) | add (string, lengths 0-2.05M) | context (string, lengths 4-228k) | meta (string, lengths 156-215) |
---|---|---|---|
mon_addr = zo.monitor_address.address
|
mon_addr = zo.monitor_address
|
def main(): global pid pid = os.getpid() label = 'zeoserver:%d' % pid log(label, 'starting') # We don't do much sanity checking of the arguments, since if we get it # wrong, it's a bug in the test suite. keep = 0 configfile = None # Parse the arguments and let getopt.error percolate opts, args = getopt.getopt(sys.argv[1:], 'kC:') for opt, arg in opts: if opt == '-k': keep = 1 elif opt == '-C': configfile = arg zo = ZEOOptions() zo.realize(["-C", configfile]) zeo_port = int(zo.address[1]) # XXX a hack if zo.auth_protocol == "plaintext": import ZEO.tests.auth_plaintext # Open the config file and let ZConfig parse the data there. Then remove # the config file, otherwise we'll leave turds. # The rest of the args are hostname, portnum test_port = zeo_port + 1 test_addr = ('localhost', test_port) addr = ('localhost', zeo_port) log(label, 'creating the storage server') storage = zo.storages[0].open() mon_addr = None if zo.monitor_address: mon_addr = zo.monitor_address.address server = StorageServer( zo.address, {"1": storage}, read_only=zo.read_only, invalidation_queue_size=zo.invalidation_queue_size, transaction_timeout=zo.transaction_timeout, monitor_address=mon_addr, auth_protocol=zo.auth_protocol, auth_database=zo.auth_database, auth_realm=zo.auth_realm) try: log(label, 'creating the test server, keep: %s', keep) t = ZEOTestServer(test_addr, server, keep) except socket.error, e: if e[0] <> errno.EADDRINUSE: raise log(label, 'addr in use, closing and exiting') storage.close() cleanup(storage) sys.exit(2) t.register_socket(server.dispatcher) # Create daemon suicide thread d = Suicide(test_addr) d.setDaemon(1) d.start() # Loop for socket events log(label, 'entering ThreadedAsync loop') ThreadedAsync.LoopCallback.loop()
|
31fa209ffd400f08df78abba94c3d19204ef4e56 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/31fa209ffd400f08df78abba94c3d19204ef4e56/zeoserver.py
|
except ClientDisconnected:
|
except (ClientDisconnected, thread.error), err: get_transaction().abort()
|
def checkReconnection(self): """Check that the client reconnects when a server restarts."""
|
e670f33c2413614ea64909df1c421b339e433d32 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e670f33c2413614ea64909df1c421b339e433d32/testZEO.py
|
o=objects[-1]
|
o=objects.pop()
|
def commit(self, subtransaction=None): 'Finalize the transaction'
|
bd19915024199a14de29dd3e73c75b5eaf5e7d64 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/bd19915024199a14de29dd3e73c75b5eaf5e7d64/Transaction.py
|
del objects[-1]
|
def commit(self, subtransaction=None): 'Finalize the transaction'
|
bd19915024199a14de29dd3e73c75b5eaf5e7d64 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/bd19915024199a14de29dd3e73c75b5eaf5e7d64/Transaction.py
|
|
if i < first:
    i = i+1
    continue
|
def undoLog(self, first, last, filter=None): self._lock_acquire() try: packt=self._packt if packt is None: raise POSException.UndoError, ( 'Undo is currently disabled for database maintenance.<p>') pos=self._pos if pos < 39: return [] file=self._file seek=file.seek read=file.read unpack=struct.unpack strip=string.strip encode=base64.encodestring r=[] append=r.append i=0 while i < last and pos > 39: seek(pos-8) pos=pos-u64(read(8))-8 if i < first: i = i+1 continue seek(pos) h=read(23) tid, tl, status, ul, dl, el = unpack(">8s8scHHH", h) if tid < packt: break if status != ' ': continue u=ul and read(ul) or '' d=dl and read(dl) or '' d={'id': encode(tid+p64(pos))[:22], 'time': TimeStamp(tid).timeTime(), 'user_name': u, 'description': d} if el: try: e=loads(read(el)) d.update(e) except: pass if filter is None or filter(d): append(d) i=i+1 return r finally: self._lock_release()
|
1dd8de3bd8e5b8996ad4b887e09c3b15e31a79a1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1dd8de3bd8e5b8996ad4b887e09c3b15e31a79a1/FileStorage.py
|
|
append(d)
|
if i >= first: append(d)
|
def undoLog(self, first, last, filter=None): self._lock_acquire() try: packt=self._packt if packt is None: raise POSException.UndoError, ( 'Undo is currently disabled for database maintenance.<p>') pos=self._pos if pos < 39: return [] file=self._file seek=file.seek read=file.read unpack=struct.unpack strip=string.strip encode=base64.encodestring r=[] append=r.append i=0 while i < last and pos > 39: seek(pos-8) pos=pos-u64(read(8))-8 if i < first: i = i+1 continue seek(pos) h=read(23) tid, tl, status, ul, dl, el = unpack(">8s8scHHH", h) if tid < packt: break if status != ' ': continue u=ul and read(ul) or '' d=dl and read(dl) or '' d={'id': encode(tid+p64(pos))[:22], 'time': TimeStamp(tid).timeTime(), 'user_name': u, 'description': d} if el: try: e=loads(read(el)) d.update(e) except: pass if filter is None or filter(d): append(d) i=i+1 return r finally: self._lock_release()
|
1dd8de3bd8e5b8996ad4b887e09c3b15e31a79a1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1dd8de3bd8e5b8996ad4b887e09c3b15e31a79a1/FileStorage.py
|
fs = ZODB.FileStorage.FileStorage(fs_name, create=1)
s, server, pid = forker.start_zeo(fs, domain=domain)
|
s, server, pid = forker.start_zeo("FileStorage", (fs_name, 1), domain=domain)
|
def main(args): opts, args = getopt.getopt(args, 'zd:n:Ds:LMt:U') s = None compress = None data=sys.argv[0] nrep=5 minimize=0 detailed=1 cache = None domain = 'AF_INET' threads = 1 for o, v in opts: if o=='-n': nrep = int(v) elif o=='-d': data = v elif o=='-s': s = v elif o=='-z': import zlib compress = zlib.compress elif o=='-L': minimize=1 elif o=='-M': detailed=0 elif o=='-D': global debug os.environ['STUPID_LOG_FILE']='' os.environ['STUPID_LOG_SEVERITY']='-999' debug = 1 elif o == '-C': cache = 'speed' elif o == '-U': domain = 'AF_UNIX' elif o == '-t': threads = int(v) zeo_pipe = None if s: s = __import__(s, globals(), globals(), ('__doc__',)) s = s.Storage server = None else: fs = ZODB.FileStorage.FileStorage(fs_name, create=1) s, server, pid = forker.start_zeo(fs, domain=domain) data=open(data).read() db=ZODB.DB(s, # disable cache deactivation cache_size=4000, cache_deactivate_after=6000,) print "Beginning work..." results={1:[], 10:[], 100:[], 1000:[]} if threads > 1: import threading l = [] for i in range(threads): t = threading.Thread(target=work, args=(db, results, nrep, compress, data, detailed, minimize, i)) l.append(t) for t in l: t.start() for t in l: t.join() else: work(db, results, nrep, compress, data, detailed, minimize) if server is not None: server.close() os.waitpid(pid, 0) if detailed: print '-'*24 print "num\tmean\tmin\tmax" for r in 1, 10, 100, 1000: times = [] for time, conf in results[r]: times.append(time) t = mean(times) print "%d\t%.4f\t%.4f\t%.4f" % (r, t, min(times), max(times))
|
8815646ea15ef7cfc1f31f4bdb7843a93ca3acfa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/8815646ea15ef7cfc1f31f4bdb7843a93ca3acfa/speed.py
|
if not self._connected: raise ClientDisconnected()
try: r=self._call(self.__begin, id, user, desc, ext)
|
try:
    if not self._connected:
        raise ClientDisconnected(
            "This action is temporarily unavailable.<p>")
    r=self._call(self.__begin, id, user, desc, ext)
|
def tpc_begin(self, transaction): self._lock_acquire() try: if self._transaction is transaction: return
|
8f606cb0e4f6d9c34d7073501ad536f247ad1c2f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/8f606cb0e4f6d9c34d7073501ad536f247ad1c2f/ClientStorage.py
|
if not self._read_only_fallback:
|
if not self._read_only_fallback or self.is_connected():
|
def testConnection(self, conn): """Return a pair (stub, preferred).
|
5cf04dacd0d2e4367f3a43c24e026ff31c750f78 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/5cf04dacd0d2e4367f3a43c24e026ff31c750f78/ClientStorage.py
|
if v:
|
if ver:
|
def setVersionCacheSize(self, v): self._version_cache_size=v for ver in self._pools[0].keys(): if v: for c in self._pools[0][ver][1]: c._cache.cache_size=v
|
96d783ca9c76189d804fb5578157fee54c475baa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/96d783ca9c76189d804fb5578157fee54c475baa/DB.py
|
if invalid(oid): raise ConflictError, `oid`
|
if serial == '\0\0\0\0\0\0\0\0':
    self._creating.append(oid)
else:
    if invalid(oid) or invalid(None):
        raise ConflictError, `oid`
    self._invalidating.append(oid)
|
# def persistent_id(object,
|
c223ab347b8ac8efc83d162ecfbefdc3c8b0b9ab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c223ab347b8ac8efc83d162ecfbefdc3c8b0b9ab/Connection.py
|
self._storage.tpc_finish(transaction, self.tpc_finish_)
|
if self._tmp is not None:
    self._storage.tpc_finish(transaction, self._invalidate_sub)
    self._storage._creating[:0]=self._creating
    del self._creating[:]
else:
    self._storage.tpc_finish(transaction, self._invalidate_invalidating)
|
def tpc_finish(self, transaction):
|
c223ab347b8ac8efc83d162ecfbefdc3c8b0b9ab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c223ab347b8ac8efc83d162ecfbefdc3c8b0b9ab/Connection.py
|
def tpc_finish_(self):
|
def _invalidate_invalidating(self):
|
def tpc_finish_(self): invalidate=self._db.invalidate for oid in self._invalidating: invalidate(oid, self)
|
c223ab347b8ac8efc83d162ecfbefdc3c8b0b9ab /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c223ab347b8ac8efc83d162ecfbefdc3c8b0b9ab/Connection.py
|
'supportsUndo', 'supportsVersions', 'undo', 'undoLog',
|
'supportsUndo', 'supportsVersions', 'undoLog',
|
def __init__(self, storage, pool_size=7, cache_size=400, cache_deactivate_after=60, version_pool_size=3, version_cache_size=100, version_cache_deactivate_after=10, ): """Create an object database.
|
aa7bd65bf531f5e1d9cc0d49f865467fe76c800a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/aa7bd65bf531f5e1d9cc0d49f865467fe76c800a/DB.py
|
if ul > tl or dl > tl or el > tl or tl < (23+ul+dl+el): return 0
|
if tl < (23+ul+dl+el): return 0
|
def _sane(self, index, pos): """Sanity check saved index data by reading the last undone trans
|
6ae3c752ee37a9a22f3f6a46cc54c722d7b143f2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6ae3c752ee37a9a22f3f6a46cc54c722d7b143f2/FileStorage.py
|
if ul > tl or dl > tl or el > tl or tl < (23+ul+dl+el):
|
if tl < (23+ul+dl+el):
|
def read_index(file, name, index, vindex, tindex, stop='\377'*8, ltid=z64, start=4L, maxoid=z64, recover=0, read_only=0): read=file.read seek=file.seek seek(0,2) file_size=file.tell() if file_size: if file_size < start: raise FileStorageFormatError, file.name seek(0) if read(4) != packed_version: raise FileStorageFormatError, name else: if not read_only: file.write(packed_version) return 4L, maxoid, ltid index_get=index.get vndexpos=vindex.get pos=start seek(start) unpack=struct.unpack tid='\0'*7+'\1' while 1: # Read the transaction record h=read(23) if not h: break if len(h) != 23: if not read_only: warn('%s truncated at %s', name, pos) seek(pos) file.truncate() break tid, stl, status, ul, dl, el = unpack(">8s8scHHH",h) if el < 0: el=t32-el if tid <= ltid: warn("%s time-stamp reduction at %s", name, pos) ltid=tid tl=U64(stl) if pos+(tl+8) > file_size or status=='c': # Hm, the data were truncated or the checkpoint flag wasn't # cleared. They may also be corrupted, # in which case, we don't want to totally lose the data. if not read_only: warn("%s truncated, possibly due to damaged records at %s", name, pos) _truncate(file, name, pos) break if status not in ' up': warn('%s has invalid status, %s, at %s', name, status, pos) if ul > tl or dl > tl or el > tl or tl < (23+ul+dl+el): # We're in trouble. Find out if this is bad data in the # middle of the file, or just a turd that Win 9x dropped # at the end when the system crashed. # Skip to the end and read what should be the transaction length # of the last transaction. seek(-8, 2) rtl=U64(read(8)) # Now check to see if the redundant transaction length is # reasonable: if file_size - rtl < pos or rtl < 23: nearPanic('%s has invalid transaction header at %s', name, pos) if not read_only: warn("It appears that there is invalid data at the end of " "the file, possibly due to a system crash. %s " "truncated to recover from bad data at end." % name) _truncate(file, name, pos) break else: if recover: return pos, None, None panic('%s has invalid transaction header at %s', name, pos) if tid >= stop: break tpos=pos tend=tpos+tl if status=='u': # Undone transaction, skip it seek(tend) h=read(8) if h != stl: if recover: return tpos, None, None panic('%s has inconsistent transaction length at %s', name, pos) pos=tend+8 continue pos=tpos+(23+ul+dl+el) while pos < tend: # Read the data records for this transaction seek(pos) h=read(42) oid,serial,sprev,stloc,vlen,splen = unpack(">8s8s8s8sH8s", h) prev=U64(sprev) tloc=U64(stloc) plen=U64(splen) dlen=42+(plen or 8) tindex[oid]=pos if vlen: dlen=dlen+(16+vlen) seek(8,1) pv=U64(read(8)) version=read(vlen) # Jim says: "It's just not worth the bother." 
#if vndexpos(version, 0) != pv: # panic("%s incorrect previous version pointer at %s", # name, pos) vindex[version]=pos if pos+dlen > tend or tloc != tpos: if recover: return tpos, None, None panic("%s data record exceeds transaction record at %s", name, pos) if index_get(oid,0) != prev: if prev: if recover: return tpos, None, None error("%s incorrect previous pointer at %s", name, pos) else: warn("%s incorrect previous pointer at %s", name, pos) pos=pos+dlen if pos != tend: if recover: return tpos, None, None panic("%s data records don't add up at %s",name,tpos) # Read the (intentionally redundant) transaction length seek(pos) h=read(8) if h != stl: if recover: return tpos, None, None panic("%s redundant transaction length check failed at %s", name, pos) pos=pos+8 for oid, p in tindex.items(): maxoid=max(maxoid,oid) index[oid]=p # Record the position tindex.clear() return pos, maxoid, ltid
|
6ae3c752ee37a9a22f3f6a46cc54c722d7b143f2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6ae3c752ee37a9a22f3f6a46cc54c722d7b143f2/FileStorage.py
|
if ul > tl or dl > tl or el > tl or tl < (23+ul+dl+el):
|
if tl < (23+ul+dl+el):
|
def next(self, index=0): file=self._file seek=file.seek read=file.read pos=self._pos
|
6ae3c752ee37a9a22f3f6a46cc54c722d7b143f2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6ae3c752ee37a9a22f3f6a46cc54c722d7b143f2/FileStorage.py
|
return {
|
info = {
|
def get_info(self): storage=self.__storage return { 'length': len(storage), 'size': storage.getSize(), 'name': storage.getName(), 'supportsUndo': storage.supportsUndo(), 'supportsVersions': storage.supportsVersions(), 'supportsTransactionalUndo': storage.supportsTransactionalUndo(), }
|
65391a99a36c9cea82237bd568435c496337e4b6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/65391a99a36c9cea82237bd568435c496337e4b6/StorageServer.py
|
'supportsUndo': storage.supportsUndo(), 'supportsVersions': storage.supportsVersions(), 'supportsTransactionalUndo': storage.supportsTransactionalUndo(),
|
def get_info(self): storage=self.__storage return { 'length': len(storage), 'size': storage.getSize(), 'name': storage.getName(), 'supportsUndo': storage.supportsUndo(), 'supportsVersions': storage.supportsVersions(), 'supportsTransactionalUndo': storage.supportsTransactionalUndo(), }
|
65391a99a36c9cea82237bd568435c496337e4b6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/65391a99a36c9cea82237bd568435c496337e4b6/StorageServer.py
|
|
if len(h) != 16: raise ExportError, 'Truncated export file'
|
if len(h) != 16: raise POSException.ExportError, 'Truncated export file'
|
def persistent_load(ooid, Ghost=Ghost, StringType=StringType, atoi=string.atoi, TupleType=type(()), oids=oids, wrote_oid=wrote_oid, new_oid=new_oid): "Remap a persistent id to a new ID and create a ghost for it."
|
cd71da2184a9b1e8412dceb3b649aadecff69b98 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/cd71da2184a9b1e8412dceb3b649aadecff69b98/ExportImport.py
|
if len(p) != l: raise ExportError, 'Truncated export file'
|
if len(p) != l: raise POSException.ExportError, 'Truncated export file'
|
def persistent_load(ooid, Ghost=Ghost, StringType=StringType, atoi=string.atoi, TupleType=type(()), oids=oids, wrote_oid=wrote_oid, new_oid=new_oid): "Remap a persistent id to a new ID and create a ghost for it."
|
cd71da2184a9b1e8412dceb3b649aadecff69b98 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/cd71da2184a9b1e8412dceb3b649aadecff69b98/ExportImport.py
|
self._dst = ZODB.FileStorage.FileStorage('Dest.fs')
|
self._dst = FileStorage('Dest.fs')
|
def checkRecoverUndoInVersion(self): oid = self._storage.new_oid() version = "aVersion" revid_a = self._dostore(oid, data=MinPO(91)) revid_b = self._dostore(oid, revid=revid_a, version=version, data=MinPO(92)) revid_c = self._dostore(oid, revid=revid_b, version=version, data=MinPO(93)) self._undo(self._storage.undoInfo()[0]['id'], oid) self._commitVersion(version, '') self._undo(self._storage.undoInfo()[0]['id'], oid)
|
ff316ee1a2de3bd16df9610338321b7edbdef29c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ff316ee1a2de3bd16df9610338321b7edbdef29c/RecoveryStorage.py
|
data=self.tryToResolveConflict( oid, cserial, serial, _loadBack(self._file, oid, p64(pre)), cdata)
|
try:
    bdata = _loadBack(self._file, oid, p64(pre))[0]
except KeyError:
    raise UndoError("_loadBack() failed for %s" % repr(oid))
data=self.tryToResolveConflict(oid, cserial, serial, bdata, cdata)
|
def _transactionalUndoRecord(self, oid, pos, serial, pre, version): """Get the indo information for a data record
|
95f9483c0e63189a798935197d9ea4b309189fe2 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/95f9483c0e63189a798935197d9ea4b309189fe2/FileStorage.py
|
except '':
|
except:
|
def log(self, subsystem, severity, summary, detail, error): if _log_dest is None or severity < _log_level: return buf = ["------"] line = ("%s %s %s %s" % (log_time(), severity_string(severity), subsystem, summary)) if not textwrap or len(line) < 80: buf.append(line) else: buf.extend(textwrap.wrap(line, width=79, subsequent_indent=" "*20, break_long_words=0))
|
e3f91bf2e6cf5c93b4053906f0e5f52341ec2915 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e3f91bf2e6cf5c93b4053906f0e5f52341ec2915/MinimalLogger.py
|
r = f_read(8)
if len(r) < 8:
|
r = f_read(10)
if len(r) < 10:
|
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation if o == '-l': simclass = LRUCacheSimulation if o == '-y': simclass = AltZEOCacheSimulation if o == '-z': simclass = ZEOCacheSimulation if o == '-s': cachelimit = int(float(a)*MB) if o == '-X': heuristic = 1 if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file if filename.endswith(".gz"): # Open gzipped file try: import gzip except ImportError: print >>sys.stderr, "can't read gzipped files (no module gzip)" return 1 try: f = gzip.open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 elif filename == "-": # Read from stdin f = sys.stdin else: # Open regular file try: f = open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = simclass(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior offset = 0 records = 0 f_read = f.read struct_unpack = struct.unpack while 1: # Read a record and decode it r = f_read(8) if len(r) < 8: break offset += 8 ts, code = struct_unpack(">ii", r) if ts == 0: # Must be a misaligned record caused by a crash ##print "Skipping 8 bytes at offset", offset-8 continue r = f_read(16) if len(r) < 16: break offset += 16 records += 1 oid, serial = struct_unpack(">8s8s", r) # Decode the code dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
|
3b800118518258887ede08034bdfd32db3dbcad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/3b800118518258887ede08034bdfd32db3dbcad5/simul.py
|
offset += 8
ts, code = struct_unpack(">ii", r)
|
offset += 10
ts, code, lenoid = struct_unpack(">iiH", r)
|
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation if o == '-l': simclass = LRUCacheSimulation if o == '-y': simclass = AltZEOCacheSimulation if o == '-z': simclass = ZEOCacheSimulation if o == '-s': cachelimit = int(float(a)*MB) if o == '-X': heuristic = 1 if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file if filename.endswith(".gz"): # Open gzipped file try: import gzip except ImportError: print >>sys.stderr, "can't read gzipped files (no module gzip)" return 1 try: f = gzip.open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 elif filename == "-": # Read from stdin f = sys.stdin else: # Open regular file try: f = open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = simclass(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior offset = 0 records = 0 f_read = f.read struct_unpack = struct.unpack while 1: # Read a record and decode it r = f_read(8) if len(r) < 8: break offset += 8 ts, code = struct_unpack(">ii", r) if ts == 0: # Must be a misaligned record caused by a crash ##print "Skipping 8 bytes at offset", offset-8 continue r = f_read(16) if len(r) < 16: break offset += 16 records += 1 oid, serial = struct_unpack(">8s8s", r) # Decode the code dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
|
3b800118518258887ede08034bdfd32db3dbcad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/3b800118518258887ede08034bdfd32db3dbcad5/simul.py
|
r = f_read(16)
if len(r) < 16:
|
r = f_read(8 + lenoid)
if len(r) < 8 + lenoid:
|
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation if o == '-l': simclass = LRUCacheSimulation if o == '-y': simclass = AltZEOCacheSimulation if o == '-z': simclass = ZEOCacheSimulation if o == '-s': cachelimit = int(float(a)*MB) if o == '-X': heuristic = 1 if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file if filename.endswith(".gz"): # Open gzipped file try: import gzip except ImportError: print >>sys.stderr, "can't read gzipped files (no module gzip)" return 1 try: f = gzip.open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 elif filename == "-": # Read from stdin f = sys.stdin else: # Open regular file try: f = open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = simclass(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior offset = 0 records = 0 f_read = f.read struct_unpack = struct.unpack while 1: # Read a record and decode it r = f_read(8) if len(r) < 8: break offset += 8 ts, code = struct_unpack(">ii", r) if ts == 0: # Must be a misaligned record caused by a crash ##print "Skipping 8 bytes at offset", offset-8 continue r = f_read(16) if len(r) < 16: break offset += 16 records += 1 oid, serial = struct_unpack(">8s8s", r) # Decode the code dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
|
3b800118518258887ede08034bdfd32db3dbcad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/3b800118518258887ede08034bdfd32db3dbcad5/simul.py
|
offset += 16
|
offset += 8 + lenoid
|
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation if o == '-l': simclass = LRUCacheSimulation if o == '-y': simclass = AltZEOCacheSimulation if o == '-z': simclass = ZEOCacheSimulation if o == '-s': cachelimit = int(float(a)*MB) if o == '-X': heuristic = 1 if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file if filename.endswith(".gz"): # Open gzipped file try: import gzip except ImportError: print >>sys.stderr, "can't read gzipped files (no module gzip)" return 1 try: f = gzip.open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 elif filename == "-": # Read from stdin f = sys.stdin else: # Open regular file try: f = open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = simclass(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior offset = 0 records = 0 f_read = f.read struct_unpack = struct.unpack while 1: # Read a record and decode it r = f_read(8) if len(r) < 8: break offset += 8 ts, code = struct_unpack(">ii", r) if ts == 0: # Must be a misaligned record caused by a crash ##print "Skipping 8 bytes at offset", offset-8 continue r = f_read(16) if len(r) < 16: break offset += 16 records += 1 oid, serial = struct_unpack(">8s8s", r) # Decode the code dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
|
3b800118518258887ede08034bdfd32db3dbcad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/3b800118518258887ede08034bdfd32db3dbcad5/simul.py
|
oid, serial = struct_unpack(">8s8s", r)
|
serial, oid = struct_unpack(">8s%ds" % lenoid, r)
|
def main(): # Parse options MB = 1000*1000 cachelimit = 20*MB simclass = ZEOCacheSimulation heuristic = 0 try: opts, args = getopt.getopt(sys.argv[1:], "bflyzs:X") except getopt.error, msg: usage(msg) return 2 for o, a in opts: if o == '-b': simclass = BuddyCacheSimulation if o == '-f': simclass = SimpleCacheSimulation if o == '-l': simclass = LRUCacheSimulation if o == '-y': simclass = AltZEOCacheSimulation if o == '-z': simclass = ZEOCacheSimulation if o == '-s': cachelimit = int(float(a)*MB) if o == '-X': heuristic = 1 if len(args) != 1: usage("exactly one file argument required") return 2 filename = args[0] # Open file if filename.endswith(".gz"): # Open gzipped file try: import gzip except ImportError: print >>sys.stderr, "can't read gzipped files (no module gzip)" return 1 try: f = gzip.open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 elif filename == "-": # Read from stdin f = sys.stdin else: # Open regular file try: f = open(filename, "rb") except IOError, msg: print >>sys.stderr, "can't open %s: %s" % (filename, msg) return 1 # Create simulation object sim = simclass(cachelimit) # Print output header sim.printheader() # Read trace file, simulating cache behavior offset = 0 records = 0 f_read = f.read struct_unpack = struct.unpack while 1: # Read a record and decode it r = f_read(8) if len(r) < 8: break offset += 8 ts, code = struct_unpack(">ii", r) if ts == 0: # Must be a misaligned record caused by a crash ##print "Skipping 8 bytes at offset", offset-8 continue r = f_read(16) if len(r) < 16: break offset += 16 records += 1 oid, serial = struct_unpack(">8s8s", r) # Decode the code dlen, version, code, current = (code & 0x7fffff00, code & 0x80, code & 0x7e, code & 0x01) # And pass it to the simulation sim.event(ts, dlen, version, code, current, oid, serial) # Finish simulation sim.finish() # Exit code from main() return 0
|
3b800118518258887ede08034bdfd32db3dbcad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/3b800118518258887ede08034bdfd32db3dbcad5/simul.py
|
raise ValueError, "unknown address in list: %s" % repr(a)
|
raise ValueError, ( "unknown address in list: %s" % repr(addr))
|
def _parse_addrs(self, addrs): # Return a list of (addr_type, addr) pairs.
|
e1dbb1bf5990761c257d2cbe7062d7e236a63a77 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e1dbb1bf5990761c257d2cbe7062d7e236a63a77/client.py
|
break
|
def _commitVersion(self, src, dest, transaction, abort=None): # call after checking arguments and acquiring lock srcpos = self._vindex_get(src, 0) spos = p64(srcpos) # middle holds bytes 16:34 of a data record: # pos of transaction, len of version name, data length # commit version never writes data, so data length is always 0 middle = struct.pack(">8sH8s", p64(self._pos), len(dest), z64)
|
1f4e5bb4873974e2b6b701a90c4e38e337d71d3b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f4e5bb4873974e2b6b701a90c4e38e337d71d3b/FileStorage.py
|
|
except:
|
except KeyError:
|
def pack(self, t, referencesf): """Copy data from the current database file to a packed file
|
1f4e5bb4873974e2b6b701a90c4e38e337d71d3b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f4e5bb4873974e2b6b701a90c4e38e337d71d3b/FileStorage.py
|
p=file()
|
p=file(1)
|
def persistent_id(object,self=self,stackup=stackup): if (not hasattr(object, '_p_oid') or type(object) is ClassType): return None
|
0a526e552373759f61c9dd56a8fc95ae4da8c961 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/0a526e552373759f61c9dd56a8fc95ae4da8c961/Connection.py
|
(old and new) of the object in conflict. (Serial numbers are closely related [equal?] to transaction IDs; a ConflictError may be triggered by a serial number mismatch.)
|
related to conflict. The first is the revision of object that is in conflict, the second is the revision of that the current transaction read when it started.
|
def __str__(self): return _fmt_oid(self.args[0])
|
0b0b84d2e290a95e3fc88b0c062d150d60cc5e8b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/0b0b84d2e290a95e3fc88b0c062d150d60cc5e8b/POSException.py
|
zLOG.LOG("RUNSVR", severity, msg, "", error)
|
zLOG.LOG(_label, severity, msg, "", error)
|
def _log(msg, severity=zLOG.INFO, error=None): """Internal: generic logging function.""" zLOG.LOG("RUNSVR", severity, msg, "", error)
|
b3d5cd6606c60cd92fb3d5f6253feaccbd31e725 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/b3d5cd6606c60cd92fb3d5f6253feaccbd31e725/runzeo.py
|
def notifyDisconnected(self, ignored): LOG("ClientStorage", PROBLEM, "Disconnected from storage") self._connected=0 thread.start_new_thread(self._call.connect,(0,)) try: self._commit_lock_release() except: pass
|
952c0d8f64227c4e18e7b95a5c535a34669a10d0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/952c0d8f64227c4e18e7b95a5c535a34669a10d0/ClientStorage.py
|
||
print time.ctime(self.begin),
|
t = time.ctime(self.begin)
|
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url
|
fd0704e89826264b973c25a56a77a2ed8350b58b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fd0704e89826264b973c25a56a77a2ed8350b58b/parsezeolog.py
|
print self.vote - self.begin,
|
d_vote = self.vote - self.begin
|
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url
|
fd0704e89826264b973c25a56a77a2ed8350b58b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fd0704e89826264b973c25a56a77a2ed8350b58b/parsezeolog.py
|
print "*",
|
d_vote = "*"
|
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url
|
fd0704e89826264b973c25a56a77a2ed8350b58b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fd0704e89826264b973c25a56a77a2ed8350b58b/parsezeolog.py
|
print self.finish - self.begin,
|
d_finish = self.finish - self.begin
|
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url
|
fd0704e89826264b973c25a56a77a2ed8350b58b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fd0704e89826264b973c25a56a77a2ed8350b58b/parsezeolog.py
|
print "*", print self.user, self.url
|
d_finish = "*" print self.fmt % (time.ctime(self.begin), d_vote, d_finish, self.user, self.url)
|
def report(self): """Print a report about the transaction""" print time.ctime(self.begin), if hasattr(self, "vote"): print self.vote - self.begin, else: print "*", if hasattr(self, "finish"): print self.finish - self.begin, else: print "*", print self.user, self.url
|
fd0704e89826264b973c25a56a77a2ed8350b58b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/fd0704e89826264b973c25a56a77a2ed8350b58b/parsezeolog.py
|
self.add("monitor_address", "zeo.monitor_address", "m:", "monitor=", self.handle_monitor_address)
|
self.add("monitor_address", "zeo.monitor_address.address", "m:", "monitor=", self.handle_monitor_address)
|
def add_zeo_options(self): self.add(None, None, "a:", "address=", self.handle_address) self.add(None, None, "f:", "filename=", self.handle_filename) self.add("family", "zeo.address.family") self.add("address", "zeo.address.address", required="no server address specified; use -a or -C") self.add("read_only", "zeo.read_only", default=0) self.add("invalidation_queue_size", "zeo.invalidation_queue_size", default=100) self.add("transaction_timeout", "zeo.transaction_timeout", "t:", "timeout=", float) self.add("monitor_address", "zeo.monitor_address", "m:", "monitor=", self.handle_monitor_address) self.add('auth_protocol', 'zeo.authentication_protocol', None, 'auth-protocol=', default=None) self.add('auth_database', 'zeo.authentication_database', None, 'auth-database=') self.add('auth_realm', 'zeo.authentication_realm', None, 'auth-realm=')
|
c4ac2ac596272a14a763d58e77cd810f1ddbec43 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/c4ac2ac596272a14a763d58e77cd810f1ddbec43/runzeo.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25)
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25)
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25)
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkSimpleTransactionalUndo(self): oid = self._storage.new_oid() revid = self._dostore(oid, data=23) revid = self._dostore(oid, revid=revid, data=24) revid = self._dostore(oid, revid=revid, data=25)
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkTwoObjectUndo(self): # Convenience p31, p32, p51, p52 = map(pickle.dumps, (31, 32, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p31, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p51, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Update those same two objects self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p32, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p52, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Make sure the objects have the current value data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 32 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 52 # Now attempt to undo the transaction containing two objects info =self._storage.undoInfo() tid = info[0]['id'] self._storage.tpc_begin(self._transaction) oids = self._storage.transactionalUndo(tid, self._transaction) self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert len(oids) == 2 assert oid1 in oids and oid2 in oids data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 31 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 51
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkTwoObjectUndoAtOnce(self): # Convenience p30, p31, p32, p50, p51, p52 = map(pickle.dumps, (30, 31, 32, 50, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p30, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p50, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Update those same two objects self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p31, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p51, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) # Update those same two objects self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p32, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p52, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Make sure the objects have the current value data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 32 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 52 # Now attempt to undo the transaction containing two objects info =self._storage.undoInfo() tid = info[0]['id'] tid1 = info[1]['id'] self._storage.tpc_begin(self._transaction) oids = self._storage.transactionalUndo(tid, self._transaction) oids1 = self._storage.transactionalUndo(tid1, self._transaction) self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) # We get the finalization stuff called an extra time: self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert len(oids) == 2 assert len(oids1) == 2 assert oid1 in oids and oid2 in oids data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 30 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 50
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkTwoObjectUndoAtOnce(self): # Convenience p30, p31, p32, p50, p51, p52 = map(pickle.dumps, (30, 31, 32, 50, 51, 52)) oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = revid2 = ZERO # Store two objects in the same transaction self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p30, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p50, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Update those same two objects self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p31, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p51, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) # Update those same two objects self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p32, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p52, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Make sure the objects have the current value data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 32 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 52 # Now attempt to undo the transaction containing two objects info =self._storage.undoInfo() tid = info[0]['id'] tid1 = info[1]['id'] self._storage.tpc_begin(self._transaction) oids = self._storage.transactionalUndo(tid, self._transaction) oids1 = self._storage.transactionalUndo(tid1, self._transaction) self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) # We get the finalization stuff called an extra time: self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert len(oids) == 2 assert len(oids1) == 2 assert oid1 in oids and oid2 in oids data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 30 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 50
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkTwoObjectUndoAgain(self): p32, p33, p52, p53 = map(pickle.dumps, (32, 33, 52, 53)) # Like the above, but the first revision of the objects are stored in # different transactions. oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = self._dostore(oid1, data=31) revid2 = self._dostore(oid2, data=51) # Update those same two objects self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p32, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p52, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Now attempt to undo the transaction containing two objects info =self._storage.undoInfo() tid = info[0]['id'] self._storage.tpc_begin(self._transaction) oids = self._storage.transactionalUndo(tid, self._transaction) self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert len(oids) == 2 assert oid1 in oids and oid2 in oids data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 31 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 51 # Like the above, but this time, the second transaction contains only # one object. self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p33, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p53, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Update in different transactions revid1 = self._dostore(oid1, revid=revid1, data=34) revid2 = self._dostore(oid2, revid=revid2, data=54) # Now attempt to undo the transaction containing two objects info =self._storage.undoInfo() tid = info[1]['id'] self._storage.tpc_begin(self._transaction) oids = self._storage.transactionalUndo(tid, self._transaction) self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert len(oids) == 1 assert oid1 in oids and not oid2 in oids data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 33 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 54
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkTwoObjectUndoAgain(self): p32, p33, p52, p53 = map(pickle.dumps, (32, 33, 52, 53)) # Like the above, but the first revision of the objects are stored in # different transactions. oid1 = self._storage.new_oid() oid2 = self._storage.new_oid() revid1 = self._dostore(oid1, data=31) revid2 = self._dostore(oid2, data=51) # Update those same two objects self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p32, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p52, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Now attempt to undo the transaction containing two objects info =self._storage.undoInfo() tid = info[0]['id'] self._storage.tpc_begin(self._transaction) oids = self._storage.transactionalUndo(tid, self._transaction) self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert len(oids) == 2 assert oid1 in oids and oid2 in oids data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 31 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 51 # Like the above, but this time, the second transaction contains only # one object. self._storage.tpc_begin(self._transaction) revid1 = self._storage.store(oid1, revid1, p33, '', self._transaction) revid2 = self._storage.store(oid2, revid2, p53, '', self._transaction) # Finish the transaction self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert revid1 == revid2 # Update in different transactions revid1 = self._dostore(oid1, revid=revid1, data=34) revid2 = self._dostore(oid2, revid=revid2, data=54) # Now attempt to undo the transaction containing two objects info =self._storage.undoInfo() tid = info[1]['id'] self._storage.tpc_begin(self._transaction) oids = self._storage.transactionalUndo(tid, self._transaction) self._storage.tpc_vote(self._transaction) self._storage.tpc_finish(self._transaction) assert len(oids) == 1 assert oid1 in oids and not oid2 in oids data, revid1 = self._storage.load(oid1, '') assert pickle.loads(data) == 33 data, revid2 = self._storage.load(oid2, '') assert pickle.loads(data) == 54
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkNotUndoable(self): # Set things up so we've got a transaction that can't be undone oid = self._storage.new_oid() revid_a = self._dostore(oid, data=51) revid_b = self._dostore(oid, revid=revid_a, data=52) revid_c = self._dostore(oid, revid=revid_b, data=53) # Start the undo info =self._storage.undoInfo() tid = info[1]['id'] self._storage.tpc_begin(self._transaction) self.assertRaises(POSException.UndoError, self._storage.transactionalUndo, tid, self._transaction) self._storage.tpc_abort(self._transaction) # Now have more fun: object1 and object2 are in the same transaction, # which we'll try to undo to, but one of them has since modified in # different transaction, so the undo should fail. oid1 = oid revid1 = revid_c oid2 = self._storage.new_oid() revid2 = ZERO p81, p82, p91, p92 = map(pickle.dumps, (81, 82, 91, 92))
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
info =self._storage.undoInfo()
|
info = self._storage.undoInfo()
|
def checkNotUndoable(self): # Set things up so we've got a transaction that can't be undone oid = self._storage.new_oid() revid_a = self._dostore(oid, data=51) revid_b = self._dostore(oid, revid=revid_a, data=52) revid_c = self._dostore(oid, revid=revid_b, data=53) # Start the undo info =self._storage.undoInfo() tid = info[1]['id'] self._storage.tpc_begin(self._transaction) self.assertRaises(POSException.UndoError, self._storage.transactionalUndo, tid, self._transaction) self._storage.tpc_abort(self._transaction) # Now have more fun: object1 and object2 are in the same transaction, # which we'll try to undo to, but one of them has since modified in # different transaction, so the undo should fail. oid1 = oid revid1 = revid_c oid2 = self._storage.new_oid() revid2 = ZERO p81, p82, p91, p92 = map(pickle.dumps, (81, 82, 91, 92))
|
f8d1d748f6f1881c067d94a9a0868707bc5ec509 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/f8d1d748f6f1881c067d94a9a0868707bc5ec509/TransactionalUndoStorage.py
|
def XXXtestEmptyFirstBucketReportedByGuido(self):
|
def testEmptyFirstBucketReportedByGuido(self):
|
def XXXtestEmptyFirstBucketReportedByGuido(self): b = self.t for i in xrange(29972): # reduce to 29971 and it works b[i] = i for i in xrange(30): # reduce to 29 and it works del b[i] b[i+40000] = i
|
84ace7a5da26eedb2f0248f43425c3f18cc59bcd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/84ace7a5da26eedb2f0248f43425c3f18cc59bcd/testBTrees.py
|
def XXXtestDeletes(self):
|
def testDeletes(self):
|
def XXXtestDeletes(self): # Delete keys in all possible orders, checking each tree along # the way.
|
84ace7a5da26eedb2f0248f43425c3f18cc59bcd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/84ace7a5da26eedb2f0248f43425c3f18cc59bcd/testBTrees.py
|
def XXXtestDeletes(self): # Delete keys in all possible orders, checking each tree along # the way.
|
84ace7a5da26eedb2f0248f43425c3f18cc59bcd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/84ace7a5da26eedb2f0248f43425c3f18cc59bcd/testBTrees.py
|
||
if self.filter is None or self.filter(d):
|
if self.filter is None or self.filter(dict):
|
def search(self): """Search for another record.""" dict = self._readnext() if self.filter is None or self.filter(d): if self.i >= self.first: self.results.append(dict) self.i += 1
|
07652619c22fd4c5d8562426a14bf07160e4c115 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/07652619c22fd4c5d8562426a14bf07160e4c115/FileStorage.py
|
if not (th.status == " " or th.status == "p"):
|
if not th.status in " pu":
|
def checkTxn(self, th, pos): if th.tid <= self.ltid: self.fail(pos, "time-stamp reduction: %s <= %s", _fmt_oid(th.tid), _fmt_oid(self.ltid)) self.ltid = th.tid if th.status == "c": self.fail(pos, "transaction with checkpoint flag set") if not (th.status == " " or th.status == "p"): self.fail(pos, "invalid transaction status: %r", th.status) if th.tlen < th.headerlen(): self.fail(pos, "invalid transaction header: " "txnlen (%d) < headerlen(%d)", th.tlen, th.headerlen())
|
1375b51ede3a278dadcf9ff3bc94462af19b5b1e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1375b51ede3a278dadcf9ff3bc94462af19b5b1e/fspack.py
|
__version__='$Revision: 1.3 $'[11:-2]
|
__version__='$Revision: 1.4 $'[11:-2]
|
def info(RESPONSE): RESPONSE['Content-type']= 'text/plain'
|
790aaedb1613a0f34f64e37303c780c26f86c2cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/790aaedb1613a0f34f64e37303c780c26f86c2cf/DemoStorage.py
|
if i < first: continue
|
if i < first:
    i = i+1
    continue
|
def undoLog(self, first, last, filter=None): self._lock_acquire() try: transactions=self._data.items() pos=len(transactions) encode=base64.encodestring r=[] append=r.append i=0 while i < last and pos: pos=pos-1 if i < first: continue tid, (p, u, d, e, t) = transactions[pos] if p: continue d={'id': encode(tid)[:-1], 'time': TimeStamp(tid).timeTime(), 'user_name': u, 'description': d} if e: d.update(loads(e))
|
790aaedb1613a0f34f64e37303c780c26f86c2cf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/790aaedb1613a0f34f64e37303c780c26f86c2cf/DemoStorage.py
|
if index[oid]==srcpos:
|
if index_get(oid, None) == srcpos:
|
def commitVersion(self, src, dest, transaction, abort=None): # We are going to commit by simply storing back pointers.
|
04894d011d29abeaa669f40164eff5dec4d7c8ac /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/04894d011d29abeaa669f40164eff5dec4d7c8ac/FileStorage.py
|
p = self.klass()
|
p = P()
|
def testInterface(self): self.assert_(IPersistent.isImplementedByInstancesOf(Persistent), "%s does not implement IPersistent" % Persistent) p = Persistent() self.assert_(IPersistent.isImplementedBy(p), "%s does not implement IPersistent" % p)
|
3a176dce81d807462cc28891ed7cbf72c929345d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/3a176dce81d807462cc28891ed7cbf72c929345d/test_persistent.py
|
invalid=self._invalid
|
def commit(self, object, transaction): oid=object._p_oid if oid is None or object._p_jar is not self: oid = self.new_oid() object._p_jar=self object._p_oid=oid
|
9adbc8bc81ef9414c8569fb3f43bdca6aac3e742 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/9adbc8bc81ef9414c8569fb3f43bdca6aac3e742/Connection.py
|
|
if read(4) == '<?xm':
|
magic=read(4)
if magic == '<?xm':
|
def importFile(self, file, clue=''): # This is tricky, because we need to work in a transaction!
|
09d5bfd447962ccfbd9635ecea96ea1be8706f92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/09d5bfd447962ccfbd9635ecea96ea1be8706f92/ExportImport.py
|
else:
    file.seek(0)
    if file.read(4) != 'ZEXP':
        raise POSException.ExportError, 'Invalid export header'
|
if magic != 'ZEXP': raise POSException.ExportError, 'Invalid export header'
|
def importFile(self, file, clue=''): # This is tricky, because we need to work in a transaction!
|
09d5bfd447962ccfbd9635ecea96ea1be8706f92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/09d5bfd447962ccfbd9635ecea96ea1be8706f92/ExportImport.py
|
self.t = None
|
def tearDown(self):
    self.t = None
    del self.t
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
|
def _getRoot(self):
|
if self.storage is not None:
    self.storage.close()
    self.storage.cleanup()

def openDB(self):
|
def tearDown(self):
    self.t = None
    del self.t
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
s = FileStorage(n)
db = DB(s)
root = db.open().root()
return root

def _closeDB(self, root):
    root._p_jar._db.close()
    root = None

def _delDB(self):
    os.system('rm fs_tmp__*')
|
self.storage = FileStorage(n)
self.db = DB(self.storage)
|
def _getRoot(self):
    from ZODB.FileStorage import FileStorage
    from ZODB.DB import DB
    n = 'fs_tmp__%s' % os.getpid()
    s = FileStorage(n)
    db = DB(s)
    root = db.open().root()
    return root
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class NormalSetTests(Base):
    """ Test common to all set types """

class ExtendedSetTests(NormalSetTests):
|
class SetTests(Base):
|
def testFailMergeInsert(self):
    base, b1, b2, bm, e1, e2, items = self._setupConflict()
    b1[-99999]=-99999
    b1[e1[0][0]]=e1[0][1]
    b2[99999]=99999
    b2[e1[0][0]]=e1[0][1]
    test_merge(base, b1, b2, bm, 'merge conflicting inserts',
               should_fail=1)
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
base=self.t
|
base = self.t
|
def _setupConflict(self):
    l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686,
        -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067]
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
b1=base.__class__(base)
b2=base.__class__(base)
bm=base.__class__(base)
items=base.keys()
|
b1 = base.__class__(base.keys())
b2 = base.__class__(base.keys())
bm = base.__class__(base.keys())
items = base.keys()
|
def _setupConflict(self):
    l=[ -5124, -7377, 2274, 8801, -9901, 7327, 1565, 17, -679, 3686,
        -3607, 14, 6419, -5637, 6040, -4556, -8622, 3847, 7191, -4067]
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
if expected is None: expected=((((),),),)
|
if expected is None: expected = ((((),),),)
|
def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0):
    s1=o1.__getstate__()
    s2=o2.__getstate__()
    s3=o3.__getstate__()
    expected=expect.__getstate__()
    if expected is None: expected=((((),),),)
    if should_fail:
        try:
            merged=o1._p_resolveConflict(s1, s2, s3)
        except (ConflictError, ValueError), err:
            pass # ConflictError is the only exception that should occur
        else:
            assert 0, message
    else:
        merged=o1._p_resolveConflict(s1, s2, s3)
        assert merged==expected, message
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
except (ConflictError, ValueError), err: pass
|
except ConflictError, err: pass
|
def test_merge(o1, o2, o3, expect, message='failed to merge', should_fail=0):
    s1=o1.__getstate__()
    s2=o2.__getstate__()
    s3=o3.__getstate__()
    expected=expect.__getstate__()
    if expected is None: expected=((((),),),)
    if should_fail:
        try:
            merged=o1._p_resolveConflict(s1, s2, s3)
        except (ConflictError, ValueError), err:
            pass # ConflictError is the only exception that should occur
        else:
            assert 0, message
    else:
        merged=o1._p_resolveConflict(s1, s2, s3)
        assert merged==expected, message
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestIOSets(ExtendedSetTests, TestCase):
|
class TestIOSets(SetTests, TestCase):
|
def setUp(self): self.t = IIBTree()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestOOSets(ExtendedSetTests, TestCase):
|
class TestOOSets(SetTests, TestCase):
|
def setUp(self): self.t = IOSet()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestIISets(ExtendedSetTests, TestCase):
|
class TestIISets(SetTests, TestCase):
|
def setUp(self): self.t = OOSet()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestOISets(ExtendedSetTests, TestCase):
|
class TestOISets(SetTests, TestCase):
|
def setUp(self): self.t = IISet()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestIOTreeSets(NormalSetTests, TestCase):
|
class TestIOTreeSets(SetTests, TestCase):
|
def setUp(self): self.t = OISet()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestOOTreeSets(NormalSetTests, TestCase):
|
class TestOOTreeSets(SetTests, TestCase):
|
def setUp(self): self.t = IOTreeSet()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestIITreeSets(NormalSetTests, TestCase):
|
class TestIITreeSets(SetTests, TestCase):
|
def setUp(self): self.t = OOTreeSet()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class TestOITreeSets(NormalSetTests, TestCase):
|
class TestOITreeSets(SetTests, TestCase):
|
def setUp(self): self.t = IITreeSet()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
class NastyConfict(Base, TestCase): def setUp(self): self.t = OOBTree() def testResolutionBlowsUp(self): b = self.t for i in range(0, 200, 4): b[i] = i state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) self.openDB() r1 = self.db.open().root() r1["t"] = self.t get_transaction().commit() r2 = self.db.open().root() copy = r2["t"] list(copy.values()) self.assertEqual(self.t._p_serial, copy._p_serial) self.t.update({1:2, 2:3}) get_transaction().commit() copy.update({3:4}) get_transaction().commit() list(copy.values()) def testBucketSplitConflict(self): b = self.t for i in range(0, 200, 4): b[i] = i state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) self.openDB() r1 = self.db.open().root() r1["t"] = self.t get_transaction().commit() r2 = self.db.open().root() copy = r2["t"] list(copy.values()) self.assertEqual(self.t._p_serial, copy._p_serial) b = self.t numtoadd = 16 candidate = 60 while numtoadd: if not b.has_key(candidate): b[candidate] = candidate numtoadd -= 1 candidate += 1 state = b.__getstate__() self.assertEqual(len(state) , 2) self.assertEqual(len(state[0]), 7) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 75) self.assertEqual(state[0][5], 120) get_transaction().commit() b = copy for i in range(112, 116): b[i] = i state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) self.assertRaises(ConflictError, get_transaction().commit) get_transaction().abort() def testEmptyBucketConflict(self): b = self.t for i in range(0, 200, 4): b[i] = i state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) self.openDB() r1 = self.db.open().root() r1["t"] = self.t get_transaction().commit() r2 = self.db.open().root() copy = r2["t"] list(copy.values()) self.assertEqual(self.t._p_serial, copy._p_serial) b = self.t for k in 60, 64, 68, 72, 76, 80, 84, 88: del b[k] state = b.__getstate__() self.assertEqual(len(state) , 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) get_transaction().commit() b = copy for k in 92, 96, 100, 104, 108, 112, 116: del b[k] state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) get_transaction().commit() self.assertRaises(AssertionError, b._check) def testEmptyBucketNoConflict(self): b = self.t for i in range(0, 200, 4): b[i] = i state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) self.openDB() r1 = self.db.open().root() r1["t"] = self.t get_transaction().commit() r2 = self.db.open().root() copy = r2["t"] list(copy.values()) self.assertEqual(self.t._p_serial, copy._p_serial) b = self.t b[1] = 1 state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 5) self.assertEqual(state[0][1], 60) self.assertEqual(state[0][3], 120) get_transaction().commit() b = copy for k in range(120, 200, 4): del b[k] state = b.__getstate__() self.assertEqual(len(state), 2) self.assertEqual(len(state[0]), 3) self.assertEqual(state[0][1], 60) 
get_transaction().commit() b._check()
|
def setUp(self): self.t = OIBucket()
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
|
TIOBTree = makeSuite(TestIOBTrees, 'test') TOOBTree = makeSuite(TestOOBTrees, 'test') TOIBTree = makeSuite(TestOIBTrees, 'test') TIIBTree = makeSuite(TestIIBTrees, 'test') TIOSet = makeSuite(TestIOSets, 'test') TOOSet = makeSuite(TestOOSets, 'test') TOISet = makeSuite(TestIOSets, 'test') TIISet = makeSuite(TestOOSets, 'test') TIOTreeSet = makeSuite(TestIOTreeSets, 'test') TOOTreeSet = makeSuite(TestOOTreeSets, 'test') TOITreeSet = makeSuite(TestIOTreeSets, 'test') TIITreeSet = makeSuite(TestOOTreeSets, 'test') TIOBucket = makeSuite(TestIOBuckets, 'test') TOOBucket = makeSuite(TestOOBuckets, 'test') TOIBucket = makeSuite(TestOIBuckets, 'test') TIIBucket = makeSuite(TestIIBuckets, 'test') alltests = TestSuite((TIOSet, TOOSet, TOISet, TIISet, TIOTreeSet, TOOTreeSet, TOITreeSet, TIITreeSet, TIOBucket, TOOBucket, TOIBucket, TIIBucket, TOOBTree, TIOBTree, TOIBTree, TIIBTree)) return alltests def lsubtract(l1, l2): l1=list(l1) l2=list(l2) l = filter(lambda x, l1=l1: x not in l1, l2) l = l + filter(lambda x, l2=l2: x not in l2, l1) return l def realseq(itemsob): return map(lambda x: x, itemsob) def main(): TextTestRunner().run(test_suite()) if __name__ == '__main__': main()
|
suite = TestSuite()
for k in (TestIOBTrees, TestOOBTrees, TestOIBTrees, TestIIBTrees,
          TestIOSets, TestOOSets, TestOISets, TestIISets,
          TestIOTreeSets, TestOOTreeSets, TestOITreeSets, TestIITreeSets,
          TestIOBuckets, TestOOBuckets, TestOIBuckets, TestIIBuckets,
          NastyConfict):
    suite.addTest(makeSuite(k))
return suite
|
def test_suite(): TIOBTree = makeSuite(TestIOBTrees, 'test') TOOBTree = makeSuite(TestOOBTrees, 'test') TOIBTree = makeSuite(TestOIBTrees, 'test') TIIBTree = makeSuite(TestIIBTrees, 'test') TIOSet = makeSuite(TestIOSets, 'test') TOOSet = makeSuite(TestOOSets, 'test') TOISet = makeSuite(TestIOSets, 'test') TIISet = makeSuite(TestOOSets, 'test') TIOTreeSet = makeSuite(TestIOTreeSets, 'test') TOOTreeSet = makeSuite(TestOOTreeSets, 'test') TOITreeSet = makeSuite(TestIOTreeSets, 'test') TIITreeSet = makeSuite(TestOOTreeSets, 'test') TIOBucket = makeSuite(TestIOBuckets, 'test') TOOBucket = makeSuite(TestOOBuckets, 'test') TOIBucket = makeSuite(TestOIBuckets, 'test') TIIBucket = makeSuite(TestIIBuckets, 'test') alltests = TestSuite((TIOSet, TOOSet, TOISet, TIISet, TIOTreeSet, TOOTreeSet, TOITreeSet, TIITreeSet, TIOBucket, TOOBucket, TOIBucket, TIIBucket, TOOBTree, TIOBTree, TOIBTree, TIIBTree)) return alltests
|
298631a7e286bf71eba01d02ca831dd4f9af0cf6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/298631a7e286bf71eba01d02ca831dd4f9af0cf6/testConflict.py
|
try:
    get_transaction().register(self)
    self._p_changed=1
|
try:
    get_transaction().register(self)
|
def __changed__(self,v=-1):
    old=self._p_changed
    if v != -1:
        if v and not old and self._p_jar is not None:
            try:
                get_transaction().register(self)
                self._p_changed=1
            except: pass
|
748e006e0d038032861e2524177f21ba4bee997b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/748e006e0d038032861e2524177f21ba4bee997b/Persistence.py
|
def open(self, read_only=0):
    addr = self._storage._addr
    self._storage.close()
    self._storage = ClientStorage(addr, read_only=read_only, wait=1)

def checkWriteMethods(self):
    if hasattr(ZODB, "__version__"):
        ReadOnlyStorage.ReadOnlyStorage.checkWriteMethods(self)

class FileStorageTests(GenericTests):
    """Test ZEO backed by a FileStorage."""

    level = 2
|
def open(self, read_only=0):
    # XXX Needed to support ReadOnlyStorage tests.  Ought to be a
    # cleaner way.
    addr = self._storage._addr
    self._storage.close()
    self._storage = ClientStorage(addr, read_only=read_only, wait=1)
|
6220253d62df7c1ef0f7e9feba2ad3f01b6d57c6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6220253d62df7c1ef0f7e9feba2ad3f01b6d57c6/testZEO.py
|
|
def getStorage(self):
|
def getConfig(self):
|
def getStorage(self):
    self._envdir = tempfile.mktemp()
    return """\
    <Storage>
        type BDBFullStorage
        name %s
    </Storage>
    """ % self._envdir
|
6220253d62df7c1ef0f7e9feba2ad3f01b6d57c6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6220253d62df7c1ef0f7e9feba2ad3f01b6d57c6/testZEO.py
|
def getStorage(self):
|
def getConfig(self):
|
def getStorage(self):
    self._envdir = tempfile.mktemp()
    return """\
    <Storage>
        type MappingStorage
        name %s
    </Storage>
    """ % self._envdir
|
6220253d62df7c1ef0f7e9feba2ad3f01b6d57c6 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6220253d62df7c1ef0f7e9feba2ad3f01b6d57c6/testZEO.py
|
def f(c):
    o = c._opened
    d = c._debug_info
    if d:
        if len(d) == 1:
            d = d[0]
    else:
        d = ''
    d = "%s (%s)" % (d, len(c._cache))
    result.append({
        'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
        'info': d,
        'version': version,
        })
self._connectionMap(f)
|
for version, pool in self._pools.items():
    for c in pool.all_as_list():
        o = c._opened
        d = c._debug_info
        if d:
            if len(d) == 1:
                d = d[0]
        else:
            d = ''
        d = "%s (%s)" % (d, len(c._cache))
        result.append({
            'opened': o and ("%s (%.2fs)" % (ctime(o), t-o)),
            'info': d,
            'version': version,
            })
|
def f(c):
    o = c._opened
    d = c._debug_info
    if d:
        if len(d) == 1:
            d = d[0]
    else:
        d = ''
    d = "%s (%s)" % (d, len(c._cache))
|
ce4ac7b408fa6fc8823447bbc23b5a82a38b6394 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/ce4ac7b408fa6fc8823447bbc23b5a82a38b6394/DB.py
|
commitVersion=abortVersion
|
def close(self): pass
|
e354e393b298266867eeeb579cda9f0c33423afa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e354e393b298266867eeeb579cda9f0c33423afa/BaseStorage.py
|
|
raise UndoError, 'non-undoable transaction'
|
raise POSException.UndoError, 'non-undoable transaction'
|
def undo(self, transaction_id): raise UndoError, 'non-undoable transaction'
|
e354e393b298266867eeeb579cda9f0c33423afa /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e354e393b298266867eeeb579cda9f0c33423afa/BaseStorage.py
|
level=zLOG.BLATHER)
|
level=zLOG.DEBUG)
|
def handle_request(self, msgid, flags, name, args):
    if not self.check_method(name):
        msg = "Invalid method name: %s on %s" % (name, repr(self.obj))
        raise ZRPCError(msg)
    if __debug__:
        self.log("calling %s%s" % (name, short_repr(args)),
                 level=zLOG.BLATHER)
|
21a4fba62d94444a29956b4fd405f8d4f7f5e766 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/21a4fba62d94444a29956b4fd405f8d4f7f5e766/connection.py
|
(msgid, short_repr(reply)), level=zLOG.DEBUG)
|
(msgid, short_repr(reply)), level=zLOG.TRACE)
|
def wait(self, msgid):
    """Invoke asyncore mainloop and wait for reply."""
    if __debug__:
        self.log("wait(%d), async=%d" % (msgid, self.is_async()),
                 level=zLOG.TRACE)
    if self.is_async():
        self._pull_trigger()
|
21a4fba62d94444a29956b4fd405f8d4f7f5e766 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/21a4fba62d94444a29956b4fd405f8d4f7f5e766/connection.py
|
zLOG.LOG("winserver", zLOG.BLATHER, "map: %r" % asyncore.socket_map)
|
zLOG.LOG(label, zLOG.DEBUG, "map: %r" % asyncore.socket_map)
|
def main(port, storage_name, rawargs):
    klass = load_storage_class(storage_name)
    args = []
    for arg in rawargs:
        if arg.startswith('='):
            arg = eval(arg[1:], {'__builtins__': {}})
        args.append(arg)
    storage = klass(*args)
    zeo_port = int(port)
    test_port = zeo_port + 1
    t = ZEOTestServer(('', test_port), storage)
    serv = ZEO.StorageServer.StorageServer(('', zeo_port), {'1': storage})
    import zLOG
    while asyncore.socket_map:
        zLOG.LOG("winserver", zLOG.BLATHER, "map: %r" % asyncore.socket_map)
        asyncore.poll(30.0)
|
bb4a24c54d24ae1bfc7f4b6049c814266c569e20 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/bb4a24c54d24ae1bfc7f4b6049c814266c569e20/winserver.py
|
tsize = 0
|
def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._tpos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid), str(TimeStamp(trans.tid))) print >> file, "\toffset=%d status=%s user=%s description=%s" % \ (trans._tpos, `trans.status`, trans.user, trans.description) j = 0 tsize = 0 for rec in trans: if rec.data is None: fullclass = "undo or abort of object creation" else: modname, classname = get_pickle_metadata(rec.data) dig = md5.new(rec.data).hexdigest() fullclass = "%s.%s" % (modname, classname) # special case for testing purposes if fullclass == "ZODB.tests.MinPO.MinPO": obj = zodb_unpickle(rec.data) fullclass = "%s %s" % (fullclass, obj.value) if rec.version: version = "version=%s " % rec.version else: version = '' if rec.data_txn: # XXX It would be nice to print the transaction number # (i) but it would be too expensive to keep track of. bp = "bp=%016x" % u64(rec.data_txn) else: bp = "" if rec.data_txn: size = 8 + len(rec.version) else: if rec.data is None: # XXX why is rec.data None and rec.data_txn False? size = len(rec.version) else: size = len(rec.data) + len(rec.version) if rec.version: size += DATA_VERSION_HDR_LEN else: size += DATA_HDR_LEN tsize += size print >> file, " data #%05d oid=%016x %sclass=%s size=%d %s" % \ (j, u64(rec.oid), version, fullclass, size, bp) j += 1 print >> file i += 1 iter.close()
|
1f4f4c598de467410a507c16d3b2a99a80edfd4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f4f4c598de467410a507c16d3b2a99a80edfd4d/fsdump.py
|
|
tsize += size
|
def fsdump(path, file=None, with_offset=1): i = 0 iter = FileIterator(path) for trans in iter: if with_offset: print >> file, ("Trans #%05d tid=%016x time=%s size=%d" % (i, u64(trans.tid), str(TimeStamp(trans.tid)), trans._tend - trans._tpos)) else: print >> file, "Trans #%05d tid=%016x time=%s" % \ (i, u64(trans.tid), str(TimeStamp(trans.tid))) print >> file, "\toffset=%d status=%s user=%s description=%s" % \ (trans._tpos, `trans.status`, trans.user, trans.description) j = 0 tsize = 0 for rec in trans: if rec.data is None: fullclass = "undo or abort of object creation" else: modname, classname = get_pickle_metadata(rec.data) dig = md5.new(rec.data).hexdigest() fullclass = "%s.%s" % (modname, classname) # special case for testing purposes if fullclass == "ZODB.tests.MinPO.MinPO": obj = zodb_unpickle(rec.data) fullclass = "%s %s" % (fullclass, obj.value) if rec.version: version = "version=%s " % rec.version else: version = '' if rec.data_txn: # XXX It would be nice to print the transaction number # (i) but it would be too expensive to keep track of. bp = "bp=%016x" % u64(rec.data_txn) else: bp = "" if rec.data_txn: size = 8 + len(rec.version) else: if rec.data is None: # XXX why is rec.data None and rec.data_txn False? size = len(rec.version) else: size = len(rec.data) + len(rec.version) if rec.version: size += DATA_VERSION_HDR_LEN else: size += DATA_HDR_LEN tsize += size print >> file, " data #%05d oid=%016x %sclass=%s size=%d %s" % \ (j, u64(rec.oid), version, fullclass, size, bp) j += 1 print >> file i += 1 iter.close()
|
1f4f4c598de467410a507c16d3b2a99a80edfd4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/1f4f4c598de467410a507c16d3b2a99a80edfd4d/fsdump.py
|
|
try:
    xfsz = signal.SIFXFSZ
except AttributeError:
    pass
else:
    signal.signal(xfsz, signal.SIG_IGN)
signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
try:
|
if hasattr(signal, 'SIGXFSZ'):
    signal.signal(signal.SIGXFSZ, signal.SIG_IGN)
if hasattr(signal, 'SIGTERM'):
    signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
if hasattr(signal, 'SIGHUP'):
    signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
if hasattr(signal, 'SIGUSR2'):
|
def setup_signals(storages):
    try:
        import signal
    except ImportError:
        return
    try:
        xfsz = signal.SIFXFSZ
    except AttributeError:
        pass
    else:
        signal.signal(xfsz, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
    signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
    try:
        signal.signal(signal.SIGUSR2, rotate_logs_handler)
    except: pass
|
6827c2809d8f5e12cd2e9e16d948fed3d3d6c8d3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6827c2809d8f5e12cd2e9e16d948fed3d3d6c8d3/start.py
|
except: pass
|
def setup_signals(storages):
    try:
        import signal
    except ImportError:
        return
    try:
        xfsz = signal.SIFXFSZ
    except AttributeError:
        pass
    else:
        signal.signal(xfsz, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, lambda sig, frame: shutdown(storages))
    signal.signal(signal.SIGHUP, lambda sig, frame: shutdown(storages, 0))
    try:
        signal.signal(signal.SIGUSR2, rotate_logs_handler)
    except: pass
|
6827c2809d8f5e12cd2e9e16d948fed3d3d6c8d3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6827c2809d8f5e12cd2e9e16d948fed3d3d6c8d3/start.py
|
|
import signal
signal.signal(signal.SIGHUP, rotate_logs_handler)
|
def rotate_logs_handler(signum, frame):
    rotate_logs()
    import signal
    signal.signal(signal.SIGHUP, rotate_logs_handler)
|
6827c2809d8f5e12cd2e9e16d948fed3d3d6c8d3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/6827c2809d8f5e12cd2e9e16d948fed3d3d6c8d3/start.py
|
|
"""getExtensionMethods
|
"""getExtensionMethods
|
def getExtensionMethods(self):
    """getExtensionMethods
|
e7f02120c290fdebb7ad7a241c43804a06e4e1a3 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/e7f02120c290fdebb7ad7a241c43804a06e4e1a3/BaseStorage.py
|
if v < 0: v=t32-v
|
if v < 0: v=t32+v
|
def u64(v, unpack=struct.unpack):
    h, v = unpack(">ii", v)
    if v < 0: v=t32-v
    if h:
        if h < 0: h=t32-h
        v=h*t32+v
    return v
|
28ab567c0c7426ce62c85a6ba9a3cae2bc71a843 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10048/28ab567c0c7426ce62c85a6ba9a3cae2bc71a843/utils.py
|