Dataset columns: rem (string, lengths 1 to 322k), add (string, lengths 0 to 2.05M), context (string, lengths 4 to 228k), meta (string, lengths 156 to 215).
Each record below lists those fields in order: the removed line(s), the added line(s), the surrounding function context, and the meta line giving the source commit hash and file path.
return self.question + self.nugget_list[random.randint(0, len(self.nugget_list))]
return self.question + self.nugget_list[random.randint(0, len(self.nugget_list) - 1)]
def getSentence(self): return self.question + self.nugget_list[random.randint(0, len(self.nugget_list))]
54ad0d407555c5733db7f137d934e31fa6becd68 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14781/54ad0d407555c5733db7f137d934e31fa6becd68/module_nugget.py
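The change above fixes an off-by-one: random.randint's upper bound is inclusive, so indexing with random.randint(0, len(seq)) raises IndexError whenever the top value is drawn. A minimal sketch of the safe forms, using a plain list in place of the original nugget_list:

import random

nugget_list = ["fact one", "fact two", "fact three"]  # hypothetical stand-in for self.nugget_list

# random.randint(a, b) includes both endpoints, so the index must stop at len - 1
nugget = nugget_list[random.randint(0, len(nugget_list) - 1)]

# random.choice expresses the same intent without the index arithmetic
nugget = random.choice(nugget_list)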
self.archive_max = 246
self.archive_max = 255
def __init__(self, archive_num): self.question = "Did you know, " #archive_max checked 13-8-2009 self.archive_max = 246 self.archive_num = archive_num if self.archive_num > self.archive_max: self.archive_num = random.randint(1, self.archive_max) self.url = "http://en.wikipedia.org/wiki/Wikipedia:Recent_additions_" + str(self.archive_num) Nuggets.__init__(self)
54ad0d407555c5733db7f137d934e31fa6becd68 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14781/54ad0d407555c5733db7f137d934e31fa6becd68/module_nugget.py
if n.address == address:
aliases = socket.getaddrinfo(n.address[0], n.address[1], socket.AF_INET, socket.SOCK_STREAM) aliases = [a[4] for a in addresses] if address in aliases:
def buildProtocol(self, address): if re.match("[^a-z]+", address.host): log.error("Kludge fix for twisted.words weirdness") fqdn = socket.getfqdn(address.host) address = (fqdn, address.port) else: address = (address.host, address.port)
a81c8367ce399ceb13cec106b510d3328774c286 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/14781/a81c8367ce399ceb13cec106b510d3328774c286/pyfibot.py
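The replacement above compares an incoming connection against every address the node resolves to instead of a single equality check. A minimal sketch of the same idea, assuming plain (host, port) tuples rather than the bot's node objects:

import socket

def matches_node(node_addr, incoming_addr):
    # getaddrinfo returns 5-tuples; the last element is the (ip, port) sockaddr
    infos = socket.getaddrinfo(node_addr[0], node_addr[1],
                               socket.AF_INET, socket.SOCK_STREAM)
    aliases = [info[4] for info in infos]
    return incoming_addr in aliases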
ph.sendPacket(data, addr, broadcast=True)
ph.sendPacket(send_data, addr, broadcast=True)
def cb(tries): # Ack timeout callback
0e4caa489e60610812cfd57370ae31ec3cda29e3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8524/0e4caa489e60610812cfd57370ae31ec3cda29e3/core.py
def broadcastHeader(self, kind, src_ipp, hops=64, flags=0):
def broadcastHeader(self, kind, src_ipp, hops=32, flags=0):
def broadcastHeader(self, kind, src_ipp, hops=64, flags=0): # Build the header used for all broadcast packets packet = [kind] packet.append(self.main.osm.me.ipp) packet.append(struct.pack('!BB', hops, flags)) packet.append(src_ipp) return packet
0e4caa489e60610812cfd57370ae31ec3cda29e3 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8524/0e4caa489e60610812cfd57370ae31ec3cda29e3/core.py
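The header above packs the hop count and flags as two unsigned bytes; only the default hop limit changes from 64 to 32. A minimal sketch of that packing step with struct (values are illustrative):

import struct

hops, flags = 32, 0
tail = struct.pack('!BB', hops, flags)      # network byte order, two unsigned bytes
assert len(tail) == 2
assert struct.unpack('!BB', tail) == (32, 0)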
self.body = md5(str((random() * 1000000000L) + time())).hexdigest() + '@' + SipConf.my_address
self.body = md5(str((random() * 1000000000L) + time())).hexdigest() + '@' + str(SipConf.my_address)
def __init__(self, body = None): SipGenericHF.__init__(self, body) self.parsed = True if body == None: self.body = md5(str((random() * 1000000000L) + time())).hexdigest() + '@' + SipConf.my_address
23c9cd79b4126717763b6ec9124e33fd8eef446f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8853/23c9cd79b4126717763b6ec9124e33fd8eef446f/SipCallId.py
self.body = md5(str((random() * 1000000000L) + time())).hexdigest() + '@' + SipConf.my_address
self.body = md5(str((random() * 1000000000L) + time())).hexdigest() + '@' + str(SipConf.my_address)
def genCallId(self): self.body = md5(str((random() * 1000000000L) + time())).hexdigest() + '@' + SipConf.my_address
23c9cd79b4126717763b6ec9124e33fd8eef446f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8853/23c9cd79b4126717763b6ec9124e33fd8eef446f/SipCallId.py
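Both Call-ID changes wrap SipConf.my_address in str() before concatenation, since '+' between a str and a non-string object raises TypeError. A minimal sketch with a hypothetical stand-in object:

class MyAddress(object):
    # hypothetical stand-in for a SipConf.my_address-like object
    def __str__(self):
        return "192.0.2.1"

addr = MyAddress()
# "deadbeef" + '@' + addr would raise TypeError; the explicit conversion is required
call_id = "deadbeef" + '@' + str(addr)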
elif code > 100 and code < 200 and self.ua.expire_time != None:
elif code < 200 and self.ua.expire_time != None:
def recvResponse(self, resp): body = resp.getBody() code, reason = resp.getSCode() scode = (code, reason, body) if self.ua.no_reply_timer != None: self.ua.no_reply_timer.cancel() self.ua.no_reply_timer = None if code == 100 and self.ua.no_progress_time != None: self.ua.no_progress_timer = TimeoutAbs(self.ua.no_progress_expires, self.ua.no_progress_time) elif code > 100 and code < 200 and self.ua.expire_time != None: self.ua.expire_timer = TimeoutAbs(self.ua.expires, self.ua.expire_time) if code == 100: if self.ua.p100_ts == None: self.ua.p100_ts = resp.rtime self.ua.equeue.append(CCEventRing(scode, rtime = resp.rtime, origin = self.ua.origin)) return None if self.ua.no_progress_timer != None: self.ua.no_progress_timer.cancel() self.ua.no_progress_timer = None if code < 200 and self.ua.expire_time != None: self.ua.expire_timer = TimeoutAbs(self.ua.expires, self.ua.expire_time) if code < 200: self.ua.last_scode = code event = CCEventRing(scode, rtime = resp.rtime, origin = self.ua.origin) if body != None: if self.ua.on_remote_sdp_change != None: self.ua.on_remote_sdp_change(body, lambda x: self.ua.delayed_remote_sdp_update(event, x)) self.ua.p1xx_ts = resp.rtime return (UacStateRinging, self.ua.ring_cbs, resp.rtime, self.ua.origin, code) else: self.ua.rSDP = body.getCopy() else: self.ua.rSDP = None self.ua.equeue.append(event) self.ua.p1xx_ts = resp.rtime return (UacStateRinging, self.ua.ring_cbs, resp.rtime, self.ua.origin, code) if self.ua.expire_timer != None: self.ua.expire_timer.cancel() self.ua.expire_timer = None if code >= 200 and code < 300: if resp.countHFs('contact') > 0: self.ua.rTarget = resp.getHFBody('contact').getUrl().getCopy() self.ua.routes = [x.getCopy() for x in resp.getHFBodys('record-route')] self.ua.routes.reverse() if len(self.ua.routes) > 0: if not self.ua.routes[0].getUrl().lr: self.ua.routes.append(SipRoute(address = SipAddress(url = self.ua.rTarget.getCopy()))) self.ua.rTarget = self.ua.routes.pop(0).getUrl() self.ua.rAddr = self.ua.rTarget.getAddr() else: self.ua.rAddr = self.ua.routes[0].getAddr() else: self.ua.rAddr = self.ua.rTarget.getAddr() self.ua.rUri.setTag(resp.getHFBody('to').getTag()) event = CCEventConnect(scode, rtime = resp.rtime, origin = self.ua.origin) self.ua.startCreditTimer(resp.rtime) if body != None: if self.ua.on_remote_sdp_change != None: self.ua.on_remote_sdp_change(body, lambda x: self.ua.delayed_remote_sdp_update(event, x)) self.ua.connect_ts = resp.rtime return (UaStateConnected, self.ua.conn_cbs, resp.rtime, self.ua.origin) else: self.ua.rSDP = body.getCopy() else: self.ua.rSDP = None self.ua.equeue.append(event) self.ua.connect_ts = resp.rtime return (UaStateConnected, self.ua.conn_cbs, resp.rtime, self.ua.origin) if code in (301, 302) and resp.countHFs('contact') > 0: scode = (code, reason, body, resp.getHFBody('contact').getUrl().getCopy()) self.ua.equeue.append(CCEventRedirect(scode, rtime = resp.rtime, origin = self.ua.origin)) else: event = CCEventFail(scode, rtime = resp.rtime, origin = self.ua.origin) try: event.reason = resp.getHFBody('reason') except: pass self.ua.equeue.append(event) self.ua.disconnect_ts = resp.rtime return (UaStateFailed, self.ua.fail_cbs, resp.rtime, self.ua.origin, code)
083d47c3f1f9c417a8a290afb0688d6fbcb15edb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8853/083d47c3f1f9c417a8a290afb0688d6fbcb15edb/UacStateTrying.py
self.ua.equeqe.append(CCEventRedirect(scode, rtime = resp.rtime, origin = self.ua.origin))
self.ua.equeue.append(CCEventRedirect(scode, rtime = resp.rtime, origin = self.ua.origin))
def recvResponse(self, resp): body = resp.getBody() code, reason = resp.getSCode() scode = (code, reason, body) if code < 200: self.ua.equeue.append(CCEventRing(scode, rtime = resp.rtime, origin = self.ua.origin)) return None if code >= 200 and code < 300: event = CCEventConnect(scode, rtime = resp.rtime, origin = self.ua.origin) if body != None: if self.ua.on_remote_sdp_change != None: self.ua.on_remote_sdp_change(body, lambda x: self.ua.delayed_remote_sdp_update(event, x)) return (UaStateConnected,) else: self.ua.rSDP = body.getCopy() else: self.ua.rSDP = None self.ua.equeue.append(event) return (UaStateConnected,) if code in (301, 302) and resp.countHFs('contact') > 0: scode = (code, reason, body, resp.getHFBody('contact').getUrl().getCopy()) self.ua.equeqe.append(CCEventRedirect(scode, rtime = resp.rtime, origin = self.ua.origin)) elif code in (408, 481): # If the response for a request within a dialog is a 481 # (Call/Transaction Does Not Exist) or a 408 (Request Timeout), the UAC # SHOULD terminate the dialog. A UAC SHOULD also terminate a dialog if # no response at all is received for the request (the client # transaction would inform the TU about the timeout.) event = CCEventDisconnect(rtime = resp.rtime, origin = self.ua.origin) try: event.reason = resp.getHFBody('reason') except: pass self.ua.equeue.append(event) self.ua.cancelCreditTimer() self.ua.disconnect_ts = resp.rtime return (UaStateDisconnected, self.ua.disc_cbs, resp.rtime, self.ua.origin) else: event = CCEventFail(scode, rtime = resp.rtime, origin = self.ua.origin) try: event.reason = resp.getHFBody('reason') except: pass self.ua.equeue.append(event) return (UaStateConnected,)
c37da33c24c738b20eabe767f384a533b11d0cb0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/8853/c37da33c24c738b20eabe767f384a533b11d0cb0/UacStateUpdating.py
mmncfp.write(title + '\n\n') mmncfp.write('MMRRC#' + reportlib.TAB) mmncfp.write('Strain' + reportlib.TAB) mmncfp.write('Genotypes' + reportlib.TAB) mmncfp.write(reportlib.CRT)
mmrrcfp.write(title + '\n\n') mmrrcfp.write('MMRRC#' + reportlib.TAB) mmrrcfp.write('Strain' + reportlib.TAB) mmrrcfp.write('Genotypes' + reportlib.TAB) mmrrcfp.write(reportlib.CRT)
def mmrrc(): mmrrcfp = reportlib.init(sys.argv[0], outputdir = os.environ['QCOUTPUTDIR'], fileExt = '.mmrrc.rpt') title = 'MMRRC Strains w/ Genotype Associations where the Markers/Alleles of the Strain record\n' + \ 'do not exactly match the Markers/Alleles of the Genotype record.' mmncfp.write(title + '\n\n') mmncfp.write('MMRRC#' + reportlib.TAB) mmncfp.write('Strain' + reportlib.TAB) mmncfp.write('Genotypes' + reportlib.TAB) mmncfp.write(reportlib.CRT) # MMNC Strains w/ Genotype Associations; exclude wild type alleles db.sql('''select distinct sa.accID, s.strain, g._Genotype_key, g._Strain_key, a._Marker_key, a._Allele_key into #strains from PRB_Strain s, PRB_Strain_Genotype g, GXD_AlleleGenotype a, ALL_Allele aa, ACC_Accession sa where s.strain like "%/Mmnc" and s._Strain_key = g._Strain_key and g._Genotype_key = a._Genotype_key and a._Allele_key = aa._Allele_key and aa.isWildType = 0 and s._Strain_key = sa._Object_key and sa._MGIType_key = 10 and sa._LogicalDB_key = 38 and sa.preferred = 1 ''', None) db.sql('create index idx1 on #strains(_Strain_key)', None) printReport(mmrrcfp)
6b5b1b0d2e420fc9514b16a9a53dfd53af597e41 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/340/6b5b1b0d2e420fc9514b16a9a53dfd53af597e41/PRB_StrainJAX3.py
coords = polar_to_cartesian(self.base_x,
coords = polar_to_cartesian(self.base_x,
def to_point(self): '''Convert from PolarPoint to (cartesian) Point object'''
ce2b4b6fe9cf3418e9c7ba05c5479e2063ca2945 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5205/ce2b4b6fe9cf3418e9c7ba05c5479e2063ca2945/polar.py
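to_point above delegates to a polar_to_cartesian helper. As a generic illustration only (plain 2-D polar coordinates in radians, not the gon angles, zenith angle, and base point the totalopenstation code actually uses), the conversion is:

import math

def polar_to_cartesian_2d(base_x, base_y, dist, angle):
    # angle in radians, measured counter-clockwise from the x axis
    return (base_x + dist * math.cos(angle),
            base_y + dist * math.sin(angle))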
d = ConnectDialog(self.myParent, connection_string)
d = ConnectDialog(self.myParent, str(TOPSerial))
def connect_action(self, event):
8769665e8945c23bc1da0785cf42c85fac6bf5f4 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5205/8769665e8945c23bc1da0785cf42c85fac6bf5f4/totalopenstation-gui.py
cs += + str(int(eval("self.option%s_value.get()" % n)))
cs += str(int(eval("self.option%s_value.get()" % n)))
def connect_action(self, event):
e13bcd56a59ec96602d1ae56413a2e14db8c7392 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5205/e13bcd56a59ec96602d1ae56413a2e14db8c7392/totalopenstation-gui.py
iformat = __import__('totalopenstation.models.%s' % module,
imodel = __import__('totalopenstation.models.%s' % module,
def connect_action(self, event):
e13bcd56a59ec96602d1ae56413a2e14db8c7392 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5205/e13bcd56a59ec96602d1ae56413a2e14db8c7392/totalopenstation-gui.py
mc = iformats.ModelConnector(chosen_port)
mc = imodel.ModelConnector(chosen_port)
def connect_action(self, event):
e13bcd56a59ec96602d1ae56413a2e14db8c7392 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5205/e13bcd56a59ec96602d1ae56413a2e14db8c7392/totalopenstation-gui.py
if x == '1.00' and y == '1.00' and z == '1.00': is_point = False
if x == '1.00' and y == '1.00' and z == '1.00': is_point = False
def is_point(self, line):
c487a87ce311df7c26ef360066542dcf83831fe6 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5205/c487a87ce311df7c26ef360066542dcf83831fe6/leica_tcr_705.py
points.append(p.to_point())
points.append(p.to_point())
def _points(self): points = [] for row in rows: fs = row.split(',') if fs[0] == 'ST': x = fs[-3] # FIXME NEZ coord order shouldn't be hardcoded y = fs[-2] z = fs[-1] bp = BasePoint(x=x, y=y, z=z, ih=0) if fs[0] == 'SS': angle = fs[4] z_angle = fs[5] dist = fs[3] th = fs[2] p = PolarPoint(dist=dist, angle=angle, z_angle=z_angle, th=th, angle_type='gon', base_point=bp) points.append(p.to_point()) return points
b195cafca142d20cbb939dd94e783a26fb9e282a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/5205/b195cafca142d20cbb939dd94e783a26fb9e282a/nikon_raw_v200.py
pass
webbrowser.open(link)
def url(dialog, link, data=None): pass
d424f4badedea026e86b750a7af75c76e8fe8c34 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/d424f4badedea026e86b750a7af75c76e8fe8c34/main.py
tmp = "/tmp" for file in os.listdir(tmp): try: if file.startswith("Flash"): filepath = os.path.join(tmp, file)
for filepath in commands.getoutput("pgrep -f flashplayer | xargs -I PID find /proc/PID/fd -lname '/tmp/Flash*'").split("\n"): if filepath != "": try:
def _check_for_Flash(self): try: tmp = "/tmp" ## look for filenames that begin with 'Flash' for file in os.listdir(tmp): try: if file.startswith("Flash"): filepath = os.path.join(tmp, file) duration = utils.getFLVLength(filepath)
a84893f830ac0afc45ec7b6a342c7e128c06e74d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/a84893f830ac0afc45ec7b6a342c7e128c06e74d/core.py
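The rewritten _check_for_Flash above locates Flash's deleted-but-still-open temp files through /proc/<pid>/fd symlinks instead of scanning /tmp for filenames. A minimal pure-Python sketch of the same idea (Linux-only; it scans every process rather than pgrep-matching flashplayer, which is a simplification):

import os

def open_flash_files():
    paths = []
    for pid in filter(str.isdigit, os.listdir("/proc")):
        fd_dir = "/proc/%s/fd" % pid
        try:
            fds = os.listdir(fd_dir)
        except OSError:                 # process exited or permission denied
            continue
        for fd in fds:
            try:
                target = os.readlink(os.path.join(fd_dir, fd))
            except OSError:
                continue
            if target.startswith("/tmp/Flash"):
                paths.append(target)
    return paths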
except Exception, data: logging.error("Exception: " + str(data))
except Exception, data: logging.error("Exception: " + str(data))
def _check_for_Flash(self): try: tmp = "/tmp" ## look for filenames that begin with 'Flash' for file in os.listdir(tmp): try: if file.startswith("Flash"): filepath = os.path.join(tmp, file) duration = utils.getFLVLength(filepath)
a84893f830ac0afc45ec7b6a342c7e128c06e74d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/a84893f830ac0afc45ec7b6a342c7e128c06e74d/core.py
TIMER_OPTIONS_LIST = [(_("5 minutes"), 300.0), (_("10 minutes"), 600.0), (_("15 minutes"), 900.0), (_("30 minutes"), 1800.0), (_("1 hour"), 3600.0), (_("2 hours"), 7200.0), (_("3 hours"), 10800.0), (_("4 hours"), 14400.0)] time_menuitem = get("time_menuitem") submenu = gtk.Menu() for label, t in TIMER_OPTIONS_LIST: menuItem = gtk.MenuItem(label=label) menuItem.connect('activate', self.on_time_submenuitem_activate, t) submenu.append(menuItem) separator = gtk.SeparatorMenuItem() submenu.append(separator) menuItem = gtk.MenuItem(label=_("Other...")) menuItem.connect('activate', self.on_other_submenuitem_activate) submenu.append(menuItem) time_menuitem.set_submenu(submenu) submenu.show_all()
def __init__(self): self.Core = core.Caffeine()
11ecae0b360dfa542f7f4abcc0350bd1db7f3add /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/11ecae0b360dfa542f7f4abcc0350bd1db7f3add/main.py
def on_about_button_clicked (self, button, data=None): response = self.about_dialog.run() self.about_dialog.hide()
def on_about_button_clicked (self, button, data=None):
11ecae0b360dfa542f7f4abcc0350bd1db7f3add /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/11ecae0b360dfa542f7f4abcc0350bd1db7f3add/main.py
def on_time_submenuitem_activate(self, menuitem, time): self.timedActivation(time)
def on_time_menuitem_activate(self, menuitem, data=None): self.othertime_dialog.show_all()
def on_time_submenuitem_activate(self, menuitem, time):
11ecae0b360dfa542f7f4abcc0350bd1db7f3add /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/11ecae0b360dfa542f7f4abcc0350bd1db7f3add/main.py
def _run_dialog(self):
def on_about_menuitem_activate(self, menuitem, data=None): if appindicator_avail: gtk.gdk.threads_enter() self.about_dialog.set_position (gtk.WIN_POS_CENTER_ALWAYS)
def _run_dialog(self): response = self.about_dialog.run() self.about_dialog.destroy() return False
11ecae0b360dfa542f7f4abcc0350bd1db7f3add /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/11ecae0b360dfa542f7f4abcc0350bd1db7f3add/main.py
self.about_dialog.destroy() return False def on_about_menuitem_activate(self, menuitem, data=None): gobject.idle_add(self._run_dialog) def on_other_submenuitem_activate(self, menuitem, data=None): self.othertime_dialog.show_all()
self.about_dialog.hide() if appindicator_avail: gtk.gdk.threads_leave()
def _run_dialog(self): response = self.about_dialog.run() self.about_dialog.destroy() return False
11ecae0b360dfa542f7f4abcc0350bd1db7f3add /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/11ecae0b360dfa542f7f4abcc0350bd1db7f3add/main.py
self.Core.setActivated(True)
self.Core.setActivated(False)
def quit(self): ### Do anything that needs to be done before quitting. logging.info("Caffeine is preparing to quit")
941357d61d3ff9036ff03944f1d0b4ad6797a889 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/1563/941357d61d3ff9036ff03944f1d0b4ad6797a889/main.py
inode = self.inode_read(ino) inode["uid"] = uid inode["gid"] = gid
if uid == -1 and gid == -1: return inode = self.inode_read(ino) if uid != -1: inode["uid"] = uid if gid != -1: inode["gid"] = gid if inode["mode"] & (S_IXUSR|S_IXGRP|S_IXOTH): inode["mode"] &= ~(S_ISUID|S_ISGID) inode.set_time_fields(change=True)
def os_chown(self, path, uid, gid): ino = self.ino_from_path(path) inode = self.inode_read(ino) inode["uid"] = uid inode["gid"] = gid self.inode_write(ino, inode)
092f290bc5197295a6329f7d14fedc254e561243 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/092f290bc5197295a6329f7d14fedc254e561243/clfs.py
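The os_chown rewrite above follows POSIX chown semantics: a uid or gid of -1 leaves that field untouched, and changing ownership of an executable clears the setuid/setgid bits. A minimal sketch of just the mode-bit step on a bare integer mode:

from stat import S_ISUID, S_ISGID, S_IXUSR, S_IXGRP, S_IXOTH

def strip_setid_bits(mode):
    # mirror the chown fix: any execute bit present means setuid/setgid get dropped
    if mode & (S_IXUSR | S_IXGRP | S_IXOTH):
        mode &= ~(S_ISUID | S_ISGID)
    return mode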
def _rmdir(self, dirino, name): offset, dirent = self._scandir(dirino, name) inode = self.inode_read(dirent["ino"])
def _rmdir(self, pino, name): offset, dirent = self._scandir(pino, name) ino = dirent["ino"] inode = self.inode_read(ino)
def _rmdir(self, dirino, name): offset, dirent = self._scandir(dirino, name) inode = self.inode_read(dirent["ino"]) if not S_ISDIR(inode["mode"]): raise ClfsError(ENOTDIR) if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY) if inode["nlink"] < 2: logger.error( "Directory inode %i has invalid link count %i", dirent["ino"], inode["nlink"]) # free all the clusters of the directory self.chain_shorten(dirent["ino"], 0) # this is now invalidated and useless del inode dirinode = self.inode_read(dirino) # we're removing a subdir dirinode["nlink"] -= 1 self.dir_remove(dirino, offset, dirinode) self.inode_write(dirino, dirinode)
092f290bc5197295a6329f7d14fedc254e561243 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/092f290bc5197295a6329f7d14fedc254e561243/clfs.py
if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY)
def _rmdir(self, dirino, name): offset, dirent = self._scandir(dirino, name) inode = self.inode_read(dirent["ino"]) if not S_ISDIR(inode["mode"]): raise ClfsError(ENOTDIR) if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY) if inode["nlink"] < 2: logger.error( "Directory inode %i has invalid link count %i", dirent["ino"], inode["nlink"]) # free all the clusters of the directory self.chain_shorten(dirent["ino"], 0) # this is now invalidated and useless del inode dirinode = self.inode_read(dirino) # we're removing a subdir dirinode["nlink"] -= 1 self.dir_remove(dirino, offset, dirinode) self.inode_write(dirino, dirinode)
092f290bc5197295a6329f7d14fedc254e561243 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/092f290bc5197295a6329f7d14fedc254e561243/clfs.py
dirent["ino"], inode["nlink"])
ino, inode["nlink"]) if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY) else: try: self.dir_iter(ino, inode).next() except StopIteration: pass else: raise ClfsError(ENOTEMPTY)
def _rmdir(self, dirino, name): offset, dirent = self._scandir(dirino, name) inode = self.inode_read(dirent["ino"]) if not S_ISDIR(inode["mode"]): raise ClfsError(ENOTDIR) if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY) if inode["nlink"] < 2: logger.error( "Directory inode %i has invalid link count %i", dirent["ino"], inode["nlink"]) # free all the clusters of the directory self.chain_shorten(dirent["ino"], 0) # this is now invalidated and useless del inode dirinode = self.inode_read(dirino) # we're removing a subdir dirinode["nlink"] -= 1 self.dir_remove(dirino, offset, dirinode) self.inode_write(dirino, dirinode)
092f290bc5197295a6329f7d14fedc254e561243 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/092f290bc5197295a6329f7d14fedc254e561243/clfs.py
self.chain_shorten(dirent["ino"], 0)
self.chain_shorten(ino, 0)
def _rmdir(self, dirino, name): offset, dirent = self._scandir(dirino, name) inode = self.inode_read(dirent["ino"]) if not S_ISDIR(inode["mode"]): raise ClfsError(ENOTDIR) if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY) if inode["nlink"] < 2: logger.error( "Directory inode %i has invalid link count %i", dirent["ino"], inode["nlink"]) # free all the clusters of the directory self.chain_shorten(dirent["ino"], 0) # this is now invalidated and useless del inode dirinode = self.inode_read(dirino) # we're removing a subdir dirinode["nlink"] -= 1 self.dir_remove(dirino, offset, dirinode) self.inode_write(dirino, dirinode)
092f290bc5197295a6329f7d14fedc254e561243 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/092f290bc5197295a6329f7d14fedc254e561243/clfs.py
dirinode = self.inode_read(dirino)
self.inode_write(ino, Inode()) pinode = self.inode_read(pino)
def _rmdir(self, dirino, name): offset, dirent = self._scandir(dirino, name) inode = self.inode_read(dirent["ino"]) if not S_ISDIR(inode["mode"]): raise ClfsError(ENOTDIR) if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY) if inode["nlink"] < 2: logger.error( "Directory inode %i has invalid link count %i", dirent["ino"], inode["nlink"]) # free all the clusters of the directory self.chain_shorten(dirent["ino"], 0) # this is now invalidated and useless del inode dirinode = self.inode_read(dirino) # we're removing a subdir dirinode["nlink"] -= 1 self.dir_remove(dirino, offset, dirinode) self.inode_write(dirino, dirinode)
092f290bc5197295a6329f7d14fedc254e561243 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/092f290bc5197295a6329f7d14fedc254e561243/clfs.py
dirinode["nlink"] -= 1 self.dir_remove(dirino, offset, dirinode) self.inode_write(dirino, dirinode)
pinode["nlink"] -= 1 self.dir_remove(pino, offset, pinode) self.inode_write(pino, pinode)
def _rmdir(self, dirino, name): offset, dirent = self._scandir(dirino, name) inode = self.inode_read(dirent["ino"]) if not S_ISDIR(inode["mode"]): raise ClfsError(ENOTDIR) if inode["nlink"] > 2: raise ClfsError(ENOTEMPTY) if inode["nlink"] < 2: logger.error( "Directory inode %i has invalid link count %i", dirent["ino"], inode["nlink"]) # free all the clusters of the directory self.chain_shorten(dirent["ino"], 0) # this is now invalidated and useless del inode dirinode = self.inode_read(dirino) # we're removing a subdir dirinode["nlink"] -= 1 self.dir_remove(dirino, offset, dirinode) self.inode_write(dirino, dirinode)
092f290bc5197295a6329f7d14fedc254e561243 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/092f290bc5197295a6329f7d14fedc254e561243/clfs.py
class ClfsError(Exception):
class ClfsError(OSError):
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
a84a2929a18f9791e9b3c2a51556bedfe767a67c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a84a2929a18f9791e9b3c2a51556bedfe767a67c/clfs.py
self.errno = errno
OSError.__init__(self, errno, strerror(errno))
def __init__(self, errno): self.errno = errno
a84a2929a18f9791e9b3c2a51556bedfe767a67c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a84a2929a18f9791e9b3c2a51556bedfe767a67c/clfs.py
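The change above makes ClfsError an OSError subclass and lets the base constructor populate errno and strerror instead of storing errno by hand. A minimal sketch of the pattern:

from errno import ENOENT
from os import strerror

class ClfsError(OSError):
    def __init__(self, errno):
        OSError.__init__(self, errno, strerror(errno))

err = ClfsError(ENOENT)
# err.errno == ENOENT, and err.strerror carries the platform's message for it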
def create_node(self, path, type): #pdb.set_trace() node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0, links=1) assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
a84a2929a18f9791e9b3c2a51556bedfe767a67c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a84a2929a18f9791e9b3c2a51556bedfe767a67c/clfs.py
new_inode = Inode(type=type, size=0, links=1)
new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1
def create_node(self, path, type): #pdb.set_trace() node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0, links=1) assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
a84a2929a18f9791e9b3c2a51556bedfe767a67c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a84a2929a18f9791e9b3c2a51556bedfe767a67c/clfs.py
doc.write("Players sorted by descending protection zone lock time remaining. Also shown is their level, vocation, guild, and most recent victim.")
doc.add_tag("p", data="Players sorted by descending protection zone lock time remaining. Also shown is their level, vocation, guild, and most recent victim. Shown first are those that are still PZL'd. The second table contains those that should have lost their PZL by now.") column_count = 6 if not world: column_count += 1
def pz_locked(doc, pageContext): world = pageContext.get_selected_world() curtime = int(time.time()) limits = (0, 30) #limits = (0, 200) doc.write("Players sorted by descending protection zone lock time remaining. Also shown is their level, vocation, guild, and most recent victim.") with stattab_table_tag(doc.open_tag): with doc.open_tag("tr", inline=False): if not world: doc.add_tag("th", "World") doc.add_tag("th", "PZ Lock End") doc.add_tag("th", "Killer") doc.add_tag("th", "Level") doc.add_tag("th", "Vocation") doc.add_tag("th", "Guild") doc.add_tag("th", "Last Victim") rowColor = stattab_row_class() for pzlock in dbiface.get_last_pzlocks(world, limits): killerInfo = dbiface.get_char(pzlock["killer"]) #pdb.set_trace() pzEndStamp = dbiface.pz_end(pzlock) if world is None or killerInfo["world"] == world: rowAttrs = {"class": rowColor.next()} if pzEndStamp < int(time.time()): rowAttrs["class"] += " greyed" with doc.open_tag("tr", attrs=rowAttrs, inline=False): assert killerInfo["name"] == pzlock["killer"] if not world: doc.add_tag("td", killerInfo["world"]) doc.add_tag("td", human_time_diff(pzEndStamp)) doc.add_tag("td", char_link(pzlock["killer"])) for field in ("level", "vocation"): doc.add_tag("td", killerInfo[field]) doc.add_tag("td", pageContext.guild_link(killerInfo["guild"])) doc.add_tag("td", char_link(pzlock["victim"]))
e8c01a7a2c0aee291735f4d3cf0f1bf1195fef6d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/e8c01a7a2c0aee291735f4d3cf0f1bf1195fef6d/pages.py
with doc.open_tag("tr", inline=False): if not world: doc.add_tag("th", "World") doc.add_tag("th", "PZ Lock End") doc.add_tag("th", "Killer") doc.add_tag("th", "Level") doc.add_tag("th", "Vocation") doc.add_tag("th", "Guild") doc.add_tag("th", "Last Victim")
def add_header_row(): with doc.open_tag("tr", inline=False): if not world: doc.add_tag("th", "World") doc.add_tag("th", "PZ Lock End") doc.add_tag("th", "Killer") doc.add_tag("th", "Level") doc.add_tag("th", "Vocation") doc.add_tag("th", "Guild") doc.add_tag("th", "Last Victim") add_header_row()
def pz_locked(doc, pageContext): world = pageContext.get_selected_world() curtime = int(time.time()) limits = (0, 30) #limits = (0, 200) doc.write("Players sorted by descending protection zone lock time remaining. Also shown is their level, vocation, guild, and most recent victim.") with stattab_table_tag(doc.open_tag): with doc.open_tag("tr", inline=False): if not world: doc.add_tag("th", "World") doc.add_tag("th", "PZ Lock End") doc.add_tag("th", "Killer") doc.add_tag("th", "Level") doc.add_tag("th", "Vocation") doc.add_tag("th", "Guild") doc.add_tag("th", "Last Victim") rowColor = stattab_row_class() for pzlock in dbiface.get_last_pzlocks(world, limits): killerInfo = dbiface.get_char(pzlock["killer"]) #pdb.set_trace() pzEndStamp = dbiface.pz_end(pzlock) if world is None or killerInfo["world"] == world: rowAttrs = {"class": rowColor.next()} if pzEndStamp < int(time.time()): rowAttrs["class"] += " greyed" with doc.open_tag("tr", attrs=rowAttrs, inline=False): assert killerInfo["name"] == pzlock["killer"] if not world: doc.add_tag("td", killerInfo["world"]) doc.add_tag("td", human_time_diff(pzEndStamp)) doc.add_tag("td", char_link(pzlock["killer"])) for field in ("level", "vocation"): doc.add_tag("td", killerInfo[field]) doc.add_tag("td", pageContext.guild_link(killerInfo["guild"])) doc.add_tag("td", char_link(pzlock["victim"]))
e8c01a7a2c0aee291735f4d3cf0f1bf1195fef6d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/e8c01a7a2c0aee291735f4d3cf0f1bf1195fef6d/pages.py
if pzEndStamp < int(time.time()):
if not doing_still_pzlocked_rows:
def pz_locked(doc, pageContext): world = pageContext.get_selected_world() curtime = int(time.time()) limits = (0, 30) #limits = (0, 200) doc.write("Players sorted by descending protection zone lock time remaining. Also shown is their level, vocation, guild, and most recent victim.") with stattab_table_tag(doc.open_tag): with doc.open_tag("tr", inline=False): if not world: doc.add_tag("th", "World") doc.add_tag("th", "PZ Lock End") doc.add_tag("th", "Killer") doc.add_tag("th", "Level") doc.add_tag("th", "Vocation") doc.add_tag("th", "Guild") doc.add_tag("th", "Last Victim") rowColor = stattab_row_class() for pzlock in dbiface.get_last_pzlocks(world, limits): killerInfo = dbiface.get_char(pzlock["killer"]) #pdb.set_trace() pzEndStamp = dbiface.pz_end(pzlock) if world is None or killerInfo["world"] == world: rowAttrs = {"class": rowColor.next()} if pzEndStamp < int(time.time()): rowAttrs["class"] += " greyed" with doc.open_tag("tr", attrs=rowAttrs, inline=False): assert killerInfo["name"] == pzlock["killer"] if not world: doc.add_tag("td", killerInfo["world"]) doc.add_tag("td", human_time_diff(pzEndStamp)) doc.add_tag("td", char_link(pzlock["killer"])) for field in ("level", "vocation"): doc.add_tag("td", killerInfo[field]) doc.add_tag("td", pageContext.guild_link(killerInfo["guild"])) doc.add_tag("td", char_link(pzlock["victim"]))
e8c01a7a2c0aee291735f4d3cf0f1bf1195fef6d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/e8c01a7a2c0aee291735f4d3cf0f1bf1195fef6d/pages.py
import curses class TermInfo(): _ANSI_COLORS = """BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE""".split() _STRING_CAPS = """NORMAL=sgr0""".split() def __init__(self, stream=sys.stdout): curses.setupterm(None, stream.fileno()) for prefix, capname in [("FG_", "setaf"), ("BG_", "setab")]: for index, color in zip(range(len(self._ANSI_COLORS)), self._ANSI_COLORS): setattr(self, prefix + color, curses.tparm(curses.tigetstr(capname), index)) for strcap in self._STRING_CAPS: attr, capname = strcap.split("=") setattr(self, attr, curses.tigetstr(capname)) self.stream = stream def reset(self): self.immediate(self.NORMAL) def immediate(self, tistr): self.stream.write(tistr) self.stream.flush() def save_color(self): return None def set_color(self, color): self.immediate(getattr(self, "FG_" + color.upper())) def reset_color(self): self.reset() return TermInfo
try: import curses except ImportError: pass else: class TermInfo(): _ANSI_COLORS = """BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE""".split() _STRING_CAPS = """NORMAL=sgr0""".split() def __init__(self, stream=sys.stdout): curses.setupterm(None, stream.fileno()) for prefix, capname in [("FG_", "setaf"), ("BG_", "setab")]: for index, color in zip(range(len(self._ANSI_COLORS)), self._ANSI_COLORS): setattr(self, prefix + color, curses.tparm(curses.tigetstr(capname), index)) for strcap in self._STRING_CAPS: attr, capname = strcap.split("=") setattr(self, attr, curses.tigetstr(capname)) self.stream = stream def reset(self): self.immediate(self.NORMAL) def immediate(self, tistr): self.stream.write(tistr) self.stream.flush() def save_color(self): return None def set_color(self, color): self.immediate(getattr(self, "FG_" + color.upper())) def reset_color(self): self.reset() return TermInfo
def curses_color(): import curses class TermInfo(): # this. is. ANSIIIII!!! _ANSI_COLORS = """BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE""".split() _STRING_CAPS = """NORMAL=sgr0""".split() def __init__(self, stream=sys.stdout): # isatty might be needed here? curses.setupterm(None, stream.fileno()) for prefix, capname in [("FG_", "setaf"), ("BG_", "setab")]: for index, color in zip(range(len(self._ANSI_COLORS)), self._ANSI_COLORS): setattr(self, prefix + color, curses.tparm(curses.tigetstr(capname), index)) for strcap in self._STRING_CAPS: attr, capname = strcap.split("=") setattr(self, attr, curses.tigetstr(capname)) self.stream = stream #def __del__(self): # self.reset() def reset(self): self.immediate(self.NORMAL) def immediate(self, tistr): self.stream.write(tistr) self.stream.flush() def save_color(self): return None def set_color(self, color): self.immediate(getattr(self, "FG_" + color.upper())) def reset_color(self): #self.immediate(self.__color) self.reset() return TermInfo
5d26939fc8b6d54f9088b896d20c46709f4e4dcc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/5d26939fc8b6d54f9088b896d20c46709f4e4dcc/termclr.py
try: import curses except ImportError: import ctypes try: from ctypes import wintypes finally: pass return windows_color()
for a in [curses_color, windows_color]: b = a() if b != None: return b
def select_color(): try: import curses except ImportError: import ctypes try: from ctypes import wintypes finally: pass return windows_color() else: return curses_color() assert False
5d26939fc8b6d54f9088b896d20c46709f4e4dcc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/5d26939fc8b6d54f9088b896d20c46709f4e4dcc/termclr.py
return curses_color() assert False
assert False
def select_color(): try: import curses except ImportError: import ctypes try: from ctypes import wintypes finally: pass return windows_color() else: return curses_color() assert False
5d26939fc8b6d54f9088b896d20c46709f4e4dcc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/5d26939fc8b6d54f9088b896d20c46709f4e4dcc/termclr.py
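The reworked select_color above simply calls each backend factory in turn and returns the first non-None result, instead of branching on which import failed. A compact sketch of that dispatch pattern with hypothetical factories:

def curses_backend():
    try:
        import curses               # probe availability only
    except ImportError:
        return None
    return "curses"

def plain_backend():
    return "plain"

def select_backend():
    for factory in (curses_backend, plain_backend):
        backend = factory()
        if backend is not None:
            return backend
    raise RuntimeError("no usable backend")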
def __get__(self, instance, owner): #assert instance == None, instance return property.__get__(self, owner)
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
assert a[2] != None return collections.namedtuple(
assert a[2] != None, "None is reserved for indicating uninitialized fields" return namedtuple(
def ClfsStructField(*args): a = list(args) if len(a) < 3: a.append(None) else: assert a[2] != None return collections.namedtuple( "ClfsStructField", ("name", "format", "initval") )(*a)
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
__metaclass__ = ClfsStructType _fields_ = ()
__metaclass__ = ClfsStructType _fields_ = ()
def all_fields(): for base in bases: if hasattr(base, "fields"): for field in base.fields: yield field for a in attrs["_fields_"]: yield ClfsStructField(*a)
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
def from_fileobj(class_, fileobj): return class_.unpack(fileobj.read(class_.size))
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
assert len(unpacked) == 1 instance[field.name] = unpacked[0]
if len(unpacked): instance[field.name], = unpacked
def unpack(class_, buffer): instance = class_() offset = 0 for field in instance.fields: #assert field.name in instance.__values unpacked = struct.unpack_from(field.format, buffer, offset) assert len(unpacked) == 1 instance[field.name] = unpacked[0] #instance.__values[field.name] = unpacked[0] offset += struct.calcsize(field.format) assert offset == len(buffer) return instance
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
assert field.name in self.__values, "Field %r is uninitialized" % (field.name,) value = self.__values[field.name]
def pack(self): buffer = "" for field in self.fields: assert field.name in self.__values, "Field %r is uninitialized" % (field.name,) value = self.__values[field.name] try: buffer += struct.pack(field.format, value) except struct.error as exc: raise struct.error("Error packing %r into %s.%s, %s" % (value, self.__class__.__name__, field.name, exc.message)) assert len(buffer) == self.size return buffer
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
buffer += struct.pack(field.format, value)
value = (self.__values[field.name],) except KeyError: value = () try: buffer += struct.pack(field.format, *value)
def pack(self): buffer = "" for field in self.fields: assert field.name in self.__values, "Field %r is uninitialized" % (field.name,) value = self.__values[field.name] try: buffer += struct.pack(field.format, value) except struct.error as exc: raise struct.error("Error packing %r into %s.%s, %s" % (value, self.__class__.__name__, field.name, exc.message)) assert len(buffer) == self.size return buffer
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
("inode", "I"),)
("ino", "I"),) assert DirEntry.size == 128, DirEntry.size
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
("type", "B"),
("mode", "I"), ("__pad0", "4x"),
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
("links", "I"),)
("nlink", "I"), ("uid", "I"), ("gid", "I"), ("rdev", "Q"), ("atime", "I"), ("atimens", "I"), ("mtime", "I"), ("mtimens", "I"), ("ctime", "I"), ("ctimens", "I"), ("__pad1", "68x"),) def get_st_times(self): return dict(( ("st_" + a, self[a] + self[a + "ns"] / (10 ** 9)) for a in (b + "time" for b in "amc"))) assert Inode.size == 128, Inode.size
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
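The expanded Inode layout above uses struct pad fields ('x') to hold the record at a fixed size; pad bytes occupy space in the packed buffer but carry no Python value. A small sketch of how per-field sizes add up when each field is packed separately, with an illustrative subset rather than the full layout:

import struct

fields = [("mode", "I"), ("rdev", "Q"), ("__pad", "4x")]
total = sum(struct.calcsize(fmt) for _, fmt in fields)
# struct.pack("4x") takes no value argument and still contributes 4 zero bytes
packed = struct.pack("I", 0o40755) + struct.pack("Q", 0) + struct.pack("4x")
assert len(packed) == total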
assert BootRecord.size <= 256
assert BootRecord.size <= 256, "This will clobber the root directory entry"
def __repr__(self): return "<{0} {1}>".format(self.__class__.__name__, self.__values)
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0)
def __init__(self, path): self.f = open(path, "r+b")
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0) br = BootRecord.from_fileobj(self.f) assert br["ident"].rstrip("\0") == "clfs", repr(br["ident"]) assert br["version"] == 1 self.cluster_size = br["clrsize"] self.master_region_cluster_count = br["mstrclrs"] self.allocation_table_cluster_count = br["atabclrs"] self.data_region_cluster_count = br["dataclrs"] self.filesystem_cluster_count = \ self.master_region_cluster_count + \ self.allocation_table_cluster_count + \ self.data_region_cluster_count
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
self.filesystem_cluster_count = \ self.master_region_cluster_count + \
@property def filesystem_cluster_count(self): return self.master_region_cluster_count + \
def __init__(self, path=None, fileobj=None): assert bool(path) ^ bool(fileobj) if path: self.f = open(path, "r+b") else: self.f = fileobj self.f.seek(0) br = BootRecord.from_fileobj(self.f) assert br["ident"].rstrip("\0") == "clfs", repr(br["ident"]) assert br["version"] == 1 self.cluster_size = br["clrsize"] self.master_region_cluster_count = br["mstrclrs"] self.allocation_table_cluster_count = br["atabclrs"] self.data_region_cluster_count = br["dataclrs"] self.filesystem_cluster_count = \ self.master_region_cluster_count + \ self.allocation_table_cluster_count + \ self.data_region_cluster_count
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
if inode_struct["type"] != TYPE_DIRECTORY:
if not S_ISDIR(inode_struct["mode"]):
def read_directory(self, inode): inode_struct = self.get_inode_struct(inode) if inode_struct["type"] != TYPE_DIRECTORY: raise ClfsError(ENOTDIR) offset = 0 while offset < inode_struct["size"]: dirent = DirEntry.unpack(self.read_inode_data( inode, offset, DirEntry.size)) if dirent["name"].rstrip("\0"): yield dirent offset += dirent.size
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
for dirent in self.read_directory(cur_dirent["inode"]):
for dirent in self.read_directory(cur_dirent["ino"]):
def get_dir_entry(self, path): for name in path.split("/"): if not name: cur_dirent = self.get_root_dir_entry() else: # pdb.set_trace() for dirent in self.read_directory(cur_dirent["inode"]): if dirent["name"].rstrip("\0") == name: cur_dirent = dirent break else: raise ClfsError(ENOENT) return cur_dirent
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
def iter_allocation_table(self): self.seek_cluster(self.master_region_cluster_count) for index in xrange(self.data_region_cluster_count): yield struct.unpack("I", self.f.read(4))[0] def claim_free_cluster(self): self.seek_cluster(self.master_region_cluster_count) for index in xrange(self.data_region_cluster_count): cluster = struct.unpack("I", self.f.read(4))[0] if cluster == CLUSTER_FREE: self.f.seek(-4, os.SEEK_CUR) self.f.write(struct.pack("I", CLUSTER_END_OF_CHAIN)) return index + self.master_region_cluster_count + self.allocation_table_cluster_count else: assert False, "Filesystem is full?" def first_data_region_cluster_number(self): return self.master_region_cluster_count + self.allocation_table_cluster_count def valid_data_region_cluster_number(self, clno): return self.first_data_region_cluster_number() \ <= clno \ < self.filesystem_cluster_count def seek_cluster_number(self, clno): assert self.valid_data_region_cluster_number(clno), clno self.safe_seek(self.cluster_size * self.master_region_cluster_count + 4 * (clno - self.first_data_region_cluster_number())) def set_cluster_number(self, clno, value): self.seek_cluster_number(clno) logging.debug("Setting cluster number %i->%i", clno, value) self.f.write(struct.pack("I", value))
dirent_for_path = get_dir_entry
def get_dir_entry(self, path): for name in path.split("/"): if not name: cur_dirent = self.get_root_dir_entry() else: # pdb.set_trace() for dirent in self.read_directory(cur_dirent["inode"]): if dirent["name"].rstrip("\0") == name: cur_dirent = dirent break else: raise ClfsError(ENOENT) return cur_dirent
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode)
def write_inode_data(self, ino, offset, buffer): inode_struct = self.get_inode_struct(ino)
#def write(self, path, buf, offset):
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
inode,
ino,
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode) data_offset = inode_struct.size write_size, new_size = self.write_to_chain( inode, inode_struct["size"] + data_offset, offset + data_offset, buffer) assert write_size == len(buffer), write_size expected_size = data_offset + max(offset + write_size, inode_struct["size"]) assert new_size == expected_size, (new_size, expected_size) #assert new_size == # just update it anyway, i'll have to update times too inode_struct["size"] = new_size - data_offset #pdb.set_trace() assert (inode_struct.size, new_size) == self.write_to_chain( inode, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(inode)["size"] == new_size - data_offset return write_size
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
inode, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(inode)["size"] == new_size - data_offset
ino, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(ino)["size"] == new_size - data_offset
def write_inode_data(self, inode, offset, buffer): inode_struct = self.get_inode_struct(inode) data_offset = inode_struct.size write_size, new_size = self.write_to_chain( inode, inode_struct["size"] + data_offset, offset + data_offset, buffer) assert write_size == len(buffer), write_size expected_size = data_offset + max(offset + write_size, inode_struct["size"]) assert new_size == expected_size, (new_size, expected_size) #assert new_size == # just update it anyway, i'll have to update times too inode_struct["size"] = new_size - data_offset #pdb.set_trace() assert (inode_struct.size, new_size) == self.write_to_chain( inode, new_size, 0, inode_struct.pack()) assert self.get_inode_struct(inode)["size"] == new_size - data_offset return write_size
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
def create_node(self, path, type):
def create_node(self, path, mode): """Create an allocate a new inode, update relevant structures elsewhere"""
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster())
create_rootdir = bool( (not node_basename) and (node_dirname == parent_dirname == "/")) if create_rootdir: assert S_ISDIR(mode) new_inode = Inode(size=0, uid=0, gid=0, rdev=0, mode=mode) sec, nsec = time_as_posix_spec(time()) for field_name in ("atime", "mtime", "ctime"): new_inode[field_name] = sec for field_name in ("atimens", "mtimens", "ctimens"): new_inode[field_name] = nsec del sec, nsec if S_ISDIR(mode): new_inode["nlink"] = 2 else: new_inode["nlink"] = 1 new_dirent = DirEntry(ino=self.claim_free_cluster()) if create_rootdir: new_dirent["name"] = "/" assert new_dirent["ino"] == self.first_data_region_cluster_number, new_dirent["ino"] else: parent_ino = self.dirent_for_path(node_dirname)["ino"] for sibling_dirent in self.read_directory(parent_ino): if sibling_dirent["name"] == node_basename: raise ClfsError(EEXIST) else: new_dirent["name"] = node_basename assert (new_inode.size, new_inode.size) == self.write_to_chain( cluster=new_dirent["ino"], size=0, offset=0, buffer=new_inode.pack())
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack()) def create_filesystem(device_path): device_file = open(device_path, "r+b") device_size = os.fstat(device_file.fileno()).st_size
if create_rootdir: self.seek_root_dirent() self.f.write(new_dirent.pack()) else: assert new_dirent.size == self.write_inode_data( ino=parent_ino, offset=self.get_inode_struct(parent_ino)["size"], buffer=new_dirent.pack(),) def generate_bootrecord(device_size):
def create_node(self, path, type): node_dirname, node_basename = os.path.split(path) parent_dirname, parent_basename = os.path.split(node_dirname) parent_dirent = self.get_dir_entry(node_dirname) parent_inode_struct = self.get_inode_struct(parent_dirent["inode"]) for dirent in self.read_directory(parent_dirent["inode"]): if dirent["name"].rstrip("\0") == node_basename: raise ClfsError(EEXIST) new_dirent = DirEntry(name=node_basename, inode=self.claim_free_cluster()) # write the new dirent at the end of the parent directory assert new_dirent.size == self.write_inode_data( parent_dirent["inode"], parent_inode_struct["size"], new_dirent.pack(),) # initialize the new inode #pdb.set_trace() new_inode = Inode(type=type, size=0) if type == TYPE_DIRECTORY: new_inode["links"] = 2 elif type == TYPE_REGULAR_FILE: new_inode["links"] = 1 assert (new_inode.size, new_inode.size) == self.write_to_chain( new_dirent["inode"], 0, 0, new_inode.pack())
a2d8f4bc0f5dd94eba8767e5d77b1beb78415227 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/a2d8f4bc0f5dd94eba8767e5d77b1beb78415227/clfs.py
gtk.STOCK_NEW, gtk.RESPONSE_OK,
gtk.STOCK_OK, gtk.RESPONSE_OK,
def add_share(self, button): namelbl = gtk.Label("Share name:")
30b8e466313264e796bc0cdbbf29133edd27f53b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/30b8e466313264e796bc0cdbbf29133edd27f53b/lanshare-gtk.py
subprocess.check_call(["xdg-open", url])
try: subprocess.check_call(["xdg-open", url]) except subprocess.CalledProcessError as exc: print exc else: return
def browse_peer_by_url(self, url): """Open the given peer URL with the most natural file manager for the current platform that we can find""" import os, subprocess if os.name == "nt": # this is how it's done programmatically? except that it won't invoke # the default file manager (explorer) on winders #ShellExecute(None, "explore", url, None, None, SW_SHOWNORMAL)
aa8d0df17029f62f89138533b9d720ad3637a2ca /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/aa8d0df17029f62f89138533b9d720ad3637a2ca/lanshare-gtk.py
subprocess.check_call(["nautilus", url])
try: subprocess.check_call(["nautilus", url]) except subprocess.CalledProcessError as exc: print exc else: return
def browse_peer_by_url(self, url): """Open the given peer URL with the most natural file manager for the current platform that we can find""" import os, subprocess if os.name == "nt": # this is how it's done programmatically? except that it won't invoke # the default file manager (explorer) on winders #ShellExecute(None, "explore", url, None, None, SW_SHOWNORMAL)
aa8d0df17029f62f89138533b9d720ad3637a2ca /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/aa8d0df17029f62f89138533b9d720ad3637a2ca/lanshare-gtk.py
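The two changes above stop a failing xdg-open or nautilus call from aborting browse_peer_by_url and instead fall through to the next candidate. A compact sketch of trying openers in order (command availability obviously varies by platform):

import subprocess

def open_with_first_available(url, commands=("xdg-open", "nautilus")):
    for cmd in commands:
        try:
            subprocess.check_call([cmd, url])
        except (OSError, subprocess.CalledProcessError) as exc:
            print(exc)              # this opener failed or is missing; try the next one
        else:
            return True
    return False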
cluster_read_size = min(read_size, self.cluster_size - read_offset)
cluster_read_size = min(read_size, self.cluster_size - read_offset, chain_size)
def read_from_chain(self, first_cluster, chain_size, read_offset, read_size): if chain_size <= 0: return "" #assert read_offset + read_size <= chain_size, (read_offset, read_size, chain_size) if read_offset > self.cluster_size: return self.read_from_chain( self.next_cluster(first_cluster), chain_size - self.cluster_size, read_offset - self.cluster_size, read_size) cluster_read_size = min(read_size, self.cluster_size - read_offset) buffer = self.read_cluster(first_cluster, read_offset, cluster_read_size) assert len(buffer) == cluster_read_size return buffer + self.read_from_chain( self.next_cluster(first_cluster), chain_size - self.cluster_size, 0, read_size - cluster_read_size)
3f4785d30f23e0a51e7ffe033ff2807ba7950baf /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/4811/3f4785d30f23e0a51e7ffe033ff2807ba7950baf/clfs.py
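The added chain_size term in the min() above caps a read at the data the chain actually holds rather than at the cluster boundary alone. A small worked example of the clamping arithmetic with the same variable names:

cluster_size = 512
chain_size = 188        # logical bytes left in this tail of the chain
read_offset = 88
read_size = 300

cluster_read_size = min(read_size, cluster_size - read_offset, chain_size)
assert cluster_read_size == 188     # the old two-way min would have allowed 300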
elif a.name in nopub_actions:
if a.name in nopub_actions:
def trans_include(repo_uri, fargs, transaction=None): basedirs = [] timestamp_files = [] error_occurred = False opts, pargs = getopt.getopt(fargs, "d:T:") for opt, arg in opts: if opt == "-d": basedirs.append(arg) elif opt == "-T": timestamp_files.append(arg) if transaction == None: try: trans_id = os.environ["PKG_TRANS_ID"] except KeyError: usage(_("No transaction ID specified in $PKG_TRANS_ID"), cmd="include") xport, pub = setup_transport_and_pubs(repo_uri) t = trans.Transaction(repo_uri, trans_id=trans_id, xport=xport, pub=pub) else: t = transaction if not pargs: filelist = [("<stdin>", sys.stdin)] else: try: filelist = [(f, file(f)) for f in pargs] except IOError, e: error(e, cmd="include") return 1 lines = [] # giant string of all input files concatenated together linecnts = [] # tuples of starting line number, ending line number linecounter = 0 # running total for filename, f in filelist: try: data = f.read() except IOError, e: error(e, cmd="include") return 1 lines.append(data) linecnt = len(data.splitlines()) linecnts.append((linecounter, linecounter + linecnt)) linecounter += linecnt m = pkg.manifest.Manifest() try: m.set_content("\n".join(lines)) except apx.InvalidPackageErrors, err: e = err.errors[0] lineno = e.lineno for i, tup in enumerate(linecnts): if lineno > tup[0] and lineno <= tup[1]: filename = filelist[i][0] lineno -= tup[0] break else: filename = "???" lineno = "???" error(_("File %s line %s: %s") % (filename, lineno, e), cmd="include") return 1 invalid_action = False for a in m.gen_actions(): # don't publish this action if a.name == "set" and a.attrs["name"] in ["pkg.fmri", "fmri"]: continue elif a.name == "file": path, bd = pkg.actions.set_action_data( a.hash, a, basedirs) basename = os.path.basename(a.attrs["path"]) for pattern in timestamp_files: if fnmatch.fnmatch(basename, pattern): ts = pkg.misc.time_to_timestamp( os.stat(path).st_mtime) a.attrs["timestamp"] = ts break elif a.name == "license": pkg.actions.set_action_data(a.hash, a, basedirs) elif a.name in nopub_actions: error(_("invalid action for publication: %s") % str(a), cmd="include") invalid_action = True else: t.add(a) if invalid_action: return 3 else: return 0
3a9017a9005277592b7e9176ca034ce4f0f56657 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/3a9017a9005277592b7e9176ca034ce4f0f56657/publish.py
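The elif-to-if change matters because the earlier "file" and "license" branches do not end in continue: with elif, those actions fell out of the chain without ever reaching the trailing else, so they were never added to the transaction. A stand-alone illustration of the fixed control flow (nopub_actions and the action names below are placeholders, not the pkgsend definitions):

nopub_actions = set(["signature"])      # hypothetical non-publishable kinds
published = []

def publish(name):
    if name == "set":
        return                          # dropped entirely, as in the original
    elif name == "file":
        pass                            # timestamp/path fixups happen here
    elif name == "license":
        pass                            # payload path fixups happen here
    if name in nopub_actions:           # was `elif`: file/license never got here
        raise ValueError("invalid action for publication: %s" % name)
    published.append(name)

publish("file")
publish("depend")
assert published == ["file", "depend"]  # file actions are published again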
self.pkgsend_bulk(durl, self.example_pkg10)
self.pkgsend_bulk(durl, self.dup_lines_pkg10)
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024/t_pkg_search.py
self.pkg("search 'example_pkg:set:pkg.fmri:'")
self.pkg("search 'dup_lines:set:pkg.fmri:'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024/t_pkg_search.py
self.pkg("search -o pkg.shortfmri '*6*'")
self.pkg("search -o pkg.shortfmri 'a'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024/t_pkg_search.py
self.pkg("install example_pkg") self.pkg("search -l 'example_pkg:set:pkg.fmri:'")
self.pkg("install dup_lines") self.pkg("search -l 'dup_lines:set:pkg.fmri:'")
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024/t_pkg_search.py
self.pkg("search -l -o pkg.shortfmri,action.key '*6*'") expected_number_of_lines = 9 if "pkg.fmri" in self.output: expected_number_of_lines += 1 self.debug("Expected number of lines:%s" % expected_number_of_lines) self.assertEqual(len(self.output.splitlines()), expected_number_of_lines)
self.pkg("search -l -o pkg.shortfmri,action.key 'a'") self.assertEqual(len(self.output.splitlines()), 4)
def test_bug_12271_14088(self): """Check that consecutive duplicate lines are removed and that having a single option to -o still prints the header."""
91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/91ef234d0a5ec5f6b7bb4f3b824ac58d17cae024/t_pkg_search.py
("show_on_expected_fail", 'f',
("showonexpectedfail", 'f',
def run(self): # nuke everything print("deleting " + dist_dir) shutil.rmtree(dist_dir, True) print("deleting " + build_dir) shutil.rmtree(build_dir, True) print("deleting " + root_dir) shutil.rmtree(root_dir, True) print("deleting " + pkgs_dir) shutil.rmtree(pkgs_dir, True) print("deleting " + extern_dir) shutil.rmtree(extern_dir, True)
242bb923ba8d0e60fae68b0e07beb32219c36f39 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/242bb923ba8d0e60fae68b0e07beb32219c36f39/setup.py
self.show_on_expected_fail = 0
self.showonexpectedfail = 0
def initialize_options(self): self.only = "" self.baselinefile = "" self.verbosemode = 0 self.parseable = 0 self.genbaseline = 0 self.timing = 0 self.coverage = 0 self.stoponerr = 0 self.debugoutput = 0 self.show_on_expected_fail = 0 self.startattest = "" self.archivedir = ""
242bb923ba8d0e60fae68b0e07beb32219c36f39 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/242bb923ba8d0e60fae68b0e07beb32219c36f39/setup.py
remote_publishers=True)
remote_prefix=True)
def main_func(): global cache_dir, download_start, xport, xport_cfg all_timestamps = False all_versions = False keep_compressed = False list_newest = False recursive = False src_uri = None target = None incoming_dir = None src_pub = None targ_pub = None temp_root = misc.config_temp_root() gettext.install("pkg", "/usr/share/locale") global_settings.client_name = "pkgrecv" target = os.environ.get("PKG_DEST", None) src_uri = os.environ.get("PKG_SRC", None) try: opts, pargs = getopt.getopt(sys.argv[1:], "c:d:hkm:nrs:") except getopt.GetoptError, e: usage(_("Illegal option -- %s") % e.opt) for opt, arg in opts: if opt == "-c": cache_dir = arg elif opt == "-d": target = arg elif opt == "-h": usage(retcode=0) elif opt == "-k": keep_compressed = True elif opt == "-n": list_newest = True elif opt == "-r": recursive = True elif opt == "-s": src_uri = arg elif opt == "-m": if arg == "all-timestamps": all_timestamps = True elif arg == "all-versions": all_versions = True else: usage(_("Illegal option value -- %s") % arg) if not src_uri: usage(_("a source repository must be provided")) if not cache_dir: cache_dir = tempfile.mkdtemp(dir=temp_root) # Only clean-up cache dir if implicitly created by pkgrecv. # User's cache-dirs should be preserved tmpdirs.append(cache_dir) incoming_dir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(incoming_dir) # Create transport and transport config xport, xport_cfg = transport.setup_transport() xport_cfg.cached_download_dir = cache_dir xport_cfg.incoming_download_dir = incoming_dir # Since publication destionations may only have one repository # configured per publisher, create destination as separate transport # in case source and destination have identical publisher configuration # but different repository endpoints. dest_xport, dest_xport_cfg = transport.setup_transport() dest_xport_cfg.cached_download_dir = cache_dir dest_xport_cfg.incoming_download_dir = incoming_dir # Configure src publisher src_pub = transport.setup_publisher(src_uri, "source", xport, xport_cfg, remote_publishers=True) tracker = get_tracker() if list_newest: if pargs or len(pargs) > 0: usage(_("-n takes no options")) fmri_list = fetch_catalog(src_pub, tracker, xport) list_newest_fmris(fmri_list) return 0 if pargs == None or len(pargs) == 0: usage(_("must specify at least one pkgfmri")) republish = False if not target: target = basedir = os.getcwd() elif target.find("://") != -1: basedir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(basedir) republish = True targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) # Files have to be decompressed for republishing. keep_compressed = False if target.startswith("file://"): # Check to see if the repository exists first. try: t = trans.Transaction(target, xport=dest_xport, pub=targ_pub) except trans.TransactionRepositoryInvalidError, e: txt = str(e) + "\n\n" txt += _("To create a repository, use the " "pkgsend command.") abort(err=txt) except trans.TransactionRepositoryConfigError, e: txt = str(e) + "\n\n" txt += _("The repository configuration for " "the repository located at '%s' is not " "valid or the specified path does not " "exist. 
Please correct the configuration " "of the repository or create a new " "one.") % target abort(err=txt) except trans.TransactionError, e: abort(err=e) else: basedir = target if not os.path.exists(basedir): try: os.makedirs(basedir, misc.PKG_DIR_MODE) except: error(_("Unable to create basedir '%s'.") % \ basedir) return 1 xport_cfg.pkgdir = basedir if republish: targ_fmris = fetch_catalog(targ_pub, tracker, dest_xport) all_fmris = fetch_catalog(src_pub, tracker, xport) fmri_arguments = pargs fmri_list = prune(list(set(expand_matching_fmris(all_fmris, fmri_arguments))), all_versions, all_timestamps) if recursive: msg(_("Retrieving manifests for dependency evaluation ...")) tracker.evaluate_start() fmri_list = prune(get_dependencies(src_uri, fmri_list, basedir, tracker), all_versions, all_timestamps) tracker.evaluate_done() def get_basename(pfmri): open_time = pfmri.get_timestamp() return "%d_%s" % \ (calendar.timegm(open_time.utctimetuple()), urllib.quote(str(pfmri), "")) # First, retrieve the manifests and calculate package transfer sizes. npkgs = len(fmri_list) nfiles = 0 nbytes = 0 if not recursive: msg(_("Retrieving manifests for package evaluation ...")) tracker.evaluate_start(npkgs=npkgs) retrieve_list = [] while fmri_list: f = fmri_list.pop() if republish and f in targ_fmris: msg(_("Skipping %s: already present " "at destination") % f) continue m = get_manifest(f, basedir) pkgdir = os.path.join(basedir, f.get_dir_path()) mfile = xport.multi_file_ni(src_pub, pkgdir, not keep_compressed, tracker) nf, nb = add_hashes_to_multi(m, mfile) nfiles += nf nbytes += nb retrieve_list.append((f, mfile)) tracker.evaluate_progress(fmri=f) tracker.evaluate_done() # Next, retrieve and store the content for each package. msg(_("Retrieving package content ...")) tracker.download_set_goal(len(retrieve_list), nfiles, nbytes) publish_list = [] while retrieve_list: f, mfile = retrieve_list.pop() tracker.download_start_pkg(f.get_fmri(include_scheme=False)) if mfile: mfile.wait_files() if not download_start: download_start = True if republish: publish_list.append(f) tracker.download_end_pkg() tracker.download_done() tracker.reset() # Finally, republish the packages if needed. while publish_list: f = publish_list.pop() msg(_("Republishing %s ...") % f) m = get_manifest(f, basedir) # Get first line of original manifest so that inclusion of the # scheme can be determined. use_scheme = True contents = get_manifest(f, basedir, contents=True) if contents.splitlines()[0].find("pkg:/") == -1: use_scheme = False pkg_name = f.get_fmri(include_scheme=use_scheme) pkgdir = os.path.join(basedir, f.get_dir_path()) # This is needed so any previous failures for a package # can be aborted. trans_id = get_basename(f) if not targ_pub: targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) try: t = trans.Transaction(target, pkg_name=pkg_name, trans_id=trans_id, xport=dest_xport, pub=targ_pub) # Remove any previous failed attempt to # to republish this package. try: t.close(abandon=True) except: # It might not exist already. pass t.open() for a in m.gen_actions(): if a.name == "set" and \ a.attrs.get("name", "") in ("fmri", "pkg.fmri"): # To be consistent with the server, # the fmri can't be added to the # manifest. continue if hasattr(a, "hash"): fname = os.path.join(pkgdir, a.hash) a.data = lambda: open(fname, "rb") t.add(a) t.close() except trans.TransactionError, e: abort(err=e) return 1 # Dump all temporary data. cleanup() return 0
54a1907958e2c6b602122542c2e59adf94d2cefb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/54a1907958e2c6b602122542c2e59adf94d2cefb/pull.py
dest_xport, dest_xport_cfg, remote_publishers=True)
dest_xport, dest_xport_cfg, remote_prefix=True)
def main_func(): global cache_dir, download_start, xport, xport_cfg all_timestamps = False all_versions = False keep_compressed = False list_newest = False recursive = False src_uri = None target = None incoming_dir = None src_pub = None targ_pub = None temp_root = misc.config_temp_root() gettext.install("pkg", "/usr/share/locale") global_settings.client_name = "pkgrecv" target = os.environ.get("PKG_DEST", None) src_uri = os.environ.get("PKG_SRC", None) try: opts, pargs = getopt.getopt(sys.argv[1:], "c:d:hkm:nrs:") except getopt.GetoptError, e: usage(_("Illegal option -- %s") % e.opt) for opt, arg in opts: if opt == "-c": cache_dir = arg elif opt == "-d": target = arg elif opt == "-h": usage(retcode=0) elif opt == "-k": keep_compressed = True elif opt == "-n": list_newest = True elif opt == "-r": recursive = True elif opt == "-s": src_uri = arg elif opt == "-m": if arg == "all-timestamps": all_timestamps = True elif arg == "all-versions": all_versions = True else: usage(_("Illegal option value -- %s") % arg) if not src_uri: usage(_("a source repository must be provided")) if not cache_dir: cache_dir = tempfile.mkdtemp(dir=temp_root) # Only clean-up cache dir if implicitly created by pkgrecv. # User's cache-dirs should be preserved tmpdirs.append(cache_dir) incoming_dir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(incoming_dir) # Create transport and transport config xport, xport_cfg = transport.setup_transport() xport_cfg.cached_download_dir = cache_dir xport_cfg.incoming_download_dir = incoming_dir # Since publication destionations may only have one repository # configured per publisher, create destination as separate transport # in case source and destination have identical publisher configuration # but different repository endpoints. dest_xport, dest_xport_cfg = transport.setup_transport() dest_xport_cfg.cached_download_dir = cache_dir dest_xport_cfg.incoming_download_dir = incoming_dir # Configure src publisher src_pub = transport.setup_publisher(src_uri, "source", xport, xport_cfg, remote_publishers=True) tracker = get_tracker() if list_newest: if pargs or len(pargs) > 0: usage(_("-n takes no options")) fmri_list = fetch_catalog(src_pub, tracker, xport) list_newest_fmris(fmri_list) return 0 if pargs == None or len(pargs) == 0: usage(_("must specify at least one pkgfmri")) republish = False if not target: target = basedir = os.getcwd() elif target.find("://") != -1: basedir = tempfile.mkdtemp(dir=temp_root) tmpdirs.append(basedir) republish = True targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) # Files have to be decompressed for republishing. keep_compressed = False if target.startswith("file://"): # Check to see if the repository exists first. try: t = trans.Transaction(target, xport=dest_xport, pub=targ_pub) except trans.TransactionRepositoryInvalidError, e: txt = str(e) + "\n\n" txt += _("To create a repository, use the " "pkgsend command.") abort(err=txt) except trans.TransactionRepositoryConfigError, e: txt = str(e) + "\n\n" txt += _("The repository configuration for " "the repository located at '%s' is not " "valid or the specified path does not " "exist. 
Please correct the configuration " "of the repository or create a new " "one.") % target abort(err=txt) except trans.TransactionError, e: abort(err=e) else: basedir = target if not os.path.exists(basedir): try: os.makedirs(basedir, misc.PKG_DIR_MODE) except: error(_("Unable to create basedir '%s'.") % \ basedir) return 1 xport_cfg.pkgdir = basedir if republish: targ_fmris = fetch_catalog(targ_pub, tracker, dest_xport) all_fmris = fetch_catalog(src_pub, tracker, xport) fmri_arguments = pargs fmri_list = prune(list(set(expand_matching_fmris(all_fmris, fmri_arguments))), all_versions, all_timestamps) if recursive: msg(_("Retrieving manifests for dependency evaluation ...")) tracker.evaluate_start() fmri_list = prune(get_dependencies(src_uri, fmri_list, basedir, tracker), all_versions, all_timestamps) tracker.evaluate_done() def get_basename(pfmri): open_time = pfmri.get_timestamp() return "%d_%s" % \ (calendar.timegm(open_time.utctimetuple()), urllib.quote(str(pfmri), "")) # First, retrieve the manifests and calculate package transfer sizes. npkgs = len(fmri_list) nfiles = 0 nbytes = 0 if not recursive: msg(_("Retrieving manifests for package evaluation ...")) tracker.evaluate_start(npkgs=npkgs) retrieve_list = [] while fmri_list: f = fmri_list.pop() if republish and f in targ_fmris: msg(_("Skipping %s: already present " "at destination") % f) continue m = get_manifest(f, basedir) pkgdir = os.path.join(basedir, f.get_dir_path()) mfile = xport.multi_file_ni(src_pub, pkgdir, not keep_compressed, tracker) nf, nb = add_hashes_to_multi(m, mfile) nfiles += nf nbytes += nb retrieve_list.append((f, mfile)) tracker.evaluate_progress(fmri=f) tracker.evaluate_done() # Next, retrieve and store the content for each package. msg(_("Retrieving package content ...")) tracker.download_set_goal(len(retrieve_list), nfiles, nbytes) publish_list = [] while retrieve_list: f, mfile = retrieve_list.pop() tracker.download_start_pkg(f.get_fmri(include_scheme=False)) if mfile: mfile.wait_files() if not download_start: download_start = True if republish: publish_list.append(f) tracker.download_end_pkg() tracker.download_done() tracker.reset() # Finally, republish the packages if needed. while publish_list: f = publish_list.pop() msg(_("Republishing %s ...") % f) m = get_manifest(f, basedir) # Get first line of original manifest so that inclusion of the # scheme can be determined. use_scheme = True contents = get_manifest(f, basedir, contents=True) if contents.splitlines()[0].find("pkg:/") == -1: use_scheme = False pkg_name = f.get_fmri(include_scheme=use_scheme) pkgdir = os.path.join(basedir, f.get_dir_path()) # This is needed so any previous failures for a package # can be aborted. trans_id = get_basename(f) if not targ_pub: targ_pub = transport.setup_publisher(target, "target", dest_xport, dest_xport_cfg, remote_publishers=True) try: t = trans.Transaction(target, pkg_name=pkg_name, trans_id=trans_id, xport=dest_xport, pub=targ_pub) # Remove any previous failed attempt to # to republish this package. try: t.close(abandon=True) except: # It might not exist already. pass t.open() for a in m.gen_actions(): if a.name == "set" and \ a.attrs.get("name", "") in ("fmri", "pkg.fmri"): # To be consistent with the server, # the fmri can't be added to the # manifest. continue if hasattr(a, "hash"): fname = os.path.join(pkgdir, a.hash) a.data = lambda: open(fname, "rb") t.add(a) t.close() except trans.TransactionError, e: abort(err=e) return 1 # Dump all temporary data. cleanup() return 0
54a1907958e2c6b602122542c2e59adf94d2cefb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/54a1907958e2c6b602122542c2e59adf94d2cefb/pull.py
dest_xport, dest_xport_cfg, remote_publishers=True)
dest_xport, dest_xport_cfg, remote_prefix=True)
def get_basename(pfmri): open_time = pfmri.get_timestamp() return "%d_%s" % \ (calendar.timegm(open_time.utctimetuple()), urllib.quote(str(pfmri), ""))
54a1907958e2c6b602122542c2e59adf94d2cefb /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/54a1907958e2c6b602122542c2e59adf94d2cefb/pull.py
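All three call sites in pull.py passed the same misspelled keyword; Python matches keyword arguments by exact name, so the call would fail with a TypeError only when that code path was actually hit. A tiny reproduction of the failure mode, with a made-up signature standing in for transport.setup_publisher():

def setup_publisher(uri, prefix, xport, xport_cfg, remote_prefix=False):
    # Stand-in only: the real function takes more arguments; this just shows
    # how keyword arguments are matched by exact name.
    return remote_prefix

try:
    setup_publisher("http://example.org", "target", None, None,
        remote_publishers=True)          # the old, misspelled keyword
except TypeError as exc:
    print("rejected at call time:", exc)

assert setup_publisher("http://example.org", "target", None, None,
    remote_prefix=True) is True          # the corrected keyword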
publisher_info = self._get_getpublisherinfo(pub,
publisher_info = self._get_publisherinfo(pub,
def get_publisherinfo(self, pub, ccancel=None): """Given a publisher pub, return the publisher/0 information in a StringIO object."""
6fbad57afaa5940f0edc8324799eeb6f528ee06d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/6fbad57afaa5940f0edc8324799eeb6f528ee06d/transport.py
if not_these_pkgs: newpkgs = set(pkgdict[name] for name in pkgdict.keys() if name not in not_these_pkgs ) else: newpkgs = set(pkgdict.values())
def main_func(): global file_repo global def_branch global def_repo global def_vers global extra_entire_contents global just_these_pkgs global not_these_pkgs global nopublish global publish_all global print_pkg_names global reference_uris global show_debug global wos_path global not_these_consolidations global curpkg try: _opts, _args = getopt.getopt(sys.argv[1:], "AB:C:D:E:I:J:G:NR:T:b:dj:m:ns:v:w:p:") except getopt.GetoptError, _e: print "unknown option", _e.opt sys.exit(1) g_proto_area = os.environ.get("ROOT", "") for opt, arg in _opts: if opt == "-b": def_branch = arg.rstrip("abcdefghijklmnopqrstuvwxyz") elif opt == "-d": show_debug = True elif opt == "-j": # means we're using the new argument form... just_these_pkgs.append(arg) elif opt == "-m": _a = arg.split("=", 1) set_macro(_a[0], _a[1]) elif opt == "-n": nopublish = True elif opt == "-p": if not os.path.exists(arg): raise RuntimeError("Invalid prototype area specified.") # Clean up relative ../../, etc. out of path to proto g_proto_area = os.path.realpath(arg) elif opt == "-s": def_repo = arg if def_repo.startswith("file://"): file_repo = True elif opt == "-v": def_vers = arg elif opt == "-w": wos_path.append(arg) elif opt == "-A": # Always publish obsoleted and renamed packages. publish_all = True elif opt == "-B": branch_file = file(arg) for _line in branch_file: if not _line.startswith("#"): bfargs = _line.split() if len(bfargs) == 2: branch_dict[bfargs[0]] = bfargs[1] branch_file.close() elif opt == "-C": not_these_consolidations.append(arg) elif opt == "-D": elided_files[arg] = True elif opt == "-E": if "@" not in arg: print "-E fmris require a version: %s" % arg sys.exit(2) extra_entire_contents.append(arg) elif opt == "-I": include_path.extend(arg.split(":")) elif opt == "-J": not_these_pkgs.append(arg) elif opt == "-G": #another file of global includes global_includes.append(arg) elif opt == "-N": print_pkg_names = True elif opt == "-R": reference_uris.append(arg) elif opt == "-T": timestamp_files.append(arg) if not def_branch: print "need a branch id (build number)" sys.exit(2) elif "." not in def_branch: print "branch id needs to be of the form 'x.y'" sys.exit(2) if not _args: print "need argument!" sys.exit(2) if not wos_path: wos_path = def_wos_path if just_these_pkgs: filelist = _args else: filelist = _args[0:1] just_these_pkgs = _args[1:] if print_pkg_names: for _mf in filelist: SolarisParse(_mf) sys.exit(0) start_time = time.clock() print "First pass: initial import", datetime.now() for _mf in filelist: SolarisParse(_mf) # Remove pkgs we're not touching because we're skipping that # consolidation pkgs_to_elide = [ p.name for p in pkgdict.values() if p.consolidation in not_these_consolidations ] for pkg in pkgs_to_elide: del pkgdict[pkg] for pkg in not_these_pkgs: del pkgdict[pkg] # Unless we are publishing all obsolete and renamed packages # (-A command line option), remove obsolete and renamed packages # that weren't obsoleted or renamed at this branch and create # a dictionary (called or_pkgs_per_con) of obsoleted and renamed # packages per consolidation. The version portion of the fmri # will contain the branch that the package was obsoleted or renamed at. 
or_pkgs_per_con = {} obs_or_renamed_pkgs = {} for pkg in pkgdict.keys(): obs_branch = pkgdict[pkg].obsolete_branch rename_branch = pkgdict[pkg].rename_branch ver_tokens = pkgdict[pkg].version.split(".") cons = pkgdict[pkg].consolidation if obs_branch: ver_tokens[-1] = obs_branch ver_string = ".".join(ver_tokens) or_pkgs_per_con.setdefault(cons, {})[pkg] = ver_string obs_or_renamed_pkgs[pkg] = (pkgdict[pkg].fmristr(), "obsolete") if publish_all: pkgdict[pkg].version = ver_string else: if obs_branch != def_branch.split(".")[1]: # Not publishing this obsolete package. del pkgdict[pkg] if rename_branch: ver_tokens[-1] = rename_branch ver_string = ".".join(ver_tokens) or_pkgs_per_con.setdefault(cons, {})[pkg] = ver_string obs_or_renamed_pkgs[pkg] = (pkgdict[pkg].fmristr(), "renamed") if publish_all: pkgdict[pkg].version = ver_string else: if rename_branch != def_branch.split(".")[1]: # Not publishing this renamed package. del pkgdict[pkg] print "Second pass: global crosschecks", datetime.now() # perform global crosschecks # path_dict.clear() for pkg in pkgdict.values(): for action in pkg.actions: if "path" not in action.attrs: continue path = action.attrs["path"] path_dict.setdefault(path, []).append(action) if action.name in ["file", "link", "hardlink"]: basename_dict.setdefault(os.path.basename(path), []).append(action) pkgpath_dict.setdefault(path, []).append(action.attrs["importer.ipspkg"]) errors = check_pathdict_actions(path_dict) if errors: for e in errors: print "Fail: %s" % e sys.exit(1) # check for require dependencies on obsolete or renamed pkgs errors = [] warns = [] for pack in pkgdict.values(): for action in pack.actions: if action.name != "depend": continue if action.attrs["type"] == "require" and "fmri" in action.attrs: fmri = action.attrs["fmri"].split("@")[0] # remove version if fmri.startswith("pkg:/"): # remove pkg:/ if exists fmri = fmri[5:] if fmri in obs_or_renamed_pkgs: tup = obs_or_renamed_pkgs[fmri] s = "Pkg %s has 'require' dependency on pkg %s, which is %s" % ( (pack.fmristr(),) + tup) if tup[1] == "obsolete": errors.append(s) else: warns.append(s) if warns: for w in warns: print "Warn: %s" % w if errors: for e in errors: print "Fail: %s" % e sys.exit(1) print "packages being published are self consistent" if reference_uris: print "downloading and checking external references" excludes = [variant.Variants({"variant.arch": get_arch()}).allow_action] for uri in reference_uris: server, fmri_string = uri.split("@", 1) for pfmri in get_dependencies(server, [fmri_string]): if pfmri.get_name() in pkgdict: continue # ignore pkgs already seen pfmri_str = "%s@%s" % (pfmri.get_name(), pfmri.get_version()) fmridict[pfmri.get_name()] = pfmri_str for action in get_manifest(server, pfmri).gen_actions(excludes): if "path" not in action.attrs: continue if action.name == "unknown": # we don't care about unknown actions - # mispublished packages with eg. 
SVR4 # pkginfo files result in duplicate paths, # causing errors in check_pathdict_actions # "Multiple actions on different types # with the same path" print "INFO: ignoring action in %s: %s" \ % (pfmri_str, str(action)) continue action.attrs["importer.ipspkg"] = pfmri_str path_dict.setdefault(action.attrs["path"], []).append(action) if action.name in ["file", "link", "hardlink"]: basename_dict.setdefault(os.path.basename( action.attrs["path"]), []).append(action) pkgpath_dict.setdefault(action.attrs["path"], []).append(action.attrs["importer.ipspkg"]) errors = check_pathdict_actions(path_dict) if errors: for e in errors: print "Fail: %s" % e sys.exit(1) print "external packages checked for conflicts" print "Third pass: dependency id, resolution and publication", datetime.now() consolidation_incorporations = [] obsoleted_renamed_pkgs = [] # Generate consolidation incorporations for cons in cons_dict.keys(): if cons in not_these_consolidations: print "skipping consolidation %s" % cons continue consolidation_incorporation = "consolidation/%s/%s-incorporation" % ( cons, cons) consolidation_incorporations.append(consolidation_incorporation) curpkg = start_package(consolidation_incorporation) curpkg.summary = "%s consolidation incorporation" % cons curpkg.desc = "This incorporation constrains packages " \ "from the %s consolidation." % cons # Add packages that aren't renamed or obsoleted or_pkgs = or_pkgs_per_con.get(cons, {}) curpkg.actions.append(actions.fromstr( "set name=pkg.depend.install-hold value=core-os.%s" % cons)) for depend in cons_dict[cons]: if depend not in or_pkgs: action = actions.fromstr( "depend fmri=%s type=incorporate" % depend) action.attrs["importer.source"] = "depend" curpkg.actions.append(action) # Add in the obsoleted and renamed packages for this # consolidation. for name, version in or_pkgs.iteritems(): action = actions.fromstr( "depend fmri=%s@%s type=incorporate" % (name, version)) action.attrs["importer.source"] = "depend" curpkg.actions.append(action) obsoleted_renamed_pkgs.append("%s@%s" % (name, version)) action = actions.fromstr("set " \ "name=org.opensolaris.consolidation value=%s" % cons) action.attrs["importer.source"] = "add" curpkg.actions.append(action) end_package(curpkg) curpkg = None # Generate entire consolidation if we're generating any consolidation incorps if consolidation_incorporations: curpkg = start_package("entire") curpkg.summary = "incorporation to lock all system packages to same build" curpkg.desc = "This package constrains " \ "system package versions to the same build. WARNING: Proper " \ "system update and correct package selection depend on the " \ "presence of this incorporation. Removing this package will " \ "result in an unsupported system." 
curpkg.actions.append(actions.fromstr( "set name=pkg.depend.install-hold value=core-os")) for incorp in consolidation_incorporations: action = actions.fromstr("depend fmri=%s type=incorporate" % incorp) action.attrs["importer.source"] = "auto-generated" curpkg.actions.append(action) action = actions.fromstr("depend fmri=%s type=require" % incorp) action.attrs["importer.source"] = "auto-generated" action.attrs["importer.no-version"] = "true" curpkg.actions.append(action) for extra in extra_entire_contents: action = actions.fromstr("depend fmri=%s type=incorporate" % extra) action.attrs["importer.source"] = "command-line" curpkg.actions.append(action) extra_noversion = extra.split("@")[0] # remove version action = actions.fromstr("depend fmri=%s type=require" % extra_noversion) action.attrs["importer.source"] = "command-line" action.attrs["importer.no-version"] = "true" curpkg.actions.append(action) end_package(curpkg) curpkg = None incorporated_pkgs = set([ f for l in cons_dict.values() for f in l ]) incorporated_pkgs |= set(consolidation_incorporations) incorporated_pkgs |= set(["entire", "redistributable"]) incorporated_pkgs |= set(obsoleted_renamed_pkgs) unincorps = set(pkgdict.keys()) - incorporated_pkgs if unincorps: # look through these; if they have only set actions they're # ancient obsoleted pkgs - ignore them. for f in unincorps.copy(): for a in pkgdict[f].actions: if a.name != "set": break else: unincorps.remove(f) print "The following non-empty unincorporated pkgs are not part of any consolidation" for f in unincorps: print f if just_these_pkgs: newpkgs = set(pkgdict[name] for name in pkgdict.keys() if name in just_these_pkgs ) else: newpkgs = set(pkgdict.values()) if not_these_pkgs: newpkgs = set(pkgdict[name] for name in pkgdict.keys() if name not in not_these_pkgs ) else: newpkgs = set(pkgdict.values()) if not_these_consolidations: newpkgs = set([ p for p in newpkgs if not p.delivered_via_ips() ]) processed = 0 total = len(newpkgs) error_count = 0 for _p in sorted(newpkgs): if show_debug: print " Version:", _p.version print " Description:", _p.desc print " Summary:", _p.summary print " Classification:", ",".join(_p.classification) try: publish_pkg(_p) except trans.TransactionError, _e: print "%s: FAILED: %s\n" % (_p.name, _e) error_count += 1 processed += 1 if show_debug: print "%d/%d packages processed; %.2f%% complete" % (processed, total, processed * 100.0 / total) if error_count: print "%d/%d packages has errors; %.2f%% FAILED" % (error_count, total, error_count * 100.0 / total) sys.exit(1) print "%d/%d packages processed; %.2f%% complete" % (processed, total, processed * 100.0 / total) if file_repo: code = repo_add_content(def_repo[7:], g_proto_area) if code: sys.exit(code) print "Done:", datetime.now() elapsed = time.clock() - start_time print "publication took %d:%.2d" % (elapsed/60, elapsed % 60) sys.exit(0)
b1f10eebb48129ad84215e065bc71d0e78f95e14 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/b1f10eebb48129ad84215e065bc71d0e78f95e14/importer.py
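The block deleted above was doubly wrong: packages named with -J are already removed from pkgdict earlier in main_func, so the filter was redundant, and its else branch reset newpkgs to every package, silently discarding any -j selection made in the lines just before it. The clobbering is easy to reproduce in isolation (package names below are made up):

pkgdict = {"a": "pkg-a", "b": "pkg-b", "c": "pkg-c"}
just_these_pkgs = ["a"]                  # -j selection
not_these_pkgs = []                      # -J not used on this run

newpkgs = set(pkgdict[n] for n in pkgdict if n in just_these_pkgs)
assert newpkgs == set(["pkg-a"])

# The removed block then ran unconditionally:
if not_these_pkgs:
    newpkgs = set(pkgdict[n] for n in pkgdict if n not in not_these_pkgs)
else:
    newpkgs = set(pkgdict.values())      # clobbers the -j selection

assert newpkgs == set(["pkg-a", "pkg-b", "pkg-c"])   # the bug in miniature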
pub_name = "opensolaris.org"
def get_smf_packages(server_url, manifest_locations, filter): """ Performs a search against server_url looking for packages which contain SMF manifests, returning a list of those pfmris """ dir = os.getcwd() tracker = pkg.client.progress.QuietProgressTracker() image_dir = tempfile.mkdtemp("", "pkg_importer_smfsearch.") is_zone = False pub_name = "opensolaris.org" refresh_allowed = True # create a temporary image api_inst = pkg.client.api.image_create(PKG_CLIENT_NAME, CLIENT_API_VERSION, image_dir, pkg.client.api.IMG_TYPE_USER, is_zone, facets=pkg.facet.Facets(), force=False, prefix=pub_name, progtrack=tracker, refresh_allowed=refresh_allowed, repo_uri=server_url) api_inst = pkg.client.api.ImageInterface(image_dir, pkg.client.api.CURRENT_API_VERSION, tracker, None, PKG_CLIENT_NAME) # restore the current directory, which ImageInterace had changed os.chdir(dir) searches = [] fmris = set() case_sensitive = False return_actions = True query = [] for manifest_loc in manifest_locations: query.append(pkg.client.api.Query(":directory:path:/%s" % manifest_loc, case_sensitive, return_actions)) searches.append(api_inst.remote_search(query)) shutil.rmtree(image_dir, True) for item in searches: for result in item: pfmri = None try: query_num, pub, (v, return_type, tmp) = result pfmri, index, action = tmp except ValueError: raise if pfmri is None: continue if filter in pfmri.get_fmri(): fmris.add(pfmri.get_fmri()) return [pkg.fmri.PkgFmri(pfmri) for pfmri in fmris]
83542e965a5266300e5272b3b75fa5c2619b8722 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/83542e965a5266300e5272b3b75fa5c2619b8722/importer.py
is_zone, facets=pkg.facet.Facets(), force=False, prefix=pub_name,
is_zone, facets=pkg.facet.Facets(), force=False,
def get_smf_packages(server_url, manifest_locations, filter): """ Performs a search against server_url looking for packages which contain SMF manifests, returning a list of those pfmris """ dir = os.getcwd() tracker = pkg.client.progress.QuietProgressTracker() image_dir = tempfile.mkdtemp("", "pkg_importer_smfsearch.") is_zone = False pub_name = "opensolaris.org" refresh_allowed = True # create a temporary image api_inst = pkg.client.api.image_create(PKG_CLIENT_NAME, CLIENT_API_VERSION, image_dir, pkg.client.api.IMG_TYPE_USER, is_zone, facets=pkg.facet.Facets(), force=False, prefix=pub_name, progtrack=tracker, refresh_allowed=refresh_allowed, repo_uri=server_url) api_inst = pkg.client.api.ImageInterface(image_dir, pkg.client.api.CURRENT_API_VERSION, tracker, None, PKG_CLIENT_NAME) # restore the current directory, which ImageInterace had changed os.chdir(dir) searches = [] fmris = set() case_sensitive = False return_actions = True query = [] for manifest_loc in manifest_locations: query.append(pkg.client.api.Query(":directory:path:/%s" % manifest_loc, case_sensitive, return_actions)) searches.append(api_inst.remote_search(query)) shutil.rmtree(image_dir, True) for item in searches: for result in item: pfmri = None try: query_num, pub, (v, return_type, tmp) = result pfmri, index, action = tmp except ValueError: raise if pfmri is None: continue if filter in pfmri.get_fmri(): fmris.add(pfmri.get_fmri()) return [pkg.fmri.PkgFmri(pfmri) for pfmri in fmris]
83542e965a5266300e5272b3b75fa5c2619b8722 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12612/83542e965a5266300e5272b3b75fa5c2619b8722/importer.py
self.write(cursor, user, id, {db_field: value}, context=context)
self.write(cursor, user, id, { db_field: datetime.datetime.combine(value, datetime.time()), }, context=context)
def set_function_fields(self, cursor, user, id, name, value, arg, context=None): request_obj = self.pool.get('res.request') req_ref_obj = self.pool.get('res.request.reference')
37977ffd4cba2a2b2be5d263a7cf423a44376bec /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/37977ffd4cba2a2b2be5d263a7cf423a44376bec/work.py
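The rewritten write converts the date delivered by the function field into the datetime the underlying column stores; datetime.datetime.combine() pairs the date with midnight. The conversion in isolation:

import datetime

value = datetime.date(2010, 3, 14)                 # what the function field holds
stored = datetime.datetime.combine(value, datetime.time())
assert stored == datetime.datetime(2010, 3, 14, 0, 0)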
date = datetime(
date = datetime.datetime(
def add_minutes(self, cursor, user, company, date, minutes, context=None): minutes = int(round(minutes)) minutes = date.minute + minutes
37977ffd4cba2a2b2be5d263a7cf423a44376bec /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/37977ffd4cba2a2b2be5d263a7cf423a44376bec/work.py
date = datetime(
date = datetime.datetime(
def add_hours(self, cursor, user, company, date, hours, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
37977ffd4cba2a2b2be5d263a7cf423a44376bec /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/37977ffd4cba2a2b2be5d263a7cf423a44376bec/work.py
date += timedelta(days= -date.weekday() + intfloor(days))
date += datetime.timedelta(days= -date.weekday() + intfloor(days))
def add_days(self, cursor, user, company, date, days, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
37977ffd4cba2a2b2be5d263a7cf423a44376bec /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/37977ffd4cba2a2b2be5d263a7cf423a44376bec/work.py
date += timedelta(days= 7 * intfloor(weeks))
date += datetime.timedelta(days= 7 * intfloor(weeks))
def add_weeks(self, cursor, user, company, date, weeks, context=None): day_per_week = company.hours_per_work_week / company.hours_per_work_day
37977ffd4cba2a2b2be5d263a7cf423a44376bec /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/37977ffd4cba2a2b2be5d263a7cf423a44376bec/work.py
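The four fixes above (datetime( becoming datetime.datetime(, timedelta becoming datetime.timedelta) all trace back to the same import style: work.py evidently does `import datetime`, so the class and timedelta live under the module name. A quick reminder of the two spellings; mixing them in one module invites exactly this kind of NameError/TypeError at call time:

import datetime                                    # module import: qualify names
d1 = datetime.datetime(2010, 1, 4) + datetime.timedelta(days=7)

from datetime import datetime, timedelta           # class import: bare names work
d2 = datetime(2010, 1, 4) + timedelta(days=7)

assert d1 == d2 == datetime(2010, 1, 11)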
cursor = Transation().cursor
cursor = Transaction().cursor
def get_function_fields(self, ids, names): ''' Function to compute function fields
aa1ac136bfb24c7141d4e7e5ffdd33d607c7e831 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/aa1ac136bfb24c7141d4e7e5ffdd33d607c7e831/work.py
self.assertRaises(Exception, test_view('project_plan'))
test_view('project_plan')
def test0005views(self): ''' Test views. ''' self.assertRaises(Exception, test_view('project_plan'))
6656c57581da791845e6a3d9ac19b087c74ef2cc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/6656c57581da791845e6a3d9ac19b087c74ef2cc/test_project_plan.py
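The original assertion called test_view('project_plan') itself and passed its return value to assertRaises, so the view was exercised outside the assertion and nothing was really checked; the fix simply calls the function and lets any exception fail the test. The pitfall in miniature, outside of the module's test harness:

import unittest

def boom():
    raise ValueError("broken view")

class Example(unittest.TestCase):
    def test_right(self):
        # Pass the callable (plus any arguments) and let assertRaises call it.
        self.assertRaises(ValueError, boom)

    # Wrong, the pre-fix shape: boom() runs before assertRaises is entered,
    # so the exception escapes the assertion instead of being checked.
    #   self.assertRaises(ValueError, boom())

if __name__ == "__main__":
    unittest.main()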
db_field: datetime.datetime.combine(value, datetime.time()),
db_field: value \ and datetime.datetime.combine(value, datetime.time()) \ or False,
def set_function_fields(self, cursor, user, ids, name, value, context=None): request_obj = self.pool.get('res.request') req_ref_obj = self.pool.get('res.request.reference')
f75243f23751ae4199ad6e7c83024f4f28feac44 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9282/f75243f23751ae4199ad6e7c83024f4f28feac44/work.py
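This follow-up to the earlier combine() fix guards against the field being cleared: combine() cannot take None, so an empty value is stored as False instead (presumably the framework's convention for "no value"). The `and/or` idiom is the pre-2.5 spelling of a conditional expression; both forms side by side:

import datetime

def to_stored(value):
    # The guard as written in the fix (old and/or idiom):
    return value and datetime.datetime.combine(value, datetime.time()) or False

def to_stored_modern(value):
    # Equivalent conditional expression; safe because a real date is truthy.
    return datetime.datetime.combine(value, datetime.time()) if value else False

assert to_stored(None) is False
assert to_stored(datetime.date(2010, 3, 14)) == to_stored_modern(datetime.date(2010, 3, 14))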
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.jobStart(), self.jobEnd(), self.getTime(), self.getCost(), self.getState()))
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
def log_job_closed(self): log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.jobStart(), self.jobEnd(), self.getTime(), self.getCost(), self.getState()))
85670ddefd36ac8ee776022693db06ecef7e459c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/85670ddefd36ac8ee776022693db06ecef7e459c/job.py
self.__ticket_id = ticket_id
self.__ticket_id = ticket_id
def __init__(self, ticket_id, id, mgr): """Initialize a new task.""" TaskFSM.__init__(self) self.mtx = self.fsm.getLock()
8bc87625dde3bef611c4b5e7b8cb4ecc84db90be /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/8bc87625dde3bef611c4b5e7b8cb4ecc84db90be/task.py
self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
if self.__spool is not None: self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
def cleanUp(self): """cleans up the task, i.e. removes the task's spool directory""" self.log.info("removing spool directory") from xbe.util import removeDirCompletely removeDirCompletely(self.__spool)
8bc87625dde3bef611c4b5e7b8cb4ecc84db90be /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/8bc87625dde3bef611c4b5e7b8cb4ecc84db90be/task.py
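Guarding on self.__spool keeps cleanUp() from handing None to removeDirCompletely when a task failed before its spool directory was ever created. The same pattern in isolation, with shutil.rmtree standing in for the xbe helper:

import shutil
import tempfile

class Task(object):
    def __init__(self, spool=None):
        self.__spool = spool                 # stays None if setup never ran

    def clean_up(self):
        # Only attempt removal when a spool directory actually exists.
        if self.__spool is not None:
            shutil.rmtree(self.__spool, ignore_errors=True)
            self.__spool = None

Task(tempfile.mkdtemp()).clean_up()          # removes the directory
Task().clean_up()                            # silently a no-op, no TypeError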
return self.__fsm.getState().getName()
try: return self.__fsm.getState().getName() except: return "Terminated"
def getState(self): return self.__fsm.getState().getName()
0fdc405db8c912602a57671c288f8a55d184b85f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/0fdc405db8c912602a57671c288f8a55d184b85f/job.py
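Wrapping the lookup means a job whose state machine has already been torn down answers a status query with "Terminated" instead of raising from inside getState(). The bare except in the fix is broad; a narrower sketch of the same guard with a dummy FSM:

class Job(object):
    def __init__(self, fsm=None):
        self.__fsm = fsm                     # None once the FSM is torn down

    def get_state(self):
        try:
            return self.__fsm.getState().getName()
        except AttributeError:               # FSM missing or already destroyed
            return "Terminated"

assert Job().get_state() == "Terminated"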
log.debug("Event '%s' not found." % event)
log.error("Event '%s' not found." % event)
def do_Event(self, event, reqCtxt): log.debug("JOB '%s' run in state '%s' event '%s'" % (self.ticket(), self.__fsm.getState().getName(), event)) if hasattr(self.__fsm, event): log.debug("Run event '%s'" % event) getattr(self.__fsm, event)(self, reqCtxt) else: log.debug("Event '%s' not found." % event) raise CommandFailed("jobFSM: No such Transition '%s'." % event)
0fdc405db8c912602a57671c288f8a55d184b85f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/0fdc405db8c912602a57671c288f8a55d184b85f/job.py
log.debug("=========>do_Event '%s' run in state '%s' even [%s]" %
log.debug("=========>do_Event '%s' run in state '%s' event [%s]" %
def do_EventByMap(self, eventkey, reqCtxt): eventMap = { "Pending:Reserved" : 1, "Pending:Confirmed" : "confirm", "Running:Stage-In" : "runJob_StageIn", "Running:Instance-Starting" : "", "Running:Executing" : "runJob_Execute", "Running:Stage-Out" : "runJob_StageOut", "Running:Instance-Stopping" : "", "Finished" : "closeJob_Closing",
0fdc405db8c912602a57671c288f8a55d184b85f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/0fdc405db8c912602a57671c288f8a55d184b85f/job.py
log.debug("FAILED: %s." % e) else: log.debug("Event '%s' not found." % event)
log.error("FAILED: %s." % e) else: log.error("Event '%s' not found." % event)
def do_EventByMap(self, eventkey, reqCtxt): eventMap = { "Pending:Reserved" : 1, "Pending:Confirmed" : "confirm", "Running:Stage-In" : "runJob_StageIn", "Running:Instance-Starting" : "", "Running:Executing" : "runJob_Execute", "Running:Stage-Out" : "runJob_StageOut", "Running:Instance-Stopping" : "", "Finished" : "closeJob_Closing",
0fdc405db8c912602a57671c288f8a55d184b85f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/0fdc405db8c912602a57671c288f8a55d184b85f/job.py
log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
log.debug("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState())) return True
def log_job_closed(self): log.info("Job finished: Ticket:%s Task:%s User:%s Start:%f End:%f Time:%f Price:%f State:%s" % (self.ticket(), self.task(), self.getUser(), self.getStart(), self.getEnd(), self.getTime(), self.getCost(), self.getState()))
0fdc405db8c912602a57671c288f8a55d184b85f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2459/0fdc405db8c912602a57671c288f8a55d184b85f/job.py