rem (string, 1 to 322k chars) | add (string, 0 to 2.05M chars) | context (string, 4 to 228k chars) | meta (string, 156 to 215 chars) |
---|---|---|---|
self.driver.glr_lighting_disable() | def draw_tls_surface(self, Lx_eigen_vec, Lx_eigen_val, Lx_rho, Lx_pitch): """Draws the TLS probability surface for a single non-intersecting screw axis. Lx_eigen_val is the vaiance (mean square deviation MSD) of the rotation about the Lx_eigen_vec axis. """ ## create a unique list of bonds which will be used to ## render the TLS surface; this list may be passed in a argument ## to avoid multiple calculations for each screw-rotation axis bond_list = [] in_dict = {} | 0779a1d6628e6acba115b7ba9004fcb5efdcc8b7 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/0779a1d6628e6acba115b7ba9004fcb5efdcc8b7/TLS.py |
arec2["serial"] = arec1["serial"] arec2["chainID"] = arec1["chainID"] arec2["resName"] = arec1["resName"] arec2["resSeq"] = arec1["resSeq"] arec2["iCode"] = arec1["iCode"] arec2["name"] = arec1["name"] arec2["altLoc"] = arec1["altLoc"] arec2["element"] = arec1["element"] arec2["charge"] = arec1["charge"] | if arec1.has_key("serial"): arec2["serial"] = arec1["serial"] if arec1.has_key("chainID"): arec2["chainID"] = arec1["chainID"] if arec1.has_key("resName"): arec2["resName"] = arec1["resName"] if arec1.has_key("resSeq"): arec2["resSeq"] = arec1["resSeq"] if arec1.has_key("iCode"): arec2["iCode"] = arec1["iCode"] if arec1.has_key("name"): arec2["name"] = arec1["name"] if arec1.has_key("altLoc"): arec2["altLoc"] = arec1["altLoc"] if arec1.has_key("element"): arec2["element"] = arec1["element"] if arec1.has_key("charge"): arec2["charge"] = arec1["charge"] | def atom_common(arec1, arec2): arec2["serial"] = arec1["serial"] arec2["chainID"] = arec1["chainID"] arec2["resName"] = arec1["resName"] arec2["resSeq"] = arec1["resSeq"] arec2["iCode"] = arec1["iCode"] arec2["name"] = arec1["name"] arec2["altLoc"] = arec1["altLoc"] arec2["element"] = arec1["element"] arec2["charge"] = arec1["charge"] | 4a2bf3d6cbf5fd3c4e404b8b49e62876f1cec5ff /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4a2bf3d6cbf5fd3c4e404b8b49e62876f1cec5ff/PDBBuilder.py |
("sgroup", 56, 66, "string", "rjust", None), ("z", 67, 70, "integer", "rjust", None)] | ("sgroup", 56, 66, "string", "ljust", None), ("z", 67, 70, "integer", "ljust", None)] | def process(self, recs): """Returns a dictionary with attributes chain_id, num_res, and sequence_list """ seqres = {} | 6c2d587c1af39ee7b76fbae9bc6e8722151de2ca /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/6c2d587c1af39ee7b76fbae9bc6e8722151de2ca/PDB.py |
assert atm.occupancy >= 0.0 and atm.occupancy < 1.0 | assert atm.occupancy >= 0.0 and atm.occupancy <= 1.0 | def calc_atom_weight(atm): """Weight the least-squares fit according to this function. """ | a415833e5ff5acbecb068884a2d8765591db9289 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/a415833e5ff5acbecb068884a2d8765591db9289/tlsmd_analysis.py |
def __init__(self, form, text=None): | def __init__(self, form, text): | def __init__(self, form, text=None): Page.__init__(self, form) self.text = text | 7513555019c5fc099aa4aa56f3137e326a5a06fd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7513555019c5fc099aa4aa56f3137e326a5a06fd/refineprep.py |
page = ErrorPage("The Job ID seems to be expired.") | page = ErrorPage(form, "The Job ID seems to be expired.") | def main(): form = cgi.FieldStorage() page = None job_id = check_job_id(form) if job_id==None: page = ErrorPage("The Job ID seems to be expired.") else: page = RefinePrepPage(form) try: print page.html_page() except RefinePrepError, err: text = '<center><p>%s</p></center>' % (err.text) page = ErrorPage(form, text) print page.html_page() except xmlrpclib.Fault, err: page = ErrorPage(form, "xmlrpclib.Fault: " +str(err)) print page.html_page() except socket.error, err: page = ErrorPage(form, "socket.error: " + str(err)) print page.html_page() | 7513555019c5fc099aa4aa56f3137e326a5a06fd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7513555019c5fc099aa4aa56f3137e326a5a06fd/refineprep.py |
except KeyError: | except IndexError: | def next_chain_id(suggest_chain_id): if suggest_chain_id != "": try: self.struct[suggest_chain_id] except KeyError: return suggest_chain_id for chain_id in "ABCDEFGHIJKLMNOPQRSTUVWXYZ": try: self.struct[chain_id] except KeyError: return chain_id | 9d8a841c203e73e58c0201cbce892f595442be7e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/9d8a841c203e73e58c0201cbce892f595442be7e/StructureBuilder.py |
u_iso = atm_desc["u_iso"] | def lsq_fit_segment(self, frag_id1, frag_id2): """Performs a LSQ fit of TLS parameters for the protein segment starting with fragment index ifrag_start to (and including) the fragment ifrag_end. """ ## all return values here fit_info = {} ## calculate the start/end indexes of the start fragment ## and end fragment so the A matrix and b vector can be sliced ## in the correct placees | e84d51aa8f483309e902b29a0f740a21562abad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e84d51aa8f483309e902b29a0f740a21562abad5/tlsmd_analysis.py |
|
set_TLSiso_b(B_ISOW, i, u_iso, w) | set_TLSiso_b(B_ISOW, i, atm_desc["u_iso"], w) | def lsq_fit_segment(self, frag_id1, frag_id2): """Performs a LSQ fit of TLS parameters for the protein segment starting with fragment index ifrag_start to (and including) the fragment ifrag_end. """ ## all return values here fit_info = {} ## calculate the start/end indexes of the start fragment ## and end fragment so the A matrix and b vector can be sliced ## in the correct placees | e84d51aa8f483309e902b29a0f740a21562abad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e84d51aa8f483309e902b29a0f740a21562abad5/tlsmd_analysis.py |
U_ISOW = matrixmultiply(A_ANISOW, X_ANISO) | def lsq_fit_segment(self, frag_id1, frag_id2): """Performs a LSQ fit of TLS parameters for the protein segment starting with fragment index ifrag_start to (and including) the fragment ifrag_end. """ ## all return values here fit_info = {} ## calculate the start/end indexes of the start fragment ## and end fragment so the A matrix and b vector can be sliced ## in the correct placees | e84d51aa8f483309e902b29a0f740a21562abad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e84d51aa8f483309e902b29a0f740a21562abad5/tlsmd_analysis.py |
|
def lsq_fit_segment(self, frag_id1, frag_id2): """Performs a LSQ fit of TLS parameters for the protein segment starting with fragment index ifrag_start to (and including) the fragment ifrag_end. """ ## all return values here fit_info = {} ## calculate the start/end indexes of the start fragment ## and end fragment so the A matrix and b vector can be sliced ## in the correct placees | e84d51aa8f483309e902b29a0f740a21562abad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e84d51aa8f483309e902b29a0f740a21562abad5/tlsmd_analysis.py |
def lsq_fit_segment(self, frag_id1, frag_id2): """Performs a LSQ fit of TLS parameters for the protein segment starting with fragment index ifrag_start to (and including) the fragment ifrag_end. """ ## all return values here fit_info = {} ## calculate the start/end indexes of the start fragment ## and end fragment so the A matrix and b vector can be sliced ## in the correct placees | e84d51aa8f483309e902b29a0f740a21562abad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e84d51aa8f483309e902b29a0f740a21562abad5/tlsmd_analysis.py |
return TLSGraphChainFastHybrid() | if USE_TLSMDMODULE==True: return TLSGraphChainFastHybrid() else: return TLSGraphChainHybrid() | def NewTLSGraphChain0(tls_model): """Generate and return the proper TLSGraphChain subclass for the requested TLS model. """ if tls_model=="HYBRID": return TLSGraphChainFastHybrid() if tls_model=="ANISO": return TLSGraphChainAnisotropic() if tls_model=="PLUGIN": return TLSGraphChainPlugin() raise Exception() | e84d51aa8f483309e902b29a0f740a21562abad5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e84d51aa8f483309e902b29a0f740a21562abad5/tlsmd_analysis.py |
'<p style="font-size:xx-small; margin-top:%dpx; line-height:12.5px">' % (plot.border_width)] | '<p style="font-size:xx-small; margin-top:%dpx">' % (plot.border_width)] l.append('<ul style="list-style-type:none;margin:0px;padding:0px">') | def html_chain_alignment_plot(self, chain): """generate a plot comparing all segmentations """ plot = sequence_plot.TLSSegmentAlignmentPlot() for ntls, cpartition in chain.partition_collection.iter_ntls_chain_partitions(): plot.add_tls_segmentation(cpartition) | d2709532e583d640a6277dcc18d10e99b4d1295e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/d2709532e583d640a6277dcc18d10e99b4d1295e/html.py |
l.append('<a href=" | l.append('<li style="line-height:20px"><a href=" l.append('</ul>') | def html_chain_alignment_plot(self, chain): """generate a plot comparing all segmentations """ plot = sequence_plot.TLSSegmentAlignmentPlot() for ntls, cpartition in chain.partition_collection.iter_ntls_chain_partitions(): plot.add_tls_segmentation(cpartition) | d2709532e583d640a6277dcc18d10e99b4d1295e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/d2709532e583d640a6277dcc18d10e99b4d1295e/html.py |
def gldl_iter_multidraw_animate(self): """ """ ## optimization: if a rotation of 0.0 degrees was already ## drawn, then there is no need to draw it again zero_rot = False for Lx_axis, Lx_rho, Lx_pitch, Lx_rot, Lx_scale in ( ("L1_eigen_vec", "L1_rho", "L1_pitch", "L1_rot", "L1_scale"), ("L2_eigen_vec", "L2_rho", "L2_pitch", "L2_rot", "L2_scale"), ("L3_eigen_vec", "L3_rho", "L3_pitch", "L3_rot", "L3_scale") ): | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
self.driver.glr_translate(-rho) | self.driver.glr_translate(rho) | def gldl_iter_multidraw_animate(self): """ """ ## optimization: if a rotation of 0.0 degrees was already ## drawn, then there is no need to draw it again zero_rot = False for Lx_axis, Lx_rho, Lx_pitch, Lx_rot, Lx_scale in ( ("L1_eigen_vec", "L1_rho", "L1_pitch", "L1_rot", "L1_scale"), ("L2_eigen_vec", "L2_rho", "L2_pitch", "L2_rot", "L2_scale"), ("L3_eigen_vec", "L3_rho", "L3_pitch", "L3_rot", "L3_scale") ): | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
self.driver.glr_translate(rho + screw) | self.driver.glr_translate(-rho + screw) | def gldl_iter_multidraw_animate(self): """ """ ## optimization: if a rotation of 0.0 degrees was already ## drawn, then there is no need to draw it again zero_rot = False for Lx_axis, Lx_rho, Lx_pitch, Lx_rot, Lx_scale in ( ("L1_eigen_vec", "L1_rho", "L1_pitch", "L1_rot", "L1_scale"), ("L2_eigen_vec", "L2_rho", "L2_pitch", "L2_rot", "L2_scale"), ("L3_eigen_vec", "L3_rho", "L3_pitch", "L3_rot", "L3_scale") ): | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
L1_c = 0.0 | L1_c = 0.0 | def update_time(self): """Changes the time of the TLS group simulating harmonic motion. """ if self.tls_group.is_null(): return | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
L2_c = 0.0 | L2_c = 0.0 | def update_time(self): """Changes the time of the TLS group simulating harmonic motion. """ if self.tls_group.is_null(): return | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
L3_c = 0.0 | L3_c = 0.0 | def update_time(self): """Changes the time of the TLS group simulating harmonic motion. """ if self.tls_group.is_null(): return | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
gam = 0.5 | gam = 0.50 | def draw_tls_surface(self, Lx_eigen_vec, Lx_eigen_val, Lx_rho, Lx_pitch): """Draws the TLS probability surface for a single non-intersecting screw axis. Lx_eigen_val is the vaiance (mean square deviation MSD) of the rotation about the Lx_eigen_vec axis. """ ## create a unique list of bonds which will be used to ## render the TLS surface; this list may be passed in a argument ## to avoid multiple calculations for each screw-rotation axis bond_list = [] in_dict = {} | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
step1 = rot_step * float(step) step2 = step1 + rot_step | rot_start = rot_step * float(step) rot_end = rot_step * float(step + 1) | def draw_tls_surface(self, Lx_eigen_vec, Lx_eigen_val, Lx_rho, Lx_pitch): """Draws the TLS probability surface for a single non-intersecting screw axis. Lx_eigen_val is the vaiance (mean square deviation MSD) of the rotation about the Lx_eigen_vec axis. """ ## create a unique list of bonds which will be used to ## render the TLS surface; this list may be passed in a argument ## to avoid multiple calculations for each screw-rotation axis bond_list = [] in_dict = {} | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
rot1 = step1 * sign rot2 = step2 * sign | rot1 = rot_start * sign rot2 = rot_end * sign | def draw_tls_surface(self, Lx_eigen_vec, Lx_eigen_val, Lx_rho, Lx_pitch): """Draws the TLS probability surface for a single non-intersecting screw axis. Lx_eigen_val is the vaiance (mean square deviation MSD) of the rotation about the Lx_eigen_vec axis. """ ## create a unique list of bonds which will be used to ## render the TLS surface; this list may be passed in a argument ## to avoid multiple calculations for each screw-rotation axis bond_list = [] in_dict = {} | 4d5284e4c4b1286073a18f9657c98f6fd520870d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/4d5284e4c4b1286073a18f9657c98f6fd520870d/TLS.py |
self.write(self.form_string(mstring)) | self.write(self.form_mstring(mstring)) | def write_mstring(self, mstring): self.write(self.form_string(mstring)) | f32de331f1af88fb64e8cc4cec6b143ca5ef3b76 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/f32de331f1af88fb64e8cc4cec6b143ca5ef3b76/mmCIF.py |
sys.stderr.write("Mail Client %s Not Found" % (conf.MSMTP)) | sys.stderr.write("mail client not found: %s" % (conf.MSMTP)) | def SendEmail(address, subject, body): if not os.path.isfile(conf.MSMTP): sys.stderr.write("Mail Client %s Not Found" % (conf.MSMTP)) return mlist = ["To: %s" % (address), "Subject: %s" % (subject), "", body] ## send mail using msmtp pobj = subprocess.Popen([conf.MSMTP, address], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, close_fds = True, bufsize = 8192) pobj.stdin.write("\n".join(mlist)) pobj.wait() | 6d7350dd61b4a559839fe339299061a4df5f7f5b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/6d7350dd61b4a559839fe339299061a4df5f7f5b/email.py |
pobj = subprocess.Popen([conf.MSMTP, address], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, close_fds = True, bufsize = 8192) | try: pobj = subprocess.Popen([conf.MSMTP, address], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, close_fds = True, bufsize = 8192) except OSError: sys.stderr.write("[ERROR] mail client failed to execute: %s" % (conf.MSMTP)) return | def SendEmail(address, subject, body): if not os.path.isfile(conf.MSMTP): sys.stderr.write("Mail Client %s Not Found" % (conf.MSMTP)) return mlist = ["To: %s" % (address), "Subject: %s" % (subject), "", body] ## send mail using msmtp pobj = subprocess.Popen([conf.MSMTP, address], stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, close_fds = True, bufsize = 8192) pobj.stdin.write("\n".join(mlist)) pobj.wait() | 6d7350dd61b4a559839fe339299061a4df5f7f5b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/6d7350dd61b4a559839fe339299061a4df5f7f5b/email.py |
if records != None: print "%s: %d records in %.2f seconds" % ( path, len(records), sec) else: print "%s: NO RECORDS" % (path) | stats = {} stats["time"] = sec | def read_pdb(path): sec = time.time() records = pdbmodule.read(path) sec = time.time() - sec if records != None: print "%s: %d records in %.2f seconds" % ( path, len(records), sec) else: print "%s: NO RECORDS" % (path) for rec in records: if rec["RECORD"] == "REMARK": try: text = rec["text"] except KeyError: pass else: if text.find("RESOLUTION RANGE HIGH") == 1: print text | 7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92/test.py |
if rec["RECORD"] == "REMARK": | rec_type = rec["RECORD"] if rec_type == "REMARK": | def read_pdb(path): sec = time.time() records = pdbmodule.read(path) sec = time.time() - sec if records != None: print "%s: %d records in %.2f seconds" % ( path, len(records), sec) else: print "%s: NO RECORDS" % (path) for rec in records: if rec["RECORD"] == "REMARK": try: text = rec["text"] except KeyError: pass else: if text.find("RESOLUTION RANGE HIGH") == 1: print text | 7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92/test.py |
pass else: if text.find("RESOLUTION RANGE HIGH") == 1: print text | continue if text.find("RESOLUTION RANGE HIGH") == 1: try: stats["res"] = float(text[33:]) except ValueError: pass elif rec_type == "ATOM " or rec_type == "HETATM": try: stats["atoms"] += 1 except KeyError: stats["atoms"] = 1 elif rec_type == "ANISOU": try: stats["anisou"] += 1 except KeyError: stats["anisou"] = 1 return stats | def read_pdb(path): sec = time.time() records = pdbmodule.read(path) sec = time.time() - sec if records != None: print "%s: %d records in %.2f seconds" % ( path, len(records), sec) else: print "%s: NO RECORDS" % (path) for rec in records: if rec["RECORD"] == "REMARK": try: text = rec["text"] except KeyError: pass else: if text.find("RESOLUTION RANGE HIGH") == 1: print text | 7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92/test.py |
def read_pdb(path): sec = time.time() records = pdbmodule.read(path) sec = time.time() - sec if records != None: print "%s: %d records in %.2f seconds" % ( path, len(records), sec) else: print "%s: NO RECORDS" % (path) for rec in records: if rec["RECORD"] == "REMARK": try: text = rec["text"] except KeyError: pass else: if text.find("RESOLUTION RANGE HIGH") == 1: print text | 7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92/test.py |
def read_pdb(path): sec = time.time() records = pdbmodule.read(path) sec = time.time() - sec if records != None: print "%s: %d records in %.2f seconds" % ( path, len(records), sec) else: print "%s: NO RECORDS" % (path) for rec in records: if rec["RECORD"] == "REMARK": try: text = rec["text"] except KeyError: pass else: if text.find("RESOLUTION RANGE HIGH") == 1: print text | 7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92/test.py |
print str(i)+": ", read_pdb(pathx) | stats = read_pdb(pathx) print "%d:%s:%s" % (i, pathx, stats) | def read_pdb(path): sec = time.time() records = pdbmodule.read(path) sec = time.time() - sec if records != None: print "%s: %d records in %.2f seconds" % ( path, len(records), sec) else: print "%s: NO RECORDS" % (path) for rec in records: if rec["RECORD"] == "REMARK": try: text = rec["text"] except KeyError: pass else: if text.find("RESOLUTION RANGE HIGH") == 1: print text | 7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7ec4c6518170b22cfaef5e2cd7fdbf21624c8b92/test.py |
"[Status: %s] " % (jdict.get('status', 'None'))] | "[State: %s] " % (jdict.get('state', 'None'))] | def log_job_end(jdict): ln = "" ln += "[%s]: " % (time.asctime(time.localtime(time.time()))) ln += "Finished Job %s" % (jdict["job_id"]) log_write(ln) ## write to a special log file if jdict.get("private_job", True): private_text = "private" else: private_text = "public" submit_time = jdict.get('submit_time', 0.0) run_time_begin = jdict.get('run_time_begin', 0.0) run_time_end = jdict.get('run_time_end', 0.0) processing_time = timediff(run_time_begin, run_time_end) l = ["[Submit time: %s]" % (timestring(submit_time)), "[Start time: %s] " % (timestring(run_time_begin)), "[End time: %s] " % (timestring(run_time_end)), "[Processing time: %s] " % (processing_time), "[IP : %s] " % (jdict.get("ip_addr", "000.000.000.000")), "[Email: %s] " % (jdict.get("email", "[email protected]")), "[Privacy: %s] " % (private_text), "[Job ID: %s] " % (jdict.get("job_id", "EEK!!")), "[Structure ID: %s] " % (jdict.get("structure_id", "----")), "[Chain sizes: %s] " % (chain_size_string(jdict)), "[TLS Model: %s] " % (jdict.get('tls_model', 'None')), "[Weight: %s] " % (jdict.get('weight', 'None')), "[Atoms: %s] " % (jdict.get('include_atoms', 'None')), "[Status: %s] " % (jdict.get('status', 'None'))] try: open(conf.LOG_PATH, "a").write(" ".join(l) + "\n") except IOError: log_write("ERROR: cannot open logfile %s" % (conf.LOG_PATH)) | b48d806b00beca8c5e289445a77097960da89cac /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/b48d806b00beca8c5e289445a77097960da89cac/webtlsmdrund.py |
if atmx.name=="CA": yield atmx | if atmx.name in ["N","CA","C","O","CB"]: yield atmx | def iter_protein_atoms(sobjx): for fragx in sobjx.iter_amino_acids(): for atmx in fragx.iter_atoms(): if atmx.name=="CA": yield atmx | e106395b62b6f7c590f709d60deb6ca318c31727 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e106395b62b6f7c590f709d60deb6ca318c31727/html.py |
"""Iterates over all Atom objects. The iteration is preformed in order according to the Chain and Fragment ordering rules the Atom object is a part of. | """Iterates over all Atom objects according to the Structure defaults. | def iter_atoms(self): """Iterates over all Atom objects. The iteration is preformed in order according to the Chain and Fragment ordering rules the Atom object is a part of. """ for chain in self.iter_chains(): for atm in chain.iter_atoms(): yield atm | 5c7060c01cdfb5cacb07902a367f879ff7543e10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5c7060c01cdfb5cacb07902a367f879ff7543e10/Structure.py |
"""Counts all Atom objects in the Structure's default alt_loc. | """Counts all Atom objects in according to the Structure defaults. | def count_atoms(self): """Counts all Atom objects in the Structure's default alt_loc. """ n = 0 for atm in self.iter_atoms(): n += 1 return n | 5c7060c01cdfb5cacb07902a367f879ff7543e10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5c7060c01cdfb5cacb07902a367f879ff7543e10/Structure.py |
""" | """Iterates over all Atom objects including all atoms in multiple conformations. | def iter_all_atoms(self): """ """ for chain in self.iter_chains(): for atm in chain.iter_all_atoms(): yield atm | 5c7060c01cdfb5cacb07902a367f879ff7543e10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5c7060c01cdfb5cacb07902a367f879ff7543e10/Structure.py |
"""Counts all Atom objects. | """Counts all Atom objects including all atoms in multiple conformations. | def count_all_atoms(self): """Counts all Atom objects. """ n = 0 for atm in self.iter_all_atoms(): n += 1 return n | 5c7060c01cdfb5cacb07902a367f879ff7543e10 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5c7060c01cdfb5cacb07902a367f879ff7543e10/Structure.py |
"Ethan Merrit") | "Ethan Merritt") | def __init__(self): gtk.Dialog.__init__(self, "About mmCIF Editor", None, 0) self.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE) self.connect("delete-event", self.delete_event_cb) self.connect("response", self.delete_event_cb) | 845416a118287e7faa2f9e69392cad2bdc4545cc /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/845416a118287e7faa2f9e69392cad2bdc4545cc/cif_editor.py |
def refmac5_prep(xyzin, tlsin, xyzout, tlsout): | def refmac5_prep(xyzin, tlsin_list, xyzout, tlsout): | def refmac5_prep(xyzin, tlsin, xyzout, tlsout): """Use TLS model + Uiso for each atom. Output xyzout with the residual Uiso only. """ os.umask(022) ## load structure struct = LoadStructure(fil = xyzin) ## load and construct TLS groups tls_group_list = [] tls_file = TLSFile() tls_file.set_file_format(TLSFileFormatTLSOUT()) tls_file.load(open(tlsin, "r")) for tls_desc in tls_file.tls_desc_list: tls_group = tls_desc.construct_tls_group_with_atoms(struct) fit_tls_group(tls_group) tls_group.tls_desc = tls_desc tls_group_list.append(tls_group) ## set the extra Uiso for each atom for tls_group in tls_group_list: ## minimal/maximal amount of Uiso which has to be added ## to the group's atoms to to make Uiso == Uiso_tls min_Uiso = 0.0 max_Uiso = 0.0 n = 0 sum_diff2 = 0.0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 n += 1 sum_diff2 += (tls_tf - ref_tf)**2 if ref_tf>tls_tf: max_Uiso = max(ref_tf - tls_tf, max_Uiso) else: min_Uiso = max(tls_tf - ref_tf, min_Uiso) msd = sum_diff2 / n rmsd = math.sqrt(msd) ## report the percentage of atoms with Uiso within the RMSD ntotal = 0 nrmsd = 0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 ntotal += 1 deviation = math.sqrt((tls_tf - ref_tf)**2) if deviation<=rmsd: nrmsd += 1 ## reduce the TLS group T tensor by min_Uiso so that ## a PDB file can be written out where all atoms ## Uiso == Uiso_tls ## we must rotate the T tensor to its primary axes before ## subtracting min_Uiso magnitude from it (T_eval, TR) = eigenvectors(tls_group.T) T = matrixmultiply(TR, matrixmultiply(tls_group.T, transpose(TR))) assert allclose(T[0,1], 0.0) assert allclose(T[0,2], 0.0) assert allclose(T[1,2], 0.0) T[0,0] = T[0,0] - min_Uiso T[1,1] = T[1,1] - min_Uiso T[2,2] = T[2,2] - min_Uiso ## now take half of the smallest principal component of T and ## move it into the individual atomic temperature factors min_T = min(T[0,0], min(T[1,1], T[2,2])) sub_T = min_T * 0.80 add_Uiso = min_T - sub_T T[0,0] = T[0,0] - sub_T T[1,1] = T[1,1] - sub_T T[2,2] = T[2,2] - sub_T ## rotate T back to original orientation tls_group.T = matrixmultiply(transpose(TR), matrixmultiply(T, TR)) ## reset the TLS tensor values in the TLSDesc object so they can be ## saved tls_group.tls_desc.set_tls_group(tls_group) ## set atm.temp_factor for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 if ref_tf>tls_tf: aatm.temp_factor = ((add_Uiso) + ref_tf - tls_tf)*U2B aatm.U = None else: aatm.temp_factor = (add_Uiso) * U2B aatm.U = None SaveStructure(fil=xyzout, struct=struct) tls_file.save(open(tlsout, "w")) | 9fd3d90c1518243c4a524d66eb34d0f807950d86 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/9fd3d90c1518243c4a524d66eb34d0f807950d86/refineprep.py |
tls_file = TLSFile() tls_file.set_file_format(TLSFileFormatTLSOUT()) tls_file.load(open(tlsin, "r")) for tls_desc in tls_file.tls_desc_list: tls_group = tls_desc.construct_tls_group_with_atoms(struct) fit_tls_group(tls_group) tls_group.tls_desc = tls_desc tls_group_list.append(tls_group) | for tlsin in tlsin_list: tls_file = TLSFile() tls_file.set_file_format(TLSFileFormatTLSOUT()) tls_file.load(open(tlsin, "r")) for tls_desc in tls_file.tls_desc_list: tls_group = tls_desc.construct_tls_group_with_atoms(struct) tls_group.tls_desc = tls_desc tls_group_list.append(tls_group) | def refmac5_prep(xyzin, tlsin, xyzout, tlsout): """Use TLS model + Uiso for each atom. Output xyzout with the residual Uiso only. """ os.umask(022) ## load structure struct = LoadStructure(fil = xyzin) ## load and construct TLS groups tls_group_list = [] tls_file = TLSFile() tls_file.set_file_format(TLSFileFormatTLSOUT()) tls_file.load(open(tlsin, "r")) for tls_desc in tls_file.tls_desc_list: tls_group = tls_desc.construct_tls_group_with_atoms(struct) fit_tls_group(tls_group) tls_group.tls_desc = tls_desc tls_group_list.append(tls_group) ## set the extra Uiso for each atom for tls_group in tls_group_list: ## minimal/maximal amount of Uiso which has to be added ## to the group's atoms to to make Uiso == Uiso_tls min_Uiso = 0.0 max_Uiso = 0.0 n = 0 sum_diff2 = 0.0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 n += 1 sum_diff2 += (tls_tf - ref_tf)**2 if ref_tf>tls_tf: max_Uiso = max(ref_tf - tls_tf, max_Uiso) else: min_Uiso = max(tls_tf - ref_tf, min_Uiso) msd = sum_diff2 / n rmsd = math.sqrt(msd) ## report the percentage of atoms with Uiso within the RMSD ntotal = 0 nrmsd = 0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 ntotal += 1 deviation = math.sqrt((tls_tf - ref_tf)**2) if deviation<=rmsd: nrmsd += 1 ## reduce the TLS group T tensor by min_Uiso so that ## a PDB file can be written out where all atoms ## Uiso == Uiso_tls ## we must rotate the T tensor to its primary axes before ## subtracting min_Uiso magnitude from it (T_eval, TR) = eigenvectors(tls_group.T) T = matrixmultiply(TR, matrixmultiply(tls_group.T, transpose(TR))) assert allclose(T[0,1], 0.0) assert allclose(T[0,2], 0.0) assert allclose(T[1,2], 0.0) T[0,0] = T[0,0] - min_Uiso T[1,1] = T[1,1] - min_Uiso T[2,2] = T[2,2] - min_Uiso ## now take half of the smallest principal component of T and ## move it into the individual atomic temperature factors min_T = min(T[0,0], min(T[1,1], T[2,2])) sub_T = min_T * 0.80 add_Uiso = min_T - sub_T T[0,0] = T[0,0] - sub_T T[1,1] = T[1,1] - sub_T T[2,2] = T[2,2] - sub_T ## rotate T back to original orientation tls_group.T = matrixmultiply(transpose(TR), matrixmultiply(T, TR)) ## reset the TLS tensor values in the TLSDesc object so they can be ## saved tls_group.tls_desc.set_tls_group(tls_group) ## set atm.temp_factor for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 if ref_tf>tls_tf: aatm.temp_factor = ((add_Uiso) + ref_tf - tls_tf)*U2B aatm.U = None else: aatm.temp_factor = (add_Uiso) * U2B aatm.U = None SaveStructure(fil=xyzout, struct=struct) tls_file.save(open(tlsout, "w")) | 9fd3d90c1518243c4a524d66eb34d0f807950d86 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/9fd3d90c1518243c4a524d66eb34d0f807950d86/refineprep.py |
def refmac5_prep(xyzin, tlsin, xyzout, tlsout): """Use TLS model + Uiso for each atom. Output xyzout with the residual Uiso only. """ os.umask(022) ## load structure struct = LoadStructure(fil = xyzin) ## load and construct TLS groups tls_group_list = [] tls_file = TLSFile() tls_file.set_file_format(TLSFileFormatTLSOUT()) tls_file.load(open(tlsin, "r")) for tls_desc in tls_file.tls_desc_list: tls_group = tls_desc.construct_tls_group_with_atoms(struct) fit_tls_group(tls_group) tls_group.tls_desc = tls_desc tls_group_list.append(tls_group) ## set the extra Uiso for each atom for tls_group in tls_group_list: ## minimal/maximal amount of Uiso which has to be added ## to the group's atoms to to make Uiso == Uiso_tls min_Uiso = 0.0 max_Uiso = 0.0 n = 0 sum_diff2 = 0.0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 n += 1 sum_diff2 += (tls_tf - ref_tf)**2 if ref_tf>tls_tf: max_Uiso = max(ref_tf - tls_tf, max_Uiso) else: min_Uiso = max(tls_tf - ref_tf, min_Uiso) msd = sum_diff2 / n rmsd = math.sqrt(msd) ## report the percentage of atoms with Uiso within the RMSD ntotal = 0 nrmsd = 0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 ntotal += 1 deviation = math.sqrt((tls_tf - ref_tf)**2) if deviation<=rmsd: nrmsd += 1 ## reduce the TLS group T tensor by min_Uiso so that ## a PDB file can be written out where all atoms ## Uiso == Uiso_tls ## we must rotate the T tensor to its primary axes before ## subtracting min_Uiso magnitude from it (T_eval, TR) = eigenvectors(tls_group.T) T = matrixmultiply(TR, matrixmultiply(tls_group.T, transpose(TR))) assert allclose(T[0,1], 0.0) assert allclose(T[0,2], 0.0) assert allclose(T[1,2], 0.0) T[0,0] = T[0,0] - min_Uiso T[1,1] = T[1,1] - min_Uiso T[2,2] = T[2,2] - min_Uiso ## now take half of the smallest principal component of T and ## move it into the individual atomic temperature factors min_T = min(T[0,0], min(T[1,1], T[2,2])) sub_T = min_T * 0.80 add_Uiso = min_T - sub_T T[0,0] = T[0,0] - sub_T T[1,1] = T[1,1] - sub_T T[2,2] = T[2,2] - sub_T ## rotate T back to original orientation tls_group.T = matrixmultiply(transpose(TR), matrixmultiply(T, TR)) ## reset the TLS tensor values in the TLSDesc object so they can be ## saved tls_group.tls_desc.set_tls_group(tls_group) ## set atm.temp_factor for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = trace(Utls)/3.0 ref_tf = trace(aatm.get_U())/3.0 if ref_tf>tls_tf: aatm.temp_factor = ((add_Uiso) + ref_tf - tls_tf)*U2B aatm.U = None else: aatm.temp_factor = (add_Uiso) * U2B aatm.U = None SaveStructure(fil=xyzout, struct=struct) tls_file.save(open(tlsout, "w")) | 9fd3d90c1518243c4a524d66eb34d0f807950d86 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/9fd3d90c1518243c4a524d66eb34d0f807950d86/refineprep.py |
if atm.sig_position: | if atm.sig_position != None: | def atom_common(arec1, arec2): arec2["serial"] = arec1["serial"] arec2["chainID"] = arec1["chainID"] arec2["resName"] = arec1["resName"] arec2["resSeq"] = arec1["resSeq"] arec2["iCode"] = arec1["iCode"] arec2["name"] = arec1["name"] arec2["altLoc"] = arec1["altLoc"] arec2["element"] = arec1["element"] arec2["charge"] = arec1["charge"] | 7a81f52999a846a8139b020afa81586b47e97200 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7a81f52999a846a8139b020afa81586b47e97200/PDB.py |
if atm.U: | if atm.U != None: | def atom_common(arec1, arec2): arec2["serial"] = arec1["serial"] arec2["chainID"] = arec1["chainID"] arec2["resName"] = arec1["resName"] arec2["resSeq"] = arec1["resSeq"] arec2["iCode"] = arec1["iCode"] arec2["name"] = arec1["name"] arec2["altLoc"] = arec1["altLoc"] arec2["element"] = arec1["element"] arec2["charge"] = arec1["charge"] | 7a81f52999a846a8139b020afa81586b47e97200 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7a81f52999a846a8139b020afa81586b47e97200/PDB.py |
if atm.sig_U: siguij_rec = siguij() | if atm.sig_U != None: siguij_rec = SIGUIJ() | def atom_common(arec1, arec2): arec2["serial"] = arec1["serial"] arec2["chainID"] = arec1["chainID"] arec2["resName"] = arec1["resName"] arec2["resSeq"] = arec1["resSeq"] arec2["iCode"] = arec1["iCode"] arec2["name"] = arec1["name"] arec2["altLoc"] = arec1["altLoc"] arec2["element"] = arec1["element"] arec2["charge"] = arec1["charge"] | 7a81f52999a846a8139b020afa81586b47e97200 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/7a81f52999a846a8139b020afa81586b47e97200/PDB.py |
self.segmentations = [] | self.configurations = [] | def __init__(self): ## bars are 15 pixels heigh self.pheight = ALIGN_HEIGHT ## spacing pixels between stacked bars self.spacing = ALIGN_SPACING ## background color self.bg_color = rgb_f2i((1.0, 1.0, 1.0)) self.frag_list = [] self.segmentations = [] | 95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1/html.py |
self.segmentations.append(tls_seg_desc) | self.configurations.append(tls_seg_desc) | def add_tls_segmentation(self, chainopt, ntls): """Add a TLS optimization to the alignment plot. """ tlsopt = chainopt["tlsopt"][ntls] ## get the list of TLS segments for the specified number of ## segments (ntls) tls_seg_desc = {} self.segmentations.append(tls_seg_desc) tls_seg_desc["chainopt"] = chainopt tls_seg_desc["ntls"] = ntls tls_seg_desc["tlsopt"] = tlsopt ## update the master fragment_list self.__update_frag_list(chainopt["chain"], tlsopt) | 95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1/html.py |
if len(self.frag_list)==0 or len(self.segmentations)==0: | if len(self.frag_list)==0 or len(self.configurations)==0: | def plot(self, path): """Plot and write the png plot image to the specified path. """ if len(self.frag_list)==0 or len(self.segmentations)==0: return False nfrag = len(self.frag_list) target_width = 500 fw = int(round(float(ALIGN_TARGET_WIDTH) / nfrag)) fwidth = max(1, fw) | 95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1/html.py |
num_plots = len(self.segmentations) | num_plots = len(self.configurations) | def plot(self, path): """Plot and write the png plot image to the specified path. """ if len(self.frag_list)==0 or len(self.segmentations)==0: return False nfrag = len(self.frag_list) target_width = 500 fw = int(round(float(ALIGN_TARGET_WIDTH) / nfrag)) fwidth = max(1, fw) | 95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1/html.py |
for i in range(len(self.segmentations)): tls_seg_desc = self.segmentations[i] | for i in range(len(self.configurations)): tls_seg_desc = self.configurations[i] | def plot(self, path): """Plot and write the png plot image to the specified path. """ if len(self.frag_list)==0 or len(self.segmentations)==0: return False nfrag = len(self.frag_list) target_width = 500 fw = int(round(float(ALIGN_TARGET_WIDTH) / nfrag)) fwidth = max(1, fw) | 95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/95e4e59ca6c2fcbff881aa5f49bc4f4fc89aeee1/html.py |
if const.MAINCHAIN_ATOM_DICT.has_key(atom.name) is False: | if const.MAINCHAIN_ATOM_DICT.has_key(atm.name) is False: | def calc_include_atom(atm, reject_messages = False): """Filter out atoms from the model which will cause problems or cont contribute to the TLS analysis. """ if atm.position == None: return False if atm.occupancy < 0.1: if reject_messages == True: console.stdoutln("calc_include_atom(%s): rejected because of low occupancy" % (atm)) return False if numpy.trace(atm.get_U()) <= const.TSMALL: if reject_messages == True: console.stdoutln("calc_include_atom(%s): rejected because of small Uiso magnitude " % (atm)) return False elif conf.globalconf.include_atoms == "MAINCHAIN": if const.MAINCHAIN_ATOM_DICT.has_key(atom.name) is False: if reject_messages == True: console.stdoutln("calc_include_atom(%s): rejected non-mainchain atom" % (atm)) return False return True | 8a073b457d87aac2cb7a4880ce71c77f6f8f23c9 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/8a073b457d87aac2cb7a4880ce71c77f6f8f23c9/atom_selection.py |
self.term_alpha = 0.75 | self.term_alpha = 0.5 | def __init__(self): self.visible = True self.width = 0 self.height = 0 self.zplane = 5000.0 | 556ea8e399ecff3095bf69074967705963f51e8d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/556ea8e399ecff3095bf69074967705963f51e8d/glutviewer.py |
print "lsq_fit_segment centroid = ", centroid | def lsq_fit_segment(self, frag_id1, frag_id2): """Performs a LSQ fit of TLS parameters for the protein segment starting with fragment index ifrag_start to (and including) the fragment ifrag_end. """ ## all return values here fit_info = {} ## calculate the start/end indexes of the start fragment ## and end fragment so the A matrix and b vector can be sliced ## in the correct placees | e788235f3f04bc81092738494ac5eb845aacc6ef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/e788235f3f04bc81092738494ac5eb845aacc6ef/tlsmd_analysis.py |
|
name = atm_map["name"] fragment_id = atm_map["fragment_id"] chain_id = atm_map["chain_id"] | name = atm_map.get("name", "") fragment_id = atm_map.get("fragment_id", "") chain_id = atm_map.get("chain_id", "") | def load_atom(self, atm_map): """Called repeatedly by the implementation of read_atoms to load all the data for a single atom. The data is contained in the atm_map argument, and is not well documented at this point. Look at this function and you'll figure it out. """ ## XXX -- I presently do not support more than one NMR ## style MODEL; this is first on the list for the ## next version if atm_map.has_key("model_num") and atm_map["model_num"] > 1: debug("NMR-style multi-models not supported yet") return ## /XXX | 5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23/StructureBuilder.py |
def load_atom(self, atm_map): """Called repeatedly by the implementation of read_atoms to load all the data for a single atom. The data is contained in the atm_map argument, and is not well documented at this point. Look at this function and you'll figure it out. """ ## XXX -- I presently do not support more than one NMR ## style MODEL; this is first on the list for the ## next version if atm_map.has_key("model_num") and atm_map["model_num"] > 1: debug("NMR-style multi-models not supported yet") return ## /XXX | 5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23/StructureBuilder.py |
atm_id = (name, alt_loc, fragment_id, chain_id) | def load_atom(self, atm_map): """Called repeatedly by the implementation of read_atoms to load all the data for a single atom. The data is contained in the atm_map argument, and is not well documented at this point. Look at this function and you'll figure it out. """ ## XXX -- I presently do not support more than one NMR ## style MODEL; this is first on the list for the ## next version if atm_map.has_key("model_num") and atm_map["model_num"] > 1: debug("NMR-style multi-models not supported yet") return ## /XXX | 5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23/StructureBuilder.py |
|
atm_id = (name, alt_loc, fragment_id, chain_id) | def load_atom(self, atm_map): """Called repeatedly by the implementation of read_atoms to load all the data for a single atom. The data is contained in the atm_map argument, and is not well documented at this point. Look at this function and you'll figure it out. """ ## XXX -- I presently do not support more than one NMR ## style MODEL; this is first on the list for the ## next version if atm_map.has_key("model_num") and atm_map["model_num"] > 1: debug("NMR-style multi-models not supported yet") return ## /XXX | 5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23/StructureBuilder.py |
|
debug("duplicate atom "+str(atm_id)) return | old_name = name i = 2 while self.atom_cache.has_key(atm_id): name = "%s%d" % (old_name, i) atm_id = (name, alt_loc, fragment_id, chain_id) i += 1 | def load_atom(self, atm_map): """Called repeatedly by the implementation of read_atoms to load all the data for a single atom. The data is contained in the atm_map argument, and is not well documented at this point. Look at this function and you'll figure it out. """ ## XXX -- I presently do not support more than one NMR ## style MODEL; this is first on the list for the ## next version if atm_map.has_key("model_num") and atm_map["model_num"] > 1: debug("NMR-style multi-models not supported yet") return ## /XXX | 5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5a1d5bac5e97c5e026ff1ec048c5d79e784e7f23/StructureBuilder.py |
stdout, stdin, stderr = popen2.popen3( (self.render_program_path, "-png", self.render_png_path, "-gamma", "1.5"), 32768*4) | pobj = popen2.Popen4([self.render_program_path, "-png", self.render_png_path, "-gamma", "1.5"], 32768) stdin = pobj.tochild | def close(self): for fil in self.fils: fil.close() | 1d63e693bf684922f0cc1d31f96bdf5c3b9f5cef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/1d63e693bf684922f0cc1d31f96bdf5c3b9f5cef/R3DDriver.py |
else: stdout.read() stdout.close() stderr.read() stderr.close() | elif pobj is not None: pobj.wait() | def close(self): for fil in self.fils: fil.close() | 1d63e693bf684922f0cc1d31f96bdf5c3b9f5cef /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/1d63e693bf684922f0cc1d31f96bdf5c3b9f5cef/R3DDriver.py |
atom.calc_anisotropy() atom.calc_anisotropy3() | try: atom.calc_anisotropy() except ZeroDivisionError: pass try: atom.calc_anisotropy3() except ZeroDivisionError: pass | def atom_test(atom, stats): """Tests the mmLib.Structure.Atom object. """ stats["atom_count"] += 1 stats["testing"] = atom len(atom) alt_loc = atom.get_structure().get_default_alt_loc() atom.get_fragment() atom.get_chain() atom.get_model() atom.get_structure() visited_atm_list = [] for atm in atom.iter_alt_loc(): assert isinstance(atm, Atom) assert atm in atom assert atm not in visited_atm_list visited_atm_list.append(atm) assert atm[atom.alt_loc] == atom assert atm.get_fragment() == atom.get_fragment() assert atm.get_chain() == atom.get_chain() assert atm.get_structure() == atom.get_structure() assert atm.name == atom.name assert atm.res_name == atom.res_name assert atm.fragment_id == atom.fragment_id assert atm.chain_id == atom.chain_id atom.calc_anisotropy() atom.calc_anisotropy3() | 83d1b6500188bfe4caa9eaee09d2dda9ad26f7e0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/83d1b6500188bfe4caa9eaee09d2dda9ad26f7e0/mmlib_test.py |
try: frag1 = self.segment[0] frag2 = self.segment[-1] except IndexError: return "AlphaHelix(%s %d)" % (self.helix_id, self.helix_class) return "AlphaHelix(%s %s %s...%s)" % ( self.helix_id, self.helix_class, str(frag1), str(frag2)) | return "AlphaHelix(%s %s %s:%s...%s:%s)" % ( self.helix_id, self.helix_class, self.chain_id1, self.fragment_id1, self.chain_id2, self.fragment_id2) | def __str__(self): try: frag1 = self.segment[0] frag2 = self.segment[-1] except IndexError: return "AlphaHelix(%s %d)" % (self.helix_id, self.helix_class) | 63febea20a5e8e7dad1aa889a68f883e88edae8a /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/63febea20a5e8e7dad1aa889a68f883e88edae8a/Structure.py |
if ref_tf>tls_tf: aatm.temp_factor = ((add_Uiso) + ref_tf - tls_tf)*U2B | if ref_tf > tls_tf: aatm.temp_factor = ((add_Uiso) + ref_tf - tls_tf)*Constants.U2B | def refmac5_prep(xyzin, tlsin_list, xyzout, tlsout): """Use TLS model + Uiso for each atom. Output xyzout with the residual Uiso only. """ os.umask(022) ## load structure struct = FileLoader.LoadStructure(fil = xyzin) ## load and construct TLS groups tls_group_list = [] tls_file = TLS.TLSFile() tls_file.set_file_format(TLS.TLSFileFormatTLSOUT()) tls_file_format = TLS.TLSFileFormatTLSOUT() for tlsin in tlsin_list: tls_desc_list = tls_file_format.load(open(tlsin, "r")) for tls_desc in tls_desc_list: tls_file.tls_desc_list.append(tls_desc) tls_group = tls_desc.construct_tls_group_with_atoms(struct) tls_group.tls_desc = tls_desc tls_group_list.append(tls_group) ## set the extra Uiso for each atom for tls_group in tls_group_list: ## minimal/maximal amount of Uiso which has to be added ## to the group's atoms to to make Uiso == Uiso_tls min_Uiso = 0.0 max_Uiso = 0.0 n = 0 sum_diff2 = 0.0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = numpy.trace(Utls)/3.0 ref_tf = numpy.trace(aatm.get_U())/3.0 n += 1 sum_diff2 += (tls_tf - ref_tf)**2 if ref_tf>tls_tf: max_Uiso = max(ref_tf - tls_tf, max_Uiso) else: min_Uiso = max(tls_tf - ref_tf, min_Uiso) msd = sum_diff2 / n rmsd = math.sqrt(msd) ## report the percentage of atoms with Uiso within the RMSD ntotal = 0 nrmsd = 0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = numpy.trace(Utls)/3.0 ref_tf = numpy.trace(aatm.get_U())/3.0 ntotal += 1 deviation = math.sqrt((tls_tf - ref_tf)**2) if deviation <= rmsd: nrmsd += 1 ## reduce the TLS group T tensor by min_Uiso so that ## a PDB file can be written out where all atoms ## Uiso == Uiso_tls ## we must rotate the T tensor to its primary axes before ## subtracting min_Uiso magnitude from it (T_eval, TR) = numpy.linalg.eigenvectors(tls_group.T) T = numpy.matrixmultiply(TR, numpy.matrixmultiply(tls_group.T, numpy.transpose(TR))) assert numpy.allclose(T[0,1], 0.0) assert numpy.allclose(T[0,2], 0.0) assert numpy.allclose(T[1,2], 0.0) T[0,0] = T[0,0] - min_Uiso T[1,1] = T[1,1] - min_Uiso T[2,2] = T[2,2] - min_Uiso ## now take half of the smallest principal component of T and ## move it into the individual atomic temperature factors min_T = min(T[0,0], min(T[1,1], T[2,2])) sub_T = min_T * 0.80 add_Uiso = min_T - sub_T T[0,0] = T[0,0] - sub_T T[1,1] = T[1,1] - sub_T T[2,2] = T[2,2] - sub_T ## rotate T back to original orientation tls_group.T = numpy.matrixmultiply(numpy.transpose(TR), numpy.matrixmultiply(T, TR)) ## reset the TLS tensor values in the TLSDesc object so they can be saved tls_group.tls_desc.set_tls_group(tls_group) ## set atm.temp_factor for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = numpy.trace(Utls)/3.0 ref_tf = numpy.trace(aatm.get_U())/3.0 if ref_tf>tls_tf: aatm.temp_factor = ((add_Uiso) + ref_tf - tls_tf)*U2B aatm.U = None else: aatm.temp_factor = (add_Uiso) * U2B aatm.U = None FileLoader.SaveStructure(fil=xyzout, struct=struct) tls_file.save(open(tlsout, "w")) | 5cf3e0a446f3ad356a457839d6c6a478c04ac208 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5cf3e0a446f3ad356a457839d6c6a478c04ac208/refineprep.py |
aatm.temp_factor = (add_Uiso) * U2B | aatm.temp_factor = (add_Uiso) * Constants.U2B | def refmac5_prep(xyzin, tlsin_list, xyzout, tlsout): """Use TLS model + Uiso for each atom. Output xyzout with the residual Uiso only. """ os.umask(022) ## load structure struct = FileLoader.LoadStructure(fil = xyzin) ## load and construct TLS groups tls_group_list = [] tls_file = TLS.TLSFile() tls_file.set_file_format(TLS.TLSFileFormatTLSOUT()) tls_file_format = TLS.TLSFileFormatTLSOUT() for tlsin in tlsin_list: tls_desc_list = tls_file_format.load(open(tlsin, "r")) for tls_desc in tls_desc_list: tls_file.tls_desc_list.append(tls_desc) tls_group = tls_desc.construct_tls_group_with_atoms(struct) tls_group.tls_desc = tls_desc tls_group_list.append(tls_group) ## set the extra Uiso for each atom for tls_group in tls_group_list: ## minimal/maximal amount of Uiso which has to be added ## to the group's atoms to to make Uiso == Uiso_tls min_Uiso = 0.0 max_Uiso = 0.0 n = 0 sum_diff2 = 0.0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = numpy.trace(Utls)/3.0 ref_tf = numpy.trace(aatm.get_U())/3.0 n += 1 sum_diff2 += (tls_tf - ref_tf)**2 if ref_tf>tls_tf: max_Uiso = max(ref_tf - tls_tf, max_Uiso) else: min_Uiso = max(tls_tf - ref_tf, min_Uiso) msd = sum_diff2 / n rmsd = math.sqrt(msd) ## report the percentage of atoms with Uiso within the RMSD ntotal = 0 nrmsd = 0 for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = numpy.trace(Utls)/3.0 ref_tf = numpy.trace(aatm.get_U())/3.0 ntotal += 1 deviation = math.sqrt((tls_tf - ref_tf)**2) if deviation <= rmsd: nrmsd += 1 ## reduce the TLS group T tensor by min_Uiso so that ## a PDB file can be written out where all atoms ## Uiso == Uiso_tls ## we must rotate the T tensor to its primary axes before ## subtracting min_Uiso magnitude from it (T_eval, TR) = numpy.linalg.eigenvectors(tls_group.T) T = numpy.matrixmultiply(TR, numpy.matrixmultiply(tls_group.T, numpy.transpose(TR))) assert numpy.allclose(T[0,1], 0.0) assert numpy.allclose(T[0,2], 0.0) assert numpy.allclose(T[1,2], 0.0) T[0,0] = T[0,0] - min_Uiso T[1,1] = T[1,1] - min_Uiso T[2,2] = T[2,2] - min_Uiso ## now take half of the smallest principal component of T and ## move it into the individual atomic temperature factors min_T = min(T[0,0], min(T[1,1], T[2,2])) sub_T = min_T * 0.80 add_Uiso = min_T - sub_T T[0,0] = T[0,0] - sub_T T[1,1] = T[1,1] - sub_T T[2,2] = T[2,2] - sub_T ## rotate T back to original orientation tls_group.T = numpy.matrixmultiply(numpy.transpose(TR), numpy.matrixmultiply(T, TR)) ## reset the TLS tensor values in the TLSDesc object so they can be saved tls_group.tls_desc.set_tls_group(tls_group) ## set atm.temp_factor for atm, Utls in tls_group.iter_atm_Utls(): for aatm in atm.iter_alt_loc(): tls_tf = numpy.trace(Utls)/3.0 ref_tf = numpy.trace(aatm.get_U())/3.0 if ref_tf>tls_tf: aatm.temp_factor = ((add_Uiso) + ref_tf - tls_tf)*U2B aatm.U = None else: aatm.temp_factor = (add_Uiso) * U2B aatm.U = None FileLoader.SaveStructure(fil=xyzout, struct=struct) tls_file.save(open(tlsout, "w")) | 5cf3e0a446f3ad356a457839d6c6a478c04ac208 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5cf3e0a446f3ad356a457839d6c6a478c04ac208/refineprep.py |
outbase = string.join(listx, "_") | outbase ="_".join(listx) | def html_page(self): job_id = check_job_id(self.form) title = 'Input Files for Refmac5 TLS Refinement' x = '' x += self.html_head(title) x += self.html_title(title) | 5cf3e0a446f3ad356a457839d6c6a478c04ac208 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/10674/5cf3e0a446f3ad356a457839d6c6a478c04ac208/refineprep.py |
usage: idle.py [-c command] [-d] [-e] [-s] [-t title] [arg] ... -c command run this command -d enable debugger -e edit mode; arguments are files to be edited -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else -t title set title of shell window When neither -c nor -e is used, and there are arguments, and the first argument is not '-', the first argument is run as a script. Remaining arguments are arguments to the script or to the command run by -c. | usage: idle.py [-c command] [-d] [-i] [-r script] [-s] [-t title] [arg] ... idle file(s) (without options) edit the file(s) -c cmd run the command in a shell -d enable the debugger -i open an interactive shell -i file(s) open a shell and also an editor window for each file -r script run a file as a script in a shell -s run $IDLESTARTUP or $PYTHONSTARTUP before anything else -t title set title of shell window Remaining arguments are applied to the command (-c) or script (-r). | def isatty(self): return 1 | ddb3416a69f1ba1edc2a9d381df82a5bc051af6f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ddb3416a69f1ba1edc2a9d381df82a5bc051af6f/PyShell.py |
opts, args = getopt.getopt(argv, "c:deist:") | opts, args = getopt.getopt(argv, "c:dir:st:") | def main(self, argv, noshell): cmd = None edit = 0 debug = 0 startup = 0 try: opts, args = getopt.getopt(argv, "c:deist:") except getopt.error, msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) for o, a in opts: noshell = 0 if o == '-c': cmd = a if o == '-d': debug = 1 if o == '-e': edit = 1 if o == '-s': startup = 1 if o == '-t': PyShell.shell_title = a if noshell: edit=1 for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) pathx = [] if edit: for filename in args: pathx.append(os.path.dirname(filename)) elif args and args[0] != "-": pathx.append(os.path.dirname(args[0])) else: pathx.append(os.curdir) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) | ddb3416a69f1ba1edc2a9d381df82a5bc051af6f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ddb3416a69f1ba1edc2a9d381df82a5bc051af6f/PyShell.py |
noshell = 0 | noshell = 0 | def main(self, argv, noshell): cmd = None edit = 0 debug = 0 startup = 0 try: opts, args = getopt.getopt(argv, "c:deist:") except getopt.error, msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) for o, a in opts: noshell = 0 if o == '-c': cmd = a if o == '-d': debug = 1 if o == '-e': edit = 1 if o == '-s': startup = 1 if o == '-t': PyShell.shell_title = a if noshell: edit=1 for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) pathx = [] if edit: for filename in args: pathx.append(os.path.dirname(filename)) elif args and args[0] != "-": pathx.append(os.path.dirname(args[0])) else: pathx.append(os.curdir) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) | ddb3416a69f1ba1edc2a9d381df82a5bc051af6f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ddb3416a69f1ba1edc2a9d381df82a5bc051af6f/PyShell.py |
if o == '-e': edit = 1 | if o == '-i': interactive = 1 if o == '-r': script = a | def main(self, argv, noshell): cmd = None edit = 0 debug = 0 startup = 0 try: opts, args = getopt.getopt(argv, "c:deist:") except getopt.error, msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) for o, a in opts: noshell = 0 if o == '-c': cmd = a if o == '-d': debug = 1 if o == '-e': edit = 1 if o == '-s': startup = 1 if o == '-t': PyShell.shell_title = a if noshell: edit=1 for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) pathx = [] if edit: for filename in args: pathx.append(os.path.dirname(filename)) elif args and args[0] != "-": pathx.append(os.path.dirname(args[0])) else: pathx.append(os.curdir) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) | ddb3416a69f1ba1edc2a9d381df82a5bc051af6f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ddb3416a69f1ba1edc2a9d381df82a5bc051af6f/PyShell.py |
elif not edit and args and args[0] != "-": interp.execfile(args[0]) | elif script: if os.path.isfile(script): interp.execfile(script) else: print "No script file: ", script | def main(self, argv, noshell): cmd = None edit = 0 debug = 0 startup = 0 try: opts, args = getopt.getopt(argv, "c:deist:") except getopt.error, msg: sys.stderr.write("Error: %s\n" % str(msg)) sys.stderr.write(usage_msg) sys.exit(2) for o, a in opts: noshell = 0 if o == '-c': cmd = a if o == '-d': debug = 1 if o == '-e': edit = 1 if o == '-s': startup = 1 if o == '-t': PyShell.shell_title = a if noshell: edit=1 for i in range(len(sys.path)): sys.path[i] = os.path.abspath(sys.path[i]) pathx = [] if edit: for filename in args: pathx.append(os.path.dirname(filename)) elif args and args[0] != "-": pathx.append(os.path.dirname(args[0])) else: pathx.append(os.curdir) for dir in pathx: dir = os.path.abspath(dir) if not dir in sys.path: sys.path.insert(0, dir) | ddb3416a69f1ba1edc2a9d381df82a5bc051af6f /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/ddb3416a69f1ba1edc2a9d381df82a5bc051af6f/PyShell.py |
def testNtoH(self): for func in socket.htonl, socket.ntohl: for i in (0, 1, ~0xffff, 2L): self.assertEqual(i, func(func(i))) biglong = 2**32L - 1 swapped = func(biglong) self.assert_(swapped == biglong or swapped == -1) self.assertRaises(OverflowError, func, 2L**34) | def testNtoHL(self): sizes = {socket.htonl: 32, socket.ntohl: 32, socket.htons: 16, socket.ntohs: 16} for func, size in sizes.items(): mask = (1L<<size) - 1 for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210): self.assertEqual(i & mask, func(func(i&mask)) & mask) swapped = func(mask) self.assertEqual(swapped & mask, mask) self.assertRaises(OverflowError, func, 1L<<34) | def testNtoH(self): for func in socket.htonl, socket.ntohl: for i in (0, 1, ~0xffff, 2L): self.assertEqual(i, func(func(i))) | 6b23db401df4d30fd8706dc573ddad8a6358351e /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/6b23db401df4d30fd8706dc573ddad8a6358351e/test_socket.py |
line = line.lstrip() | def _ascii_split(self, s, charset, firstline): # Attempt to split the line at the highest-level syntactic break # possible. Note that we don't have a lot of smarts about field # syntax; we just try to break on semi-colons, then whitespace. rtn = [] lines = s.splitlines() while lines: line = lines.pop(0) if firstline: maxlinelen = self._firstlinelen firstline = 0 else: line = line.lstrip() maxlinelen = self._maxlinelen # Short lines can remain unchanged if len(line.replace('\t', SPACE8)) <= maxlinelen: rtn.append(line) else: oldlen = len(line) # Try to break the line on semicolons, but if that doesn't # work, try to split on folding whitespace. while len(line) > maxlinelen: i = line.rfind(';', 0, maxlinelen) if i < 0: break rtn.append(line[:i] + ';') line = line[i+1:] # Is the remaining stuff still longer than maxlinelen? if len(line) <= maxlinelen: # Splitting on semis worked rtn.append(line) continue # Splitting on semis didn't finish the job. If it did any # work at all, stick the remaining junk on the front of the # `lines' sequence and let the next pass do its thing. if len(line) <> oldlen: lines.insert(0, line) continue # Otherwise, splitting on semis didn't help at all. parts = re.split(r'(\s+)', line) if len(parts) == 1 or (len(parts) == 3 and parts[0].endswith(':')): # This line can't be split on whitespace. There's now # little we can do to get this into maxlinelen. BAW: # We're still potentially breaking the RFC by possibly # allowing lines longer than the absolute maximum of 998 # characters. For now, let it slide. # # len(parts) will be 1 if this line has no `Field: ' # prefix, otherwise it will be len(3). rtn.append(line) continue # There is whitespace we can split on. first = parts.pop(0) sublines = [first] acc = len(first) while parts: len0 = len(parts[0]) len1 = len(parts[1]) if acc + len0 + len1 <= maxlinelen: sublines.append(parts.pop(0)) sublines.append(parts.pop(0)) acc += len0 + len1 else: # Split it here, but don't forget to ignore the # next whitespace-only part if first <> '': rtn.append(EMPTYSTRING.join(sublines)) del parts[0] first = parts.pop(0) sublines = [first] acc = len(first) rtn.append(EMPTYSTRING.join(sublines)) return [(chunk, charset) for chunk in rtn] | a2155165b8ce7df387780260143148938c957947 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/a2155165b8ce7df387780260143148938c957947/Header.py |
|
sentence_end = ".?!" | sentence_end_re = re.compile(r'[%s]' r'[\.\!\?]' r'[\"\']?' % string.lowercase) | def islower (c): return c in string.lowercase | bce52ce3b3a50ba95e694d6e6b4dee7a5d5a0f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/bce52ce3b3a50ba95e694d6e6b4dee7a5d5a0f23/textwrap.py |
punct = self.sentence_end | pat = self.sentence_end_re | def _fix_sentence_endings (self, chunks): """_fix_sentence_endings(chunks : [string]) | bce52ce3b3a50ba95e694d6e6b4dee7a5d5a0f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/bce52ce3b3a50ba95e694d6e6b4dee7a5d5a0f23/textwrap.py |
if (chunks[i][-1] in punct and chunks[i+1] == " " and islower(chunks[i][-2])): | if chunks[i+1] == " " and pat.search(chunks[i]): | def _fix_sentence_endings (self, chunks): """_fix_sentence_endings(chunks : [string]) | bce52ce3b3a50ba95e694d6e6b4dee7a5d5a0f23 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/bce52ce3b3a50ba95e694d6e6b4dee7a5d5a0f23/textwrap.py |
dump(vars(values), write) | dump({'faultCode': values.faultCode, 'faultString': values.faultString}, write) | def dumps(self, values): out = [] write = out.append dump = self.__dump if isinstance(values, Fault): # fault instance write("<fault>\n") dump(vars(values), write) write("</fault>\n") else: # parameter block # FIXME: the xml-rpc specification allows us to leave out # the entire <params> block if there are no parameters. # however, changing this may break older code (including # old versions of xmlrpclib.py), so this is better left as # is for now. See @XMLRPC3 for more information. /F write("<params>\n") for v in values: write("<param>\n") dump(v, write) write("</param>\n") write("</params>\n") result = string.join(out, "") return result | fe46f7a5e71ba14dd7cf2188753cf578529a07dd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/fe46f7a5e71ba14dd7cf2188753cf578529a07dd/xmlrpclib.py |
print "self.bdist_dir = %s" % self.bdist_dir print "self.format = %s" % self.format | def run (self): | 68b0de56b32b51627aa364331aa7b37c6a93b3c5 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/68b0de56b32b51627aa364331aa7b37c6a93b3c5/bdist_dumb.py |
|
do_char(c, event) | do_char(c, event) | def do_key(self, event): (what, message, when, where, modifiers) = event c = chr(message & charCodeMask) if modifiers & cmdKey: if c == '.': raise self else: result = MenuKey(ord(c)) id = (result>>16) & 0xffff # Hi word item = result & 0xffff # Lo word if id: self.do_rawmenu(id, item, None, event) | 53cf06757b8d3c57f571e4792fd0e0ccac9de69c /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/53cf06757b8d3c57f571e4792fd0e0ccac9de69c/FrameWork.py |
args = args + to | if to[0] == '-to': to = to[1:] args = args + ('-to',) + tuple(to) | def put(self, data, to=None): args = (self.name, 'put', data) if to: args = args + to apply(self.tk.call, args) | f59254c5479eb2cf959502b211c95ed8d21cd76b /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f59254c5479eb2cf959502b211c95ed8d21cd76b/Tkinter.py |
p = subprocess.Popen([sys.executable, "-c", "import os; os.abort()"]) | old_limit = self._suppress_core_files() try: p = subprocess.Popen([sys.executable, "-c", "import os; os.abort()"]) finally: self._unsuppress_core_files(old_limit) | def test_run_abort(self): # returncode handles signal termination p = subprocess.Popen([sys.executable, "-c", "import os; os.abort()"]) p.wait() self.assertEqual(-p.returncode, signal.SIGABRT) | 4633b74048f94164f8ab26e90df035094f59dadd /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/4633b74048f94164f8ab26e90df035094f59dadd/test_subprocess.py |
self._tk.deletecommand(cbname) | self._master.deletecommand(cbname) | def trace_vdelete(self, mode, cbname): self._tk.call("trace", "vdelete", self._name, mode, cbname) self._tk.deletecommand(cbname) | 2c2861fe5ce4abc92f5446142fe71f31bd18eccf /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/2c2861fe5ce4abc92f5446142fe71f31bd18eccf/Tkinter.py |
self._preprocess(body, headers, lang) | self._preprocess(body, headers, include_dirs, lang) | def try_cpp (self, body=None, headers=None, include_dirs=None, lang="c"): """Construct a source file from 'body' (a string containing lines of C/C++ code) and 'headers' (a list of header files to include) and run it through the preprocessor. Return true if the preprocessor succeeded, false if there were any errors. ('body' probably isn't of much use, but what the heck.) """ from distutils.ccompiler import CompileError self._check_compiler() ok = 1 try: self._preprocess(body, headers, lang) except CompileError: ok = 0 | 57e7661ae761a48e4f9827701bb2df3fb3dc94f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/57e7661ae761a48e4f9827701bb2df3fb3dc94f4/config.py |
(src, out) = self._preprocess(body, headers, lang) | (src, out) = self._preprocess(body, headers, include_dirs, lang) | def search_cpp (self, pattern, body=None, headers=None, include_dirs=None, lang="c"): """Construct a source file (just like 'try_cpp()'), run it through the preprocessor, and return true if any line of the output matches 'pattern'. 'pattern' should either be a compiled regex object or a string containing a regex. If both 'body' and 'headers' are None, preprocesses an empty file -- which can be useful to determine the symbols the preprocessor and compiler set by default. """ | 57e7661ae761a48e4f9827701bb2df3fb3dc94f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/57e7661ae761a48e4f9827701bb2df3fb3dc94f4/config.py |
if pattern.search(pattern): | if pattern.search(line): | def search_cpp (self, pattern, body=None, headers=None, include_dirs=None, lang="c"): """Construct a source file (just like 'try_cpp()'), run it through the preprocessor, and return true if any line of the output matches 'pattern'. 'pattern' should either be a compiled regex object or a string containing a regex. If both 'body' and 'headers' are None, preprocesses an empty file -- which can be useful to determine the symbols the preprocessor and compiler set by default. """ | 57e7661ae761a48e4f9827701bb2df3fb3dc94f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/57e7661ae761a48e4f9827701bb2df3fb3dc94f4/config.py |
self._compile(body, headers, lang) | self._compile(body, headers, include_dirs, lang) | def try_compile (self, body, headers=None, include_dirs=None, lang="c"): """Try to compile a source file built from 'body' and 'headers'. Return true on success, false otherwise. """ from distutils.ccompiler import CompileError self._check_compiler() try: self._compile(body, headers, lang) ok = 1 except CompileError: ok = 0 | 57e7661ae761a48e4f9827701bb2df3fb3dc94f4 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/57e7661ae761a48e4f9827701bb2df3fb3dc94f4/config.py |
exts.append( Extension('audioop', ['audioop.c']) ) | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') | 37b50db622ccbd8c27e6b6cd7d932dab367056d0 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/37b50db622ccbd8c27e6b6cd7d932dab367056d0/setup.py |
|
__version__ = " | __version__ = " | def testMultiply(self): self.assertEquals((0 * 10), 0) self.assertEquals((5 * 8), 40) | 99c020b458dc7210f2c230a25c6760727a00cc4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/99c020b458dc7210f2c230a25c6760727a00cc4d/unittest.py |
Note that decimal places (from zero) is usually not the same | Note that decimal places (from zero) are usually not the same | def failUnlessAlmostEqual(self, first, second, places=7, msg=None): """Fail if the two objects are unequal as determined by their difference rounded to the given number of decimal places (default 7) and comparing to zero. | 99c020b458dc7210f2c230a25c6760727a00cc4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/99c020b458dc7210f2c230a25c6760727a00cc4d/unittest.py |
import unittest | def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. | 99c020b458dc7210f2c230a25c6760727a00cc4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/99c020b458dc7210f2c230a25c6760727a00cc4d/unittest.py |
|
issubclass(obj, unittest.TestCase)): | issubclass(obj, TestCase)): | def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. | 99c020b458dc7210f2c230a25c6760727a00cc4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/99c020b458dc7210f2c230a25c6760727a00cc4d/unittest.py |
elif isinstance(obj, unittest.TestSuite): | elif isinstance(obj, TestSuite): | def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. | 99c020b458dc7210f2c230a25c6760727a00cc4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/99c020b458dc7210f2c230a25c6760727a00cc4d/unittest.py |
if not isinstance(test, (unittest.TestCase, unittest.TestSuite)): | if not isinstance(test, (TestCase, TestSuite)): | def loadTestsFromName(self, name, module=None): """Return a suite of all tests cases given a string specifier. | 99c020b458dc7210f2c230a25c6760727a00cc4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/99c020b458dc7210f2c230a25c6760727a00cc4d/unittest.py |
timeTaken = float(stopTime - startTime) | timeTaken = stopTime - startTime | def run(self, test): "Run the given test case or test suite." result = self._makeResult() startTime = time.time() test(result) stopTime = time.time() timeTaken = float(stopTime - startTime) result.printErrors() self.stream.writeln(result.separator2) run = result.testsRun self.stream.writeln("Ran %d test%s in %.3fs" % (run, run != 1 and "s" or "", timeTaken)) self.stream.writeln() if not result.wasSuccessful(): self.stream.write("FAILED (") failed, errored = map(len, (result.failures, result.errors)) if failed: self.stream.write("failures=%d" % failed) if errored: if failed: self.stream.write(", ") self.stream.write("errors=%d" % errored) self.stream.writeln(")") else: self.stream.writeln("OK") return result | 99c020b458dc7210f2c230a25c6760727a00cc4d /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/99c020b458dc7210f2c230a25c6760727a00cc4d/unittest.py |
def cmp(f1, f2, shallow=1, use_statcache=0): | def cmp(f1, f2, shallow=1, use_statcache=None): | def cmp(f1, f2, shallow=1, use_statcache=0): """Compare two files. Arguments: f1 -- First file name f2 -- Second file name shallow -- Just check stat signature (do not read the files). defaults to 1. use_statcache -- obsolete argument. Return value: True if the files are the same, False otherwise. This function uses a cache for past comparisons and the results, with a cache invalidation mechanism relying on stale signatures. """ s1 = _sig(os.stat(f1)) s2 = _sig(os.stat(f2)) if s1[0] != stat.S_IFREG or s2[0] != stat.S_IFREG: return False if shallow and s1 == s2: return True if s1[1] != s2[1]: return False result = _cache.get((f1, f2)) if result and (s1, s2) == result[:2]: return result[2] outcome = _do_cmp(f1, f2) _cache[f1, f2] = s1, s2, outcome return outcome | c3a56c3f904f4e8e334426abe8f0e9de1635b125 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c3a56c3f904f4e8e334426abe8f0e9de1635b125/filecmp.py |
def cmpfiles(a, b, common, shallow=1, use_statcache=0): | def cmpfiles(a, b, common, shallow=1, use_statcache=None): | def cmpfiles(a, b, common, shallow=1, use_statcache=0): """Compare common files in two directories. a, b -- directory names common -- list of file names found in both directories shallow -- if true, do comparison based solely on stat() information use_statcache -- obsolete argument Returns a tuple of three lists: files that compare equal files that are different filenames that aren't regular files. """ res = ([], [], []) for x in common: ax = os.path.join(a, x) bx = os.path.join(b, x) res[_cmp(ax, bx, shallow)].append(x) return res | c3a56c3f904f4e8e334426abe8f0e9de1635b125 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c3a56c3f904f4e8e334426abe8f0e9de1635b125/filecmp.py |
raise getopt.error, 'need exactly two args' | raise getopt.GetoptError('need exactly two args', None) | def demo(): import sys import getopt options, args = getopt.getopt(sys.argv[1:], 'r') if len(args) != 2: raise getopt.error, 'need exactly two args' dd = dircmp(args[0], args[1]) if ('-r', '') in options: dd.report_full_closure() else: dd.report() | c3a56c3f904f4e8e334426abe8f0e9de1635b125 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/c3a56c3f904f4e8e334426abe8f0e9de1635b125/filecmp.py |
if self.optmize > 0: | if self.optimize > 0: | def _bytecode_filenames (self, py_filenames): bytecode_files = [] for py_file in py_filenames: if self.compile: bytecode_files.append(py_file + "c") if self.optmize > 0: bytecode_files.append(py_file + "o") | b1b6be8dc2262bce706e70372f90bff62f0ab960 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/b1b6be8dc2262bce706e70372f90bff62f0ab960/install_lib.py |
tmp_file.close() | _sync_close(tmp_file) | def add(self, message): """Add message and return assigned key.""" tmp_file = self._create_tmp() try: self._dump_message(message, tmp_file) finally: tmp_file.close() if isinstance(message, MaildirMessage): subdir = message.get_subdir() suffix = self.colon + message.get_info() if suffix == self.colon: suffix = '' else: subdir = 'new' suffix = '' uniq = os.path.basename(tmp_file.name).split(self.colon)[0] dest = os.path.join(self._path, subdir, uniq + suffix) os.rename(tmp_file.name, dest) if isinstance(message, MaildirMessage): os.utime(dest, (os.path.getatime(dest), message.get_date())) return uniq | f1a9be4f2b606215ccf96e00683a2a8c9771cf75 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f1a9be4f2b606215ccf96e00683a2a8c9771cf75/mailbox.py |
new_file.close() | _sync_close(new_file) | def flush(self): """Write any pending changes to disk.""" if not self._pending: return self._lookup() new_file = _create_temporary(self._path) try: new_toc = {} self._pre_mailbox_hook(new_file) for key in sorted(self._toc.keys()): start, stop = self._toc[key] self._file.seek(start) self._pre_message_hook(new_file) new_start = new_file.tell() while True: buffer = self._file.read(min(4096, stop - self._file.tell())) if buffer == '': break new_file.write(buffer) new_toc[key] = (new_start, new_file.tell()) self._post_message_hook(new_file) except: new_file.close() os.remove(new_file.name) raise new_file.close() self._file.close() try: os.rename(new_file.name, self._path) except OSError, e: if e.errno == errno.EEXIST or \ (os.name == 'os2' and e.errno == errno.EACCES): os.remove(self._path) os.rename(new_file.name, self._path) else: raise self._file = open(self._path, 'rb+') self._toc = new_toc self._pending = False if self._locked: _lock_file(self._file, dotlock=False) | f1a9be4f2b606215ccf96e00683a2a8c9771cf75 /local1/tlutelli/issta_data/temp/all_python//python/2006_temp/2006/12029/f1a9be4f2b606215ccf96e00683a2a8c9771cf75/mailbox.py |