rem stringlengths 1–322k | add stringlengths 0–2.05M | context stringlengths 4–228k | meta stringlengths 156–215
---|---|---|---|
timings.append(float(l[3]) / links) sizes.append(int(l[1])) | readdata.append( (int(l[1]),float(l[3]) / 2 ) ); sorteddata = sorted( readdata, key=lambda pair: pair[0]) sizes,timings = zip(*sorteddata); | def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth) | f08c51969b477f4df715e6ed41846f01e52450a7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7090/f08c51969b477f4df715e6ed41846f01e52450a7/calibrate_piecewise.py |
for i in xrange(5, len(sys.argv)): limits += [idx for idx in xrange(len(sizes)) if sizes[idx] == int(sys.argv[i])] | for i in range(5, len(sys.argv)): limits += [idx for idx in range(len(sizes)) if sizes[idx] == int(sys.argv[i])] | def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth) | f08c51969b477f4df715e6ed41846f01e52450a7 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/7090/f08c51969b477f4df715e6ed41846f01e52450a7/calibrate_piecewise.py |
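The calibrate() function repeated in the context column of the two rows above fits timing against message size by ordinary least squares. A minimal runnable sketch of that fit, assuming avg, variance and cov are the usual sample statistics (their originals are not shown in these rows):

```python
# Minimal sketch of the least-squares fit behind calibrate() above.
# avg/variance/cov are assumptions: plain sample statistics.
def avg(xs):
    return sum(xs) / float(len(xs))

def variance(xs):
    m = avg(xs)
    return sum((x - m) ** 2 for x in xs) / float(len(xs))

def cov(xs, ys):
    mx, my = avg(xs), avg(ys)
    return sum((x - mx) * (y - my) for x, y in zip(xs, ys)) / float(len(xs))

def calibrate(links, latency, bandwidth, sizes, timings):
    # Fit timing ~ a * size + b, then rescale the intercept into a latency
    # factor and the slope into a bandwidth factor, as in the rows above.
    assert len(sizes) == len(timings)
    if len(sizes) < 2:
        return None
    a = cov(sizes, timings) / variance(sizes)
    b = avg(timings) - a * avg(sizes)
    return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)

# Worked example: slope a = 0.01, intercept b = 1.0, so both factors are 1.0.
print(calibrate(1, 1e-6, 1e8, [100, 200, 300], [2.0, 3.0, 4.0]))  # (1.0, 1.0)
```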
requires=("gtk (>=2.12.0)",), | requires=("gtk",), | def run(self): """Build tarballs and create additional files.""" if os.path.isfile("ChangeLog"): os.remove("ChangeLog") os.system("tools/generate-change-log > ChangeLog") assert os.path.isfile("ChangeLog") assert open("ChangeLog", "r").read().strip() distutils.command.sdist.sdist.run(self) basename = "nfoview-%s" % self.__version tarballs = os.listdir(self.dist_dir) os.chdir(self.dist_dir) # Compare tarball contents with working copy. temp_dir = tempfile.gettempdir() test_dir = os.path.join(temp_dir, basename) tobj = tarfile.open(tarballs[-1], "r") for member in tobj.getmembers(): tobj.extract(member, temp_dir) log.info("comparing tarball (tmp) with working copy (../..)") os.system('diff -qr -x ".*" -x "*.pyc" ../.. %s' % test_dir) response = raw_input("Are all files in the tarball [Y/n]? ") if response.lower() == "n": raise SystemExit("Must edit MANIFEST.in.") shutil.rmtree(test_dir) # Create extra distribution files. log.info("calculating md5sums") os.system("md5sum * > %s.md5sum" % basename) log.info("creating '%s.changes'" % basename) source = os.path.join("..", "..", "ChangeLog") shutil.copyfile(source, "%s.changes" % basename) log.info("creating '%s.news'" % basename) source = os.path.join("..", "..", "NEWS") shutil.copyfile(source, "%s.news" % basename) for tarball in tarballs: log.info("signing '%s'" % tarball) os.system("gpg --detach %s" % tarball) | 84d1e57549ff73825a449449e53abedf7465ac36 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12936/84d1e57549ff73825a449449e53abedf7465ac36/setup.py |
while not lines[-1]: | while lines and not lines[-1]: | def _read_file(self, path, encoding=None): """Read and return the text of the NFO file. | ce9efb6872590104a2f7f7574e24d2eaea98de3a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12936/ce9efb6872590104a2f7f7574e24d2eaea98de3a/window.py |
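The one-line fix above adds a `lines and` guard so that stripping trailing blank lines cannot index into an empty list. A tiny illustration:

```python
# Stripping trailing blank lines; the added 'lines and' guard prevents an
# IndexError when the file produced no lines at all.
lines = ["first", "", ""]
while lines and not lines[-1]:
    lines.pop()
assert lines == ["first"]

lines = []                      # empty file
while lines and not lines[-1]:  # condition is False immediately
    lines.pop()
```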
if subprocess.call(command) != 0: | if os.system(command) != 0: | def run_command_or_exit(command): """Run command in shell and raise SystemExit if it fails.""" if subprocess.call(command) != 0: log.error("command %s failed" % repr(command)) raise SystemExit(1) | 4bd687a6e88966ddc13a25b600ad1aae8296a8d2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12936/4bd687a6e88966ddc13a25b600ad1aae8296a8d2/setup.py |
if subprocess.call(command) != 0: | if os.system(command) != 0: | def run_command_or_warn(command): """Run command in shell and raise SystemExit if it fails.""" if subprocess.call(command) != 0: log.warn("command %s failed" % repr(command)) | 4bd687a6e88966ddc13a25b600ad1aae8296a8d2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12936/4bd687a6e88966ddc13a25b600ad1aae8296a8d2/setup.py |
run_command_or_warn(("update-desktop-database", directory)) | run_command_or_warn('update-desktop-database "%s"' % directory) | def run(self): """Install everything and update the desktop file database.""" install.run(self) get_command_obj = self.distribution.get_command_obj root = get_command_obj("install").root data_dir = get_command_obj("install_data").install_dir # Assume we're actually installing if --root was not given. if (root is not None) or (data_dir is None): return directory = os.path.join(data_dir, "share", "applications") log.info("updating desktop database in '%s'" % directory) run_command_or_warn(("update-desktop-database", directory)) | 4bd687a6e88966ddc13a25b600ad1aae8296a8d2 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12936/4bd687a6e88966ddc13a25b600ad1aae8296a8d2/setup.py |
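The three setup.py diffs above are consistent with each other: subprocess.call() takes an argument sequence and runs without a shell, while os.system() takes a single shell string, so the directory has to be quoted by hand. A sketch with a hypothetical path:

```python
import os
import subprocess

directory = "/usr/local/share/applications"  # hypothetical example path

# Argument list, no shell involved, no quoting needed; returns the exit code:
subprocess.call(["update-desktop-database", directory])

# One shell string; arguments that may contain spaces must be quoted
# explicitly, which is why the diff switches to '... "%s"' % directory:
os.system('update-desktop-database "%s"' % directory)
```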
def calc_cross_points(self,cross=None): sizes = self.sizes() hasc = self.houses[0] nnode = self.planets[10] h = 0 hn = self.which_house(nnode) while hn > h: #if h == 0 and hn == 1: if hn - h == 1 and hn < self.which_house((nnode - 30) % 360): break h = (h+1)%12 hasc = self.houses[h] nnode = (nnode - 30) % 360 hn = self.which_house(nnode) dist = nnode - hasc | a2e9e3ce03a8174ea34d5c6bfcb84a7af30e5217 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/2637/a2e9e3ce03a8174ea34d5c6bfcb84a7af30e5217/chart.py |
self.original.tap( x=x, y=y, z=self.z2(z), self.z2(zretract), depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction) | self.original.tap( x, y, self.z2(z), self.z2(zretract), depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction) | def tap(self, x=None, y=None, z=None, zretract=None, depth=None, standoff=None, dwell_bottom=None, pitch=None, stoppos=None, spin_in=None, spin_out=None, tap_mode=None, direction=None): self.original.tap( x=x, y=y, z=self.z2(z), self.z2(zretract), depth, standoff, dwell_bottom, pitch, stoppos, spin_in, spin_out, tap_mode, direction) | 119350e6230023dd559f4c554e1b9d5d3b7e8e84 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/119350e6230023dd559f4c554e1b9d5d3b7e8e84/attach.py |
move = False | no_move = True | def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0 | 9819f19aa4c4ab39b057b35d4cd7e3ad69069bae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/9819f19aa4c4ab39b057b35d4cd7e3ad69069bae/iso_read.py |
move = False | no_move = True | def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0 | 9819f19aa4c4ab39b057b35d4cd7e3ad69069bae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/9819f19aa4c4ab39b057b35d4cd7e3ad69069bae/iso_read.py |
drill = True; move = False; path_col = "feed"; col = "feed"; | drill = True no_move = True path_col = "feed" col = "feed" | def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0 | 9819f19aa4c4ab39b057b35d4cd7e3ad69069bae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/9819f19aa4c4ab39b057b35d4cd7e3ad69069bae/iso_read.py |
move = False; | no_move = True | def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0 | 9819f19aa4c4ab39b057b35d4cd7e3ad69069bae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/9819f19aa4c4ab39b057b35d4cd7e3ad69069bae/iso_read.py |
drill = True; move = False; | drill = True no_move = True | def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0 | 9819f19aa4c4ab39b057b35d4cd7e3ad69069bae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/9819f19aa4c4ab39b057b35d4cd7e3ad69069bae/iso_read.py |
if (move): | if (move and not no_move): | def Parse(self, name, oname=None): self.files_open(name,oname) #self.begin_ncblock() #self.begin_path(None) #self.add_line(z=500) #self.end_path() #self.end_ncblock() path_col = None f = None arc = 0 | 9819f19aa4c4ab39b057b35d4cd7e3ad69069bae /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/9819f19aa4c4ab39b057b35d4cd7e3ad69069bae/iso_read.py |
if direction == "on": return if roll_on == None: return | if direction == "on": roll_on = None | def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy) | 14f1224859a258975d450c6a21374ef10834e897 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/14f1224859a258975d450c6a21374ef10834e897/kurve_funcs.py |
if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) | sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if roll_on == None: rollstartx = sx rollstarty = sy elif roll_on == 'auto': | def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy) | 14f1224859a258975d450c6a21374ef10834e897 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/14f1224859a258975d450c6a21374ef10834e897/kurve_funcs.py |
rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir | rollstartx, rollstarty = roll_on if sx == rollstartx and sy == rollstarty: rdir = 0 rcx = 0 rcy = 0 else: vx, vy = kurve.get_span_dir(k, 0, 0) rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir | def add_roll_on(k, roll_on_k, direction, roll_radius, offset_extra, roll_on): if direction == "on": return if roll_on == None: return num_spans = kurve.num_spans(k) if num_spans == 0: return if roll_on == 'auto': sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction if direction == 'right': off_vx = vy off_vy = -vx else: off_vx = -vy off_vy = vx rollstartx = sx + off_vx * roll_radius - vx * roll_radius rollstarty = sy + off_vy * roll_radius - vy * roll_radius else: rollstartx, rollstarty = roll_on sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) if sx == rollstartx and sy == rollstarty: return vx, vy = kurve.get_span_dir(k, 0, 0) # get start direction rcx, rcy, rdir = kurve.tangential_arc(sx, sy, -vx, -vy, rollstartx, rollstarty) rdir = -rdir # because the tangential_arc was used in reverse # add a start roll on point kurve.add_point(roll_on_k, 0, rollstartx, rollstarty, 0, 0) # add the roll on arc kurve.add_point(roll_on_k, rdir, sx, sy, rcx, rcy) # add the start of the original kurve sp, sx, sy, ex, ey, cx, cy = kurve.get_span(k, 0) kurve.add_point(roll_on_k, sp, ex, ey, cx, cy) | 14f1224859a258975d450c6a21374ef10834e897 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/14f1224859a258975d450c6a21374ef10834e897/kurve_funcs.py |
while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r bottom_y = math.cos(loop_count * math.pi / 180) * bottom_r | while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r bottom_y = math.cos(loop_count * math.pi / 180) * bottom_r | def cutcone(x_cen, y_cen, z_cen, top_r, bottom_r, depth, step_over): if top_r >= bottom_r: step_count = math.pi * top_r * 2 / step_over else: step_count = math.pi * bottom_r * 2 / step_over loop_count = 0 while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
feed(x=(x_cen + top_x), y=(y_cen + top_y), z=(z_cen)) feed(x=(x_cen + bottom_x), y=(y_cen + bottom_y), z=(z_cen - depth)) feed(z=(z_cen)) loop_count = loop_count + (360 / step_count) | feed(x=(x_cen + top_x), y=(y_cen + top_y), z=(z_cen)) feed(x=(x_cen + bottom_x), y=(y_cen + bottom_y), z=(z_cen - depth)) feed(z=(z_cen)) loop_count = loop_count + (360 / step_count) | def cutcone(x_cen, y_cen, z_cen, top_r, bottom_r, depth, step_over): if top_r >= bottom_r: step_count = math.pi * top_r * 2 / step_over else: step_count = math.pi * bottom_r * 2 / step_over loop_count = 0 while (loop_count < 360): top_x = math.sin(loop_count * math.pi / 180) * top_r top_y = math.cos(loop_count * math.pi / 180) * top_r bottom_x = math.sin(loop_count * math.pi / 180) * bottom_r | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
tool_r = tooldiameter / 2 top_r = diameter / 2 | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
comment('tool change') tool_change(id=tool_id) spindle(spindle_speed) feedrate_hv(horizontal_feedrate, vertical_feedrate) | tool_r = tooldiameter / 2 top_r = diameter / 2 | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
bottom_r = top_r - (math.tan(angle * math.pi / 180) * depth) | comment('tool change') tool_change(id=tool_id) spindle(spindle_speed) feedrate_hv(horizontal_feedrate, vertical_feedrate) | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
if top_r >= bottom_r: top_r = top_r - tool_r bottom_r = bottom_r - tool_r if top_r < bottom_r: top_r = top_r + tool_r bottom_r = bottom_r + tool_r | bottom_r = top_r - (math.tan(angle * math.pi / 180) * depth) | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
if bottom_r < 0: bottom_r = bottom_r * -1 depth = depth - (bottom_r / math.tan(angle * math.pi / 180)) bottom_r = 0 | if top_r >= bottom_r: top_r = top_r - tool_r bottom_r = bottom_r - tool_r if top_r < bottom_r: top_r = top_r + tool_r bottom_r = bottom_r + tool_r | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
no better idea) cone_feed = (step_down / math.tan(angle * math.pi / 180)) if angle < 0 : cone_feed = cone_feed * -1 flush_nc() | if bottom_r < 0: bottom_r = bottom_r * -1 depth = depth - (bottom_r / math.tan(angle * math.pi / 180)) bottom_r = 0 | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
rapid(x=(x_cen + bottom_r), y=y_cen) rapid(z=z_safe) | cone_feed = (step_down / math.tan(angle * math.pi / 180)) if angle < 0 : cone_feed = cone_feed * -1 flush_nc() | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
loop_feed = 0 while(loop_feed < depth): loop_feed = loop_feed + step_down if loop_feed >= depth: feed(z=(z_cen - depth)) else: feed(z=(z_cen - loop_feed)) arc_ccw(x=(x_cen - bottom_r), y=y_cen, i= -bottom_r, j=0) arc_ccw(x=(x_cen + bottom_r), y=y_cen, i=bottom_r, j=0) feed(z=z_cen) | rapid(x=(x_cen + bottom_r), y=y_cen) rapid(z=z_safe) | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
loop_feed = 0 while(loop_feed < depth): loop_feed = loop_feed + cone_feed if loop_feed >= depth: temp_depth = depth else: temp_depth = loop_feed temp_top_r = bottom_r + (math.tan(angle * math.pi / 180) * temp_depth) | loop_feed = 0 while(loop_feed < depth): loop_feed = loop_feed + step_down if loop_feed >= depth: feed(z=(z_cen - depth)) else: feed(z=(z_cen - loop_feed)) arc_ccw(x=(x_cen - bottom_r), y=y_cen, i= -bottom_r, j=0) arc_ccw(x=(x_cen + bottom_r), y=y_cen, i=bottom_r, j=0) feed(z=z_cen) | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
cutcone(x_cen, y_cen, z_cen, temp_top_r, bottom_r, temp_depth, step_over) | loop_feed = 0 while(loop_feed < depth): loop_feed = loop_feed + cone_feed if loop_feed >= depth: temp_depth = depth else: temp_depth = loop_feed temp_top_r = bottom_r + (math.tan(angle * math.pi / 180) * temp_depth) | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
rapid(z=z_safe) | cutcone(x_cen, y_cen, z_cen, temp_top_r, bottom_r, temp_depth, step_over) rapid(z=z_safe) | def cone(x_cen, y_cen, z_cen, tool_id, tooldiameter, spindle_speed, horizontal_feedrate, vertical_feedrate, depth, diameter, angle, z_safe, step_over, step_down): | 7b1f7adcc50a8d0dbd3214cd964d89735652e76a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/7b1f7adcc50a8d0dbd3214cd964d89735652e76a/cut-cone.py |
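A small worked check of the cone geometry used throughout the cut-cone.py rows above: the bottom radius follows from the wall angle, and cone_feed converts the vertical step_down into the matching per-pass advance (sample values assumed for illustration):

```python
import math

# Sample values assumed for illustration only.
top_r, angle, depth = 10.0, 30.0, 5.0

# bottom_r = top_r - tan(angle) * depth, as in the diff above:
bottom_r = top_r - math.tan(angle * math.pi / 180) * depth
print(round(bottom_r, 3))   # 7.113

# cone_feed = step_down / tan(angle), negated for negative angles,
# exactly as the row above does:
step_down = 1.0
cone_feed = step_down / math.tan(angle * math.pi / 180)
if angle < 0:
    cone_feed = cone_feed * -1
print(round(cone_feed, 3))  # 1.732
```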
else: self.write(iso_codes.codes.PECK_DRILL() + iso_codes.codes.PECK_DEPTH(self.fmt, peck_depth)) | else: self.write(iso_codes.codes.PECK_DRILL() + iso_codes.codes.PECK_DEPTH(self.fmt, peck_depth)) | def drill(self, x=None, y=None, z=None, depth=None, standoff=None, dwell=None, peck_depth=None, retract_mode=None, spindle_mode=None): | 847e2e14f5c9abb2b2dff83168323e3bf56b2e66 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/847e2e14f5c9abb2b2dff83168323e3bf56b2e66/emc2.py |
def cut_curve(curve, need_rapid, p, rapid_down_to_height, final_depth): | def cut_curve(curve, need_rapid, p, rapid_down_to_height, current_start_depth, final_depth): | def cut_curve(curve, need_rapid, p, rapid_down_to_height, final_depth): prev_p = p first = True for vertex in curve.getVertices(): if need_rapid and first: # rapid across rapid(vertex.p.x, vertex.p.y) ##rapid down rapid(z = rapid_down_to_height) #feed down feed(z = final_depth) first = False else: dc = vertex.c - prev_p if vertex.type == 1: arc_ccw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) elif vertex.type == -1: arc_cw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) else: feed(vertex.p.x, vertex.p.y) prev_p = vertex.p return prev_p | ec3698dde3d110a186cf4acac001834823044738 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/ec3698dde3d110a186cf4acac001834823044738/area_funcs.py |
rapid(z = rapid_down_to_height) | rapid(z = current_start_depth + rapid_down_to_height) | def cut_curve(curve, need_rapid, p, rapid_down_to_height, final_depth): prev_p = p first = True for vertex in curve.getVertices(): if need_rapid and first: # rapid across rapid(vertex.p.x, vertex.p.y) ##rapid down rapid(z = rapid_down_to_height) #feed down feed(z = final_depth) first = False else: dc = vertex.c - prev_p if vertex.type == 1: arc_ccw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) elif vertex.type == -1: arc_cw(vertex.p.x, vertex.p.y, i = dc.x, j = dc.y) else: feed(vertex.p.x, vertex.p.y) prev_p = vertex.p return prev_p | ec3698dde3d110a186cf4acac001834823044738 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/ec3698dde3d110a186cf4acac001834823044738/area_funcs.py |
def cut_curvelist(curve_list, rapid_down_to_height, depth, clearance_height, keep_tool_down_if_poss): | def cut_curvelist(curve_list, rapid_down_to_height, current_start_depth, depth, clearance_height, keep_tool_down_if_poss): | def cut_curvelist(curve_list, rapid_down_to_height, depth, clearance_height, keep_tool_down_if_poss): p = area.Point(0, 0) first = True for curve in curve_list: need_rapid = True if first == False: s = curve.FirstVertex().p if keep_tool_down_if_poss == True: # see if we can feed across if feed_possible(p, s): need_rapid = False elif s.x == p.x and s.y == p.y: need_rapid = False if need_rapid: rapid(z = clearance_height) p = cut_curve(curve, need_rapid, p, rapid_down_to_height, depth) first = False rapid(z = clearance_height) | ec3698dde3d110a186cf4acac001834823044738 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/ec3698dde3d110a186cf4acac001834823044738/area_funcs.py |
p = cut_curve(curve, need_rapid, p, rapid_down_to_height, depth) | p = cut_curve(curve, need_rapid, p, rapid_down_to_height, current_start_depth, depth) | def cut_curvelist(curve_list, rapid_down_to_height, depth, clearance_height, keep_tool_down_if_poss): p = area.Point(0, 0) first = True for curve in curve_list: need_rapid = True if first == False: s = curve.FirstVertex().p if keep_tool_down_if_poss == True: # see if we can feed across if feed_possible(p, s): need_rapid = False elif s.x == p.x and s.y == p.y: need_rapid = False if need_rapid: rapid(z = clearance_height) p = cut_curve(curve, need_rapid, p, rapid_down_to_height, depth) first = False rapid(z = clearance_height) | ec3698dde3d110a186cf4acac001834823044738 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/12576/ec3698dde3d110a186cf4acac001834823044738/area_funcs.py |
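The four area_funcs.py diffs above thread a new current_start_depth parameter from cut_curvelist into cut_curve, so the rapid-down height is measured from the current top of stock rather than being an absolute Z. A runnable sketch, with stub rapid/feed primitives standing in for the real post-processor calls:

```python
# rapid() and feed() stand in for the real post-processor primitives.
def rapid(z):
    print("rapid to z=%s" % z)

def feed(z):
    print("feed to z=%s" % z)

def cut_curve(rapid_down_to_height, current_start_depth, final_depth):
    # Before the fix: rapid(z=rapid_down_to_height), an absolute height.
    # After: relative to the depth already machined on earlier passes.
    rapid(z=current_start_depth + rapid_down_to_height)
    feed(z=final_depth)

cut_curve(rapid_down_to_height=2.0, current_start_depth=-5.0, final_depth=-6.0)
# -> rapid to z=-3.0, then feed to z=-6.0
```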
flunkOnFailure=False, | def tools_run_tests(self): self.addStep(ShellCommand( workdir='tools/release/signing', command=['python', 'tests.py'], name='release_signing_tests', )) self.addStep(ShellCommand( workdir='tools/lib/python', env={'PYTHONPATH': WithProperties('%(topdir)s/tools/lib/python')}, name='run_lib_nosetests', command=['nosetests'], )) self.addStep(ShellCommand( workdir='tools/clobberer', flunkOnFailure=False, name='run_clobbberer_test', command=['python', 'test_clobberer.py', 'http://preproduction-master.build.mozilla.org/~cltbld/index.php', '/home/cltbld/public_html/db/clobberer.db'], )) | 24f5a0dc5ba11a78d8f4906aa6e63913c2e89731 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13112/24f5a0dc5ba11a78d8f4906aa6e63913c2e89731/preproduction_factory.py |
./bin/pip install Twisted || exit 1; | ./bin/pip install Twisted==10.1.0 || exit 1; | def createSummary(self, log): self.parent_class.createSummary(self, log) key = 'pylint-%s' % self.project if not self.build.getProperties().has_key(key): self.setProperty(key, {}) props = self.getProperty(key) for msg, fullmsg in self.MESSAGES.items(): props[fullmsg] = self.getProperty('pylint-%s' % fullmsg) props['total'] = self.getProperty('pylint-total') | 3360a811d983129df06a4ed476b6517c6ed98dfc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13112/3360a811d983129df06a4ed476b6517c6ed98dfc/preproduction_factory.py |
self.addStep(RemovePYCs(workdir=".")) | self.addStep(ShellCommand( name='rm_pyc', command=['find', '.', '-name', '*.pyc', '-exec', 'rm', '-fv', '{}', ';'], workdir=".", )) | def __init__(self, hgHost, **kwargs): self.parent_class = BuildFactory self.parent_class.__init__(self, **kwargs) self.hgHost = hgHost self.addStep(SetProperty(name='set_topdir', command=['pwd'], property='topdir', workdir='.', )) self.addStep(RemovePYCs(workdir=".")) | 3360a811d983129df06a4ed476b6517c6ed98dfc /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13112/3360a811d983129df06a4ed476b6517c6ed98dfc/preproduction_factory.py |
def __init__(self, hgHost, **kwargs): self.parent_class = BuildFactory self.parent_class.__init__(self, **kwargs) #self.addFactoryArguments(hgHost=hgHost) self.hgHost = hgHost self.addStep(SetProperty(name='set_topdir', command=['pwd'], property='topdir', workdir='.', )) self.addStep(ShellCommand(name='rm_pyc', command=['bash', '-c', 'find . -name "*.pyc" -exec rm -f {} ";"'], workdir=".", )) | 185d9d2a198fecc1a9a65320717bb1db12a6bd52 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13112/185d9d2a198fecc1a9a65320717bb1db12a6bd52/preproduction_factory.py |
'PYTHONPATH': WithProperties('%(topdir)s'), | 'PYTHONPATH': WithProperties('%(topdir)s:%(topdir)s/tools/lib/python'), | def test_masters(self): self.addStep(ShellCommand(name='test_masters', command=['./test-masters.sh', '-8'], env = { 'PYTHONPATH': WithProperties('%(topdir)s'), 'PATH': WithProperties('%(topdir)s/sandbox/bin:/bin:/usr/bin'), }, workdir="buildbot-configs", )) | 185d9d2a198fecc1a9a65320717bb1db12a6bd52 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13112/185d9d2a198fecc1a9a65320717bb1db12a6bd52/preproduction_factory.py |
flunkOnFailure=False, | def tools_pylint(self): # TODO: move pylintrc to tools self.addStep(PyLintExtended( command='../../../sandbox/bin/pylint --rcfile=../../.pylintrc *', workdir='tools/lib/python', flunkOnFailure=False, name='tools_lib_pylint', project='tools_lib', )) self.addStep(PyLintExtended( command='find buildbot-helpers buildfarm \ clobberer release stage \ -name \'*.py\' -type f -print0 | \ xargs -0 ../sandbox/bin/pylint \ --rcfile=.pylintrc', workdir="tools", env = {'PYTHONPATH': WithProperties('%(topdir)s:%(topdir)s/tools/lib/python')}, flunkOnFailure=False, name='tools_scripts_pylint', project='tools_scripts', )) | b26538652f371234249676f23f4a9a3bab4a5156 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/13112/b26538652f371234249676f23f4a9a3bab4a5156/preproduction_factory.py |
else: values[field] = getattr(self, field) values['id'] = self.id return values def _on_change_args(self, args): res = {} values = {} for field, definition in self._fields.iteritems(): if definition['type'] in ('one2many', 'many2many'): values[field] = [x._get_eval() for x in getattr(self, field)] | def _get_eval(self): values = {} for field, definition in self._fields.iteritems(): if definition['type'] in ('one2many', 'many2many'): values[field] = [x.id for x in getattr(self, field) or []] else: values[field] = getattr(self, field) values['id'] = self.id return values | 39c1c7f7309fe3bdb4aa869b46297cdb28b3986d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/9150/39c1c7f7309fe3bdb4aa869b46297cdb28b3986d/__init__.py |
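The _get_eval() shown above serializes a record by mapping relation fields (one2many/many2many) to lists of ids, with `or []` guarding against a None relation. A self-contained sketch; the Record class and its toy field registry are invented for illustration:

```python
class Record(object):
    _fields = {
        "name": {"type": "char"},
        "lines": {"type": "one2many"},
    }

    def __init__(self, id, name, lines=None):
        self.id, self.name, self.lines = id, name, lines

    def _get_eval(self):
        values = {}
        for field, definition in self._fields.items():
            if definition["type"] in ("one2many", "many2many"):
                # 'or []' keeps a None relation from breaking the listcomp
                values[field] = [x.id for x in getattr(self, field) or []]
            else:
                values[field] = getattr(self, field)
        values["id"] = self.id
        return values

parent = Record(1, "order", lines=[Record(2, "line")])
assert parent._get_eval() == {"name": "order", "lines": [2], "id": 1}
empty = Record(3, "empty")          # lines is None
assert empty._get_eval()["lines"] == []
```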
setting.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), | settings.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), | def generate_image(d): """ Generates an image accoording to given configuration. """ logging.debug(repr(d)) if d['imagebuilder'] not in IMAGEBUILDERS: raise Exception("Invalid imagebuilder specified!") x = OpenWrtConfig() x.setUUID(d['uuid']) x.setOpenwrtVersion(d['openwrt_ver']) x.setArch(d['arch']) x.setPortLayout(d['port_layout']) x.setWifiIface(d['iface_wifi'], d['driver'], d['channel']) x.setWifiAnt(d['rx_ant'], d['tx_ant']) x.setLanIface(d['iface_lan']) x.setNodeType("adhoc") x.setPassword(d['root_pass']) x.setHostname(d['hostname']) x.setIp(d['ip']) x.setSSID(d['ssid']) # Add WAN interface and all subnets if d['wan_dhcp']: x.addInterface("wan", d['iface_wan'], init = True) else: x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True) for subnet in d['subnets']: x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True) x.setCaptivePortal(d['captive_portal']) if d['vpn']: x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit']) if d['lan_wifi_bridge']: x.enableLanWifiBridge() if d['lan_wan_switch']: x.switchWanToLan() # Add optional packages for package in d['opt_pkg']: x.addPackage(package) # Cleanup stuff from previous builds os.chdir(WORKDIR) os.system("rm -rf build/files/*") os.system("rm -rf build/%s/bin/*" % d['imagebuilder']) os.mkdir("build/files/etc") x.generate("build/files/etc") if d['only_config']: # Just pack configuration and send it prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16] tempfile = os.path.join(DESTINATION, prefix + "-config.zip") zip = ZipFile(tempfile, 'w', ZIP_DEFLATED) os.chdir('build/files') for root, dirs, files in os.walk("etc"): for file in files: zip.write(os.path.join(root, file)) zip.close() # Generate checksum f = open(tempfile, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum) destination = os.path.join(DESTINATION, result) os.rename(tempfile, destination) # Send an e-mail t = loader.get_template('generator/email_config.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'config' : result, 'checksum' : checksum, 'network' : { 'name' : settings.NETWORK_NAME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) } }) send_mail( setting.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) else: # Generate full image x.build("build/%s" % d['imagebuilder']) # Read image version try: f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r') version = f.read().strip().replace('.', '_') f.close() except: version = 'unknown' # Get resulting image files = [] for file, type in d['imagefiles']: file = str(file) source = "%s/build/%s/bin/%s" % (WORKDIR, d['imagebuilder'], file) f = open(source, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() ext = os.path.splitext(file)[1] result = "%s-%s-%s%s-%s%s" % (d['hostname'], d['router_name'], version, ("-%s" % type if type else ""), filechecksum, ext) destination = os.path.join(DESTINATION, result) os.rename(source, destination) files.append({ 'name' : result, 'checksum' : checksum }) # Send an e-mail t = loader.get_template('generator/email.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'files' : files, 'network' : { 'name' : settings.NETWORK_NAME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) } }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) | 6c450d9787ea656fd345ef6dc1ad51ec3071a894 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/6c450d9787ea656fd345ef6dc1ad51ec3071a894/gennyd.py |
snr = float(signal) / float(noise) | snr = float(signal) - float(noise) | def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Wireless: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Treat missing firmware version file as NULL version if n.firmware_version == "missing": n.firmware_version = None # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: offset = -3 unit = 1000 if 'Kbit' in info['net']['vpn']['upload_limit']: offset = -4 unit = 1 upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.project.captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check node's multicast rate if 'mcast_rate' in info['wifi']: rate = safe_int_convert(info['wifi']['mcast_rate']) if rate != 5500: NodeWarning.create(n, WarningCode.McastRateMismatch, EventSource.Monitor) # Check node's wifi bitrate, level and noise if 'signal' in info['wifi']: bitrate = safe_int_convert(info['wifi']['bitrate']) signal = safe_dbm_convert(info['wifi']['signal']) noise = safe_dbm_convert(info['wifi']['noise']) snr = float(signal) / float(noise) add_graph(n, '', GraphType.WifiBitrate, RRAWifiBitrate, 'Wifi Bitrate', 'wifibitrate', bitrate) add_graph(n, '', GraphType.WifiSignalNoise, RRAWifiSignalNoise, 'Wifi Signal/Noise', 'wifisignalnoise', signal, noise) add_graph(n, '', GraphType.WifiSNR, RRAWifiSNR, 'Wifi Signal/Noise Ratio', 'wifisnr', snr) # Generate a graph for number of clients if 'nds' in info: add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None | 2facf9bd4088b9a0749529cfaa5a4e1b8f40828c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/2facf9bd4088b9a0749529cfaa5a4e1b8f40828c/monitor.py |
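The first two cells of the row above change the SNR from a quotient to a difference. The readings are in dBm, a logarithmic scale, so a ratio of the underlying powers corresponds to subtracting the dB values:

```python
import math

# Example readings; the values are assumed for illustration.
signal_dbm = -60.0
noise_dbm = -92.0

snr_db = signal_dbm - noise_dbm          # 32.0 dB

# Cross-check via linear milliwatts: 10*log10(P_sig / P_noise) == snr_db
signal_mw = 10 ** (signal_dbm / 10.0)
noise_mw = 10 ** (noise_dbm / 10.0)
assert abs(10 * math.log10(signal_mw / noise_mw) - snr_db) < 1e-9
```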
self.addService('S35', 'misc') self.addService('K35', 'misc') | self.addService('S46', 'misc') self.addService('K46', 'misc') | def __init__(self): """ Class constructor. """ NodeConfig.__init__(self) # Add some basic services self.addService('S35', 'misc') self.addService('K35', 'misc') | d7a79e468486423d05dec3ad4c1584ccc0901738 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/d7a79e468486423d05dec3ad4c1584ccc0901738/config_generator.py |
f.write('START=35') f.write('\n') f.write('STOP=35') f.write('\n') | f.write('START=46\n') f.write('STOP=46\n') | def __generateMiscScript(self, f): f.write('#!/bin/sh /etc/rc.common\n') f.write('START=35') f.write('\n') f.write('STOP=35') f.write('\n') f.write('start() {\n') # Prevent the time from reseting to far into the past t = datetime.today() f.write('\tif [ ! -f /etc/datetime.save ]; then\n') f.write('\t echo -n "%02d%02d%02d%02d%04d" > /etc/datetime.save\n' % (t.month, t.day, t.hour, t.minute, t.year)) f.write('\tfi\n') f.write('\tDT=`cat /etc/datetime.save`\n') f.write('\tdate $DT\n') f.write('\n') # Allow txtinfo access when selected if 'olsrd-mod-txtinfo' in self.packages: f.write('\tiptables -A INPUT -p tcp --dport 2006 -j ACCEPT\n') f.write('\n') # Set boot_wait to on if it is not set f.write('\tif [ -x /usr/sbin/nvram ]; then\n') f.write('\t\tBOOT_WAIT=`nvram get boot_wait`\n') f.write('\t\t[ "$BOOT_WAIT" != "on" ] && {\n') f.write('\t\t nvram set boot_wait=on\n') f.write('\t\t nvram commit\n') f.write('\t\t}\n') # Set boardflags on WHR-HP-G54 if self.portLayout == 'whr-hp-g54': f.write('\tBOARDFLAGS=`nvram get boardflags`\n') f.write('\t\t[ "$BOARDFLAGS" != "0x3758" ] && {\n') f.write('\t\t nvram set boardflags=0x3758\n') f.write('\t\t nvram commit\n') f.write('\t\t}\n') f.write('\tfi\n') f.write('}\n') f.write('stop() {\n') f.write('\tDT=`date +%m%d%H%M%Y`\n') f.write('\techo $DT > /etc/datetime.save\n') f.write('}\n') f.close() if self.openwrtVersion == "old": # Copy timezone template self.__copyTemplate("general/timezone", os.path.join(self.base, 'TZ')) | d7a79e468486423d05dec3ad4c1584ccc0901738 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/d7a79e468486423d05dec3ad4c1584ccc0901738/config_generator.py |
fresh_subnet = pool.allocate_subnet() | fresh_subnet = pool.allocate_subnet(prefix_len) | def save(self, user): """ Completes node registration. """ ip = self.cleaned_data.get('ip') project = self.cleaned_data.get('project') pool = self.cleaned_data.get('pool') subnet = None | 56a4d140bead09ad9ccc152a81d31f11d084b585 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/56a4d140bead09ad9ccc152a81d31f11d084b585/forms.py |
return _("If this is not intentional, it is a bug. Please report it. If it is intentional, please get into a contact with network administrators to arrange new project entry with you own ESSID for you.") | return _("If this is not intentional, it is a bug. Please report it. If it is intentional, please get in contact with network administrators to arrange a new project entry with your own ESSID for you.") | def to_help_string(code): """ A helper method for transforming a warning code to a human readable help string. | d2fcb4c3cbb9c2e72ca5af1f6972909525d65735 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/d2fcb4c3cbb9c2e72ca5af1f6972909525d65735/models.py |
f.write('LinkQualityDijkstraLimit 0 9.0\n') | def __generateOlsrdConfig(self, f): # Subnet configuration if self.subnets: f.write('Hna4\n') f.write('{\n') for subnet in self.subnets: if subnet['olsr'] and subnet['cidr'] < 29: f.write(' %(subnet)s %(mask)s\n' % subnet) f.write('}\n\n') # General configuration (static) f.write('AllowNoInt yes\n') f.write('UseHysteresis no\n') f.write('LinkQualityFishEye 0\n') f.write('Willingness 3\n') f.write('LinkQualityLevel 2\n') f.write('LinkQualityAging 0.1\n') f.write('LinkQualityAlgorithm "etx_ff"\n') f.write('LinkQualityDijkstraLimit 0 9.0\n') f.write('FIBMetric "flat"\n') f.write('Pollrate 0.025\n') f.write('TcRedundancy 2\n') f.write('MprCoverage 3\n') f.write('NatThreshold 0.75\n') f.write('SmartGateway no\n') f.write('MainIp {0}\n'.format(self.ip)) f.write('SrcIpRoutes yes\n') f.write('\n') # Setup txtinfo plugin when selected if 'olsrd-mod-txtinfo' in self.packages: f.write('LoadPlugin "olsrd_txtinfo.so.0.1"\n') f.write('{\n') f.write(' PlParam "accept" "0.0.0.0"\n') f.write('}\n') f.write('\n') # Setup actions plugin to trigger a nodewatcher script when the default # route is added or removed from the routing table if self.hasClientSubnet: f.write('LoadPlugin "olsrd_actions.so.0.1"\n') f.write('{\n') f.write(' PlParam "trigger" "0.0.0.0>/etc/actions.d/olsr_gateway_action"\n') for dns in self.dns: f.write(' PlParam "trigger" "%s>/etc/actions.d/olsr_dns_action"\n' % dns) f.write('}\n') f.write('\n') # Add the olsrd-mod-actions package self.addPackage('olsrd-mod-actions') # General interface configuration (static) def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n') # Additional interface configuration for interface in self.interfaces: if interface['olsr']: interfaceConfiguration(interface['name'], interface['ip']) f.close() | 859d5d51a12277ae093292aee6ba0ef35f8d64b1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/859d5d51a12277ae093292aee6ba0ef35f8d64b1/config_generator.py |
def interfaceConfiguration(name, ip): | def interfaceConfiguration(name): | def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n') | 859d5d51a12277ae093292aee6ba0ef35f8d64b1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/859d5d51a12277ae093292aee6ba0ef35f8d64b1/config_generator.py |
if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) | def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n') | 859d5d51a12277ae093292aee6ba0ef35f8d64b1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/859d5d51a12277ae093292aee6ba0ef35f8d64b1/config_generator.py |
interfaceConfiguration(interface['name'], interface['ip']) | interfaceConfiguration(interface['name']) | def interfaceConfiguration(name, ip): f.write('Interface "{0}"\n'.format(name)) f.write('{\n') f.write(' IPv4Multicast 255.255.255.255\n') if ip is not None: f.write(' IPv4Src {0}\n'.format(ip)) f.write(' HelloInterval 5.0\n') f.write(' HelloValidityTime 40.0\n') f.write(' TcInterval 7.0\n') f.write(' TcValidityTime 161.0\n') f.write(' MidInterval 18.0\n') f.write(' MidValidityTime 324.0\n') f.write(' HnaInterval 18.0\n') f.write(' HnaValidityTime 324.0\n') f.write('}\n') f.write('\n') | 859d5d51a12277ae093292aee6ba0ef35f8d64b1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/859d5d51a12277ae093292aee6ba0ef35f8d64b1/config_generator.py |
f.write('\n') f.write(' f.write('config alias routerid\n') f.write('\toption interface loopback\n') f.write('\toption proto static\n') f.write('\toption ipaddr %s\n' % self.ip) f.write('\toption netmask 255.255.255.255\n') | def __generateNetworkConfig(self, f): # VLAN configuration layout = portLayouts[self.portLayout] if isinstance(layout, tuple): f.write('#### VLAN configuration\n') f.write('config switch %s\n' % ("eth0" if not self.portLayout in switchIds else switchIds[self.portLayout])) f.write('\toption vlan0 "%s"\n' % layout[0]) f.write('\toption vlan1 "%s"\n' % layout[1]) f.write('\n') # Loopback configuration (static) f.write('#### Loopback configuration\n') f.write('config interface loopback\n') f.write('\toption ifname "lo"\n') f.write('\toption proto static\n') f.write('\toption ipaddr 127.0.0.1\n') f.write('\toption netmask 255.0.0.0\n') f.write('\n') | 859d5d51a12277ae093292aee6ba0ef35f8d64b1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/859d5d51a12277ae093292aee6ba0ef35f8d64b1/config_generator.py |
|
if not self.profile: | from wlanlj.generator.models import Profile try: self.profile except Profile.DoesNotExist: | def adapt_to_router_type(self): """ Ensures that new router type is compatible with current configuration. """ if not self.profile: return for entry in self.profile.template.adaptation_chain.all().order_by("priority"): cls = load_plugin(entry.class_name, required_super = RouterTransition) transition = cls() transition.adapt(self) | 7b43dac1aa76601db5738fae8acb301205eba4ec /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/7b43dac1aa76601db5738fae8acb301205eba4ec/models.py |
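The row above swaps a truthiness test for exception handling: a missing reverse one-to-one in Django raises Profile.DoesNotExist instead of returning None, so `if not self.profile` can itself blow up. A standalone sketch that mimics the descriptor behaviour with stubs:

class Profile(object):
    class DoesNotExist(Exception):
        pass

class Node(object):
    @property
    def profile(self):
        # Mimics Django: accessing an absent related object raises.
        raise Profile.DoesNotExist('node has no profile')

def adapt_to_router_type(node):
    try:
        node.profile
    except Profile.DoesNotExist:
        return 'skipped: node has no profile'
    return 'adapted'

print(adapt_to_router_type(Node()))  # -> skipped: node has no profile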
if db_backend.startswith('postgresql'): | if db_backend.find('postgresql') != -1: | def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1) | fa522864c2c4f5ce19485f933ba5e4775e537f48 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/fa522864c2c4f5ce19485f933ba5e4775e537f48/prepare-database.py |
elif db_backend.startswith('sqlite'): | elif db_backend.find('sqlite') != -1: | def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1) | fa522864c2c4f5ce19485f933ba5e4775e537f48 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/fa522864c2c4f5ce19485f933ba5e4775e537f48/prepare-database.py |
elif db_backend.startswith('mysql'): | elif db_backend.find('mysql') != -1: | def ensure_success(errcode): if errcode != 0: print "ERROR: Command failed to execute, aborting!" exit(1) | fa522864c2c4f5ce19485f933ba5e4775e537f48 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/fa522864c2c4f5ce19485f933ba5e4775e537f48/prepare-database.py |
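These three rows loosen prefix tests into substring tests, which keeps working when the settings spell the backend as a dotted path (the dotted form below is an assumed example of such a value):

for db_backend in ('postgresql_psycopg2',
                   'django.db.backends.postgresql_psycopg2'):
    old = db_backend.startswith('postgresql')   # misses the dotted path
    new = db_backend.find('postgresql') != -1   # matches both spellings
    print(db_backend, old, new)

The idiomatic spelling of the new test is simply `'postgresql' in db_backend`.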
return _("unknown nodes") | return _("Unknown nodes") | def node_type_as_string_plural(self): """ Returns node type as string. """ if self.node_type == NodeType.Mesh: return _("Mesh nodes") elif self.node_type == NodeType.Server: return _("Server nodes") elif self.node_type == NodeType.Test: return _("Test nodes") elif self.node_type == NodeType.Mobile: return _("Mobile nodes") else: return _("unknown nodes") | 7c7e50be05a1714172cf255be20a5bfd1e31fde8 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/7c7e50be05a1714172cf255be20a5bfd1e31fde8/models.py |
twitter_api = twitter.Api(username=settings.TWITTER_USERNAME, settings.TWITTER_PASSWORD) | twitter_api = twitter.Api(username = settings.TWITTER_USERNAME, password = settings.TWITTER_PASSWORD) | def generate_new_node_tweet(node): if not tweets_enabled(): return try: bit_api = bitly.Api(login=settings.BITLY_LOGIN, apikey=settings.BITLY_API_KEY) twitter_api = twitter.Api(username=settings.TWITTER_USERNAME, settings.TWITTER_PASSWORD) node_link = bit_api.shorten(node.get_url()) msg = "A new node %s has just connected to the mesh %s" % (node.name, node_link) twitter_api.PostUpdate(msg) except: logging.warning(format_exc()) | 15fc06b4ed7cd075be6731793fe9ad03b4f06063 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/15fc06b4ed7cd075be6731793fe9ad03b4f06063/monitor.py |
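The removed call above is a SyntaxError: a positional argument follows a keyword argument. Naming both parameters, as the replacement does, is the fix; a stub Api makes the point runnable without the python-twitter package:

class Api(object):
    def __init__(self, username=None, password=None):
        self.username, self.password = username, password

# Api(username='u', 'secret')   # SyntaxError: positional follows keyword
api = Api(username='u', password='secret')   # corrected form
print(api.username, api.password)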
if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi | try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass | def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != 
"02:CA:FF:EE:BA:BE": NodeWarning.create(n, WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', 
EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) 
package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None | 0ea86c54de878c0613893ce1db1d97823abdca5e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/0ea86c54de878c0613893ce1db1d97823abdca5e/monitor.py |
l = hna.setdefault(ip, []) l.append('%s/32' % alias) | for x in alias: l = hna.setdefault(ip, []) l.append('%s/32' % x) | def parse_tables(data): """ Parses the OLSR routing tables. """ isTable = False isTableHead = False currentTable = '' nodes = {} hna = {} for line in data.splitlines(): line = line.strip() if line[0:6] == 'Table:' and line[7:] in ('Topology', 'HNA', 'MID'): isTable = True isTableHead = True currentTable = line[7:] continue if isTable and isTableHead: isTableHead = False continue if isTable and not line: isTable = False currentTable = '' continue if currentTable == 'Topology': srcIp, dstIp, LQ, ILQ, ETX = line.split('\t') try: if not float(ETX): continue except ValueError: # Newer OLSR versions can use INFINITE as ETX continue srcNode = create_node(srcIp, nodes, hna) dstNode = create_node(dstIp, nodes, hna) srcNode.links.append((dstIp, LQ, ILQ, ETX)) elif currentTable == 'HNA': try: network, cidr, gwIp = line.split('\t') except ValueError: # Newer OLSR versions have changed the format network, gwIp = line.split('\t') network, cidr = network.split('/') node = hna.setdefault(gwIp, []) node.append('%s/%s' % (network, cidr)) elif currentTable == 'MID': ip, alias = line.split('\t') # Treat MIDs as /32 HNAs l = hna.setdefault(ip, []) l.append('%s/32' % alias) return nodes, hna | e90fb94a3ff57a7bcb1bd62000007cfaac67728a /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/e90fb94a3ff57a7bcb1bd62000007cfaac67728a/wifi_utils.py |
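The row above turns a single append into a loop so that every alias from an OLSR MID entry is recorded as its own /32 HNA. A sketch with made-up addresses; note the loop presumes `alias` is already a list, since iterating a raw string would append one entry per character:

hna = {}
mid_entries = [('10.14.0.1', ['10.15.0.1', '10.16.0.1']),
               ('10.14.0.2', ['10.15.0.2'])]
for ip, alias in mid_entries:
    l = hna.setdefault(ip, [])   # one bucket of /32s per primary IP
    for x in alias:
        l.append('%s/32' % x)
print(hna)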
return item.get_full_url() | return item.node.get_full_url() | def item_link(self, item): return item.get_full_url() | 7bb6c7534dd8ed55da570227fbe2038ac007d1ad /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/7bb6c7534dd8ed55da570227fbe2038ac007d1ad/feeds.py |
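The row above redirects the feed link through the wrapped node. A stub sketch of that delegation (URL and class names are hypothetical):

class Node(object):
    def get_full_url(self):
        return 'https://example.net/nodes/42'

class FeedItem(object):
    def __init__(self, node):
        self.node = node

def item_link(item):
    return item.node.get_full_url()   # was: item.get_full_url()

print(item_link(FeedItem(Node())))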
"LINE1:loss_def r'GPRINT:loss_def:LAST: Current\:%8.2lf', r'GPRINT:loss_def:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_def:MAX:Maximum\:%8.2lf\n', "LINE1:loss_100 r'GPRINT:loss_100:LAST: Current\:%8.2lf', r'GPRINT:loss_100:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_100:MAX:Maximum\:%8.2lf\n', "LINE1:loss_500 r'GPRINT:loss_500:LAST: Current\:%8.2lf', r'GPRINT:loss_500:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_500:MAX:Maximum\:%8.2lf\n', "LINE1:loss_1000 r'GPRINT:loss_1000:LAST:Current\:%8.2lf', r'GPRINT:loss_1000:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_1000:MAX:Maximum\:%8.2lf\n', "LINE1:loss_1480 r'GPRINT:loss_1480:LAST:Current\:%8.2lf', r'GPRINT:loss_1480:AVERAGE:Average\:%8.2lf', r'GPRINT:loss_1480:MAX:Maximum\:%8.2lf\n', '--alt-y-grid', '--units-exponent', '0', | "CDEF:nloss_def=loss_def,100,/", "CDEF:nloss_100=loss_100,100,/", "CDEF:nloss_500=loss_500,100,/", "CDEF:nloss_1000=loss_1000,100,/", "CDEF:nloss_1480=loss_1480,100,/", "LINE1:nloss_def r'GPRINT:nloss_def:LAST: Current\:%8.2lf', r'GPRINT:nloss_def:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_def:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_100 r'GPRINT:nloss_100:LAST: Current\:%8.2lf', r'GPRINT:nloss_100:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_100:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_500 r'GPRINT:nloss_500:LAST: Current\:%8.2lf', r'GPRINT:nloss_500:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_500:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_1000 r'GPRINT:nloss_1000:LAST:Current\:%8.2lf', r'GPRINT:nloss_1000:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_1000:MAX:Maximum\:%8.2lf\n', "LINE1:nloss_1480 r'GPRINT:nloss_1480:LAST:Current\:%8.2lf', r'GPRINT:nloss_1480:AVERAGE:Average\:%8.2lf', r'GPRINT:nloss_1480:MAX:Maximum\:%8.2lf\n', | def __str__(self): return "DS:%s:%s:%s:U:U" % (self.name, self.type, self.heartbeat) | 94f4d4c1e8b50a7d7f6c04d763562c0efc9c4b6c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/94f4d4c1e8b50a7d7f6c04d763562c0efc9c4b6c/rra.py |
'--upper-limit', '100' | '--upper-limit', '1' | def __str__(self): return "DS:%s:%s:%s:U:U" % (self.name, self.type, self.heartbeat) | 94f4d4c1e8b50a7d7f6c04d763562c0efc9c4b6c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/94f4d4c1e8b50a7d7f6c04d763562c0efc9c4b6c/rra.py |
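The two rows above rescale packet loss from a 0-100 series to a 0-1 axis: an rrdtool CDEF divides each series by 100 in RPN, and the y-axis cap drops from 100 to 1 to match. A sketch of the resulting argument list (the DEF source and the colour are illustrative; the originals' colour codes are truncated in the rows above):

args = [
    'DEF:loss_def=packetloss.rrd:loss_def:AVERAGE',
    'CDEF:nloss_def=loss_def,100,/',   # RPN for loss_def / 100
    'LINE1:nloss_def#cc0000:Packet loss (default size)',
    '--alt-y-grid', '--units-exponent', '0',
    '--upper-limit', '1',              # previously '100'
]
print('\n'.join(args))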
if len(wifiSubnet) and n.clients >= ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4: | if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): | def process_node(node_ip, ping_results, is_duped, peers): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % 
(oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients >= ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4: Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', 
n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None | 5df71f9ef8f332b065dba1f70c0dd76d084a221b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/5df71f9ef8f332b065dba1f70c0dd76d084a221b/monitor.py |
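Among the changes in the row above, the IP-shortage test becomes `clients > max(0, size - 4)`: reserving four addresses from a very small subnet would otherwise give a negative threshold that every node trips. A self-contained check, with the subnet size computed directly rather than through the ipcalc module the monitor uses:

def ip_shortage(clients, cidr):
    size = 2 ** (32 - cidr)              # total addresses in the prefix
    return clients > max(0, size - 4)   # old form: clients >= size - 4

print(ip_shortage(3, 30))    # /30 holds 4 addresses: 3 clients -> True
print(ip_shortage(27, 27))   # /27 leaves 28 slots: 27 clients -> False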
last_updated = graph.last_update, | last_update = graph.last_update, | def draw_graph(graph_id, timespan): """ Draws the specified graph. @param graph_id: Graph primary key @param timespan: Timespan to draw the graph for @return: True on success, False on failure """ logger = draw_graph.get_logger() # First check that we haven't drawn this graph already result = cache.get('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan)) if result is not None: return bool(result) # Since the graph has not yet been drawn, let's draw it try: graph_id = int(graph_id) # XXX Check for hardcoded graphs if graph_id > 0: graph = nodes_models.GraphItem.objects.get(pk = graph_id) archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', graph.rra)) # Actually draw the graph rrd.RRA.graph( graphs.RRA_CONF_MAP[graph.type], str(graph.title), graph.id, archive_path, end_time = int(time.mktime(graph.last_update.timetuple())), dead = graph.dead, last_updated = graph.last_update, timespan = timespan ) else: # XXX One of the hardcoded graphs conf, title, rrd_path = GLOBAL_GRAPHS[graph_id] archive_path = str(os.path.join(settings.MONITOR_WORKDIR, 'rra', rrd_path)) # Actually draw the graph rrd.RRA.graph(conf, title, graph_id, archive_path, timespan = timespan) result = True except: logger.error(traceback.format_exc()) result = False # Mark the graph as drawn cache.set('nodewatcher.graphs.drawn.{0}.{1}'.format(graph_id, timespan), result) return result | 41b6f21fba709e6814bc433c791c4190cc862ce0 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/41b6f21fba709e6814bc433c791c4190cc862ce0/tasks.py |
upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 | offset = -3 unit = 1000 if 'Kbit' in info['net']['vpn']['upload_limit']: offset = -4 unit = 1 upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:offset]) // unit | def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in n.get_peers(): lq_avg += float(peer.lq) ilq_avg += float(peer.ilq) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.get_peers(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) # Validate BSSID and ESSID if n.bssid != "02:CA:FF:EE:BA:BE": NodeWarning.create(n, 
WarningCode.BSSIDMismatch, EventSource.Monitor) try: if n.essid != n.project.ssid: NodeWarning.create(n, WarningCode.ESSIDMismatch, EventSource.Monitor) except Project.DoesNotExist: pass if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) try: if n.channel != n.profile.channel: NodeWarning.create(n, WarningCode.ChannelMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) if 'errors' in info['wifi']: error_count = safe_int_convert(info['wifi']['errors']) if error_count != n.wifi_error_count and error_count > 0: Event.create_event(n, EventCode.WifiErrors, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.wifi_error_count, error_count)) n.wifi_error_count = error_count if 'net' in info: loss_count = safe_int_convert(info['net']['losses']) if loss_count != n.loss_count and loss_count > 1: Event.create_event(n, EventCode.ConnectivityLoss, '', EventSource.Monitor, data = 'Old count: %s\n New count: %s' % (n.loss_count, loss_count)) n.loss_count = loss_count # Check VPN configuration if 'vpn' in info['net']: n.vpn_mac = info['net']['vpn']['mac'] or None try: upload_limit = safe_int_convert(info['net']['vpn']['upload_limit'][:-3]) // 1000 except TypeError: upload_limit = None if n.vpn_mac and n.vpn_mac != n.vpn_mac_conf: NodeWarning.create(n, WarningCode.VPNMacMismatch, EventSource.Monitor) try: if upload_limit != n.profile.vpn_egress_limit: NodeWarning.create(n, WarningCode.VPNLimitMismatch, EventSource.Monitor) except Profile.DoesNotExist: pass # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile try: if n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) except Profile.DoesNotExist: pass else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for 
number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs try: if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi except Profile.DoesNotExist: pass add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state']), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name 
= packageName package.version = version package.last_update = datetime.now() package.save() # Check if all selected optional packages are present in package listing try: missing_packages = [] for package in n.profile.optional_packages.all(): if n.installedpackage_set.filter(name = package.name).count() == 0: missing_packages.append(package.name) if missing_packages: NodeWarning.create(n, WarningCode.OptPackageNotFound, EventSource.Monitor, details = ("Packages missing: %s" % ", ".join(missing_packages))) except Profile.DoesNotExist: pass # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning("Failed to interpret nodewatcher data for node '%s (%s)'!" % (n.name, n.ip)) logging.warning(format_exc()) NodeWarning.create(n, WarningCode.NodewatcherInterpretFailed, EventSource.Monitor) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None | 4fb6af186335addb1a39c08e825290be33ba7fea /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/4fb6af186335addb1a39c08e825290be33ba7fea/monitor.py |
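The upload-limit parsing in the row above becomes unit-aware: the reported string may end in 'bit' or 'Kbit', so both the suffix length and the divisor change, and either path yields kbit. A sketch with int() standing in for the safe_int_convert helper:

def parse_upload_limit(value):
    offset, unit = -3, 1000        # '...bit'  -> strip 3 chars, / 1000
    if 'Kbit' in value:
        offset, unit = -4, 1       # '...Kbit' -> strip 4 chars, keep
    return int(value[:offset]) // unit

print(parse_upload_limit('2048000bit'))   # -> 2048 (kbit)
print(parse_upload_limit('2048Kbit'))     # -> 2048 (kbit)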
NodeWarning.create(n, WarningCode.AnnounceConflict, EventSource.Monitor) | NodeWarning.create(n, WarningCode.UnregisteredAnnounce, EventSource.Monitor) | def check_mesh_status(): """ Performs a mesh status check. """ # Initialize the state of nodes and subnets, remove out of date ap clients and graph items Node.objects.all().update(visible = False) Subnet.objects.all().update(visible = False) APClient.objects.filter(last_update__lt = datetime.now() - timedelta(minutes = 11)).delete() GraphItem.objects.filter(last_update__lt = datetime.now() - timedelta(days = 30)).delete() # Reset some states NodeWarning.objects.all().update(source = EventSource.Monitor, dirty = False) Node.objects.all().update(warnings = False, conflicting_subnets = False) Link.objects.all().delete() # Fetch routing tables from OLSR try: nodes, hna = wifi_utils.get_tables(settings.MONITOR_OLSR_HOST) except TypeError: logging.error("Unable to fetch routing tables from '%s'!" % settings.MONITOR_OLSR_HOST) return # Ping nodes present in the database and visible in OLSR dbNodes = {} nodesToPing = [] for nodeIp in nodes.keys(): try: # Try to get the node from the database n = Node.get_exclusive(ip = nodeIp) n.visible = True n.peers = len(nodes[nodeIp].links) # If we have succeeded, add to list (if not invalid) if not n.is_invalid(): if n.awaiting_renumber: # Reset any status from awaiting renumber to invalid for notice in n.renumber_notices.all(): try: rn = Node.objects.get(ip = notice.original_ip) if rn.status == NodeStatus.AwaitingRenumber: rn.status = NodeStatus.Invalid rn.node_type = NodeType.Unknown rn.awaiting_renumber = False rn.save() except Node.DoesNotExist: pass notice.delete() n.awaiting_renumber = False n.save() nodesToPing.append(nodeIp) else: n.last_seen = datetime.now() n.peers = len(nodes[nodeIp].links) # Create a warning since node is not registered NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) n.save() dbNodes[nodeIp] = n except Node.DoesNotExist: # Node does not exist, create an invalid entry for it n = Node(ip = nodeIp, status = NodeStatus.Invalid, last_seen = datetime.now()) n.visible = True n.node_type = NodeType.Unknown n.peers = len(nodes[nodeIp].links) # Check if there are any renumber notices for this IP address try: notice = RenumberNotice.objects.get(original_ip = nodeIp) n.status = NodeStatus.AwaitingRenumber n.node_type = notice.node.node_type n.awaiting_renumber = True except RenumberNotice.DoesNotExist: pass n.save(force_insert = True) dbNodes[nodeIp] = n # Create an event and append a warning since an unknown node has appeared NodeWarning.create(n, WarningCode.UnregisteredNode, EventSource.Monitor) Event.create_event(n, EventCode.UnknownNodeAppeared, '', EventSource.Monitor) # Add a warning to all nodes that have been stuck in renumbering state for over a week for node in Node.objects.filter(renumber_notices__renumbered_at__lt = datetime.now() - timedelta(days = 7)): NodeWarning.create(node, WarningCode.LongRenumber, EventSource.Monitor) node.save() # Mark invisible nodes as down for node in Node.objects.exclude(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber)): oldStatus = node.status if node.ip not in dbNodes: if node.status == NodeStatus.New: node.status = NodeStatus.Pending elif node.status != NodeStatus.Pending: node.status = NodeStatus.Down node.save() if oldStatus in (NodeStatus.Up, NodeStatus.Visible, NodeStatus.Duped) and node.status == NodeStatus.Down: Event.create_event(node, EventCode.NodeDown, '', EventSource.Monitor) # Invalidate uptime 
credit for this node node.uptime_last = None node.save() # Setup all node peerings for nodeIp, node in nodes.iteritems(): n = dbNodes[nodeIp] oldRedundancyLink = n.redundancy_link n.redundancy_link = False for peerIp, lq, ilq, etx, vtime in node.links: l = Link(src = n, dst = dbNodes[peerIp], lq = float(lq), ilq = float(ilq), etx = float(etx), vtime = vtime) l.save() # Check if any of the peers has never peered with us before if n.is_adjacency_important() and l.dst.is_adjacency_important() and not n.peer_history.filter(pk = l.dst.pk).count(): n.peer_history.add(l.dst) Event.create_event(n, EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % l.dst, aggregate = False) Event.create_event(l.dst, EventCode.AdjacencyEstablished, '', EventSource.Monitor, data = 'Peer node: %s' % n, aggregate = False) # Check if we have a peering with any border routers if l.dst.border_router: n.redundancy_link = True if not n.is_invalid(): if oldRedundancyLink and not n.redundancy_link: Event.create_event(n, EventCode.RedundancyLoss, '', EventSource.Monitor) elif not oldRedundancyLink and n.redundancy_link: Event.create_event(n, EventCode.RedundancyRestored, '', EventSource.Monitor) if n.redundancy_req and not n.redundancy_link: NodeWarning.create(n, WarningCode.NoBorderPeering, EventSource.Monitor) n.save() # Add nodes to topology map and generate output if not getattr(settings, 'MONITOR_DISABLE_GRAPHS', None): # Only generate topology when graphing is not disabled topology = DotTopologyPlotter() for node in dbNodes.values(): topology.addNode(node) topology.save(os.path.join(settings.GRAPH_DIR, 'mesh_topology.png'), os.path.join(settings.GRAPH_DIR, 'mesh_topology.dot')) # Update valid subnet status in the database for nodeIp, subnets in hna.iteritems(): if nodeIp not in dbNodes: continue for subnet in subnets: subnet, cidr = subnet.split("/") try: s = Subnet.objects.get(node__ip = nodeIp, subnet = subnet, cidr = int(cidr)) s.last_seen = datetime.now() s.visible = True if s.status == SubnetStatus.Subset: pass elif s.status in (SubnetStatus.AnnouncedOk, SubnetStatus.NotAnnounced): s.status = SubnetStatus.AnnouncedOk elif not s.node.border_router or s.status == SubnetStatus.Hijacked: NodeWarning.create(s.node, WarningCode.UnregisteredAnnounce, EventSource.Monitor) s.node.save() # Recheck if this is a more specific prefix announce for an allocated prefix if s.status == SubnetStatus.NotAllocated and s.is_more_specific(): s.status = SubnetStatus.Subset s.save() except Subnet.DoesNotExist: # Subnet does not exist, prepare one s = Subnet(node = dbNodes[nodeIp], subnet = subnet, cidr = int(cidr), last_seen = datetime.now()) s.visible = True # Check if this is a more specific prefix announce for an allocated prefix if s.is_more_specific(): s.status = SubnetStatus.Subset else: s.status = SubnetStatus.NotAllocated s.save() # Check if this is a hijack n = dbNodes[nodeIp] try: origin = Subnet.objects.ip_filter( # Subnet overlaps with another one ip_subnet__contains = '%s/%s' % (subnet, cidr) ).exclude( # Of another node (= filter all subnets belonging to current node) node = s.node ).get( # That is allocated and visible allocated = True, visible = True ) s.status = SubnetStatus.Hijacked s.save() # Generate an event Event.create_event(n, EventCode.SubnetHijacked, '', EventSource.Monitor, data = 'Subnet: %s/%s\n Allocated to: %s' % (s.subnet, s.cidr, origin.node)) except Subnet.DoesNotExist: pass # Flag node entry with warnings flag (if not a border router) if s.status != SubnetStatus.Subset and 
(not n.border_router or s.status == SubnetStatus.Hijacked): NodeWarning.create(n, WarningCode.AnnounceConflict, EventSource.Monitor) n.save() # Detect subnets that cause conflicts and raise warning flags for all involved # nodes if s.is_conflicting(): NodeWarning.create(s.node, WarningCode.AnnounceConflict, EventSource.Monitor) s.node.conflicting_subnets = True s.node.save() for cs in s.get_conflicting_subnets(): NodeWarning.create(cs.node, WarningCode.AnnounceConflict, EventSource.Monitor) cs.node.conflicting_subnets = True cs.node.save() # Remove (or change their status) subnets that are not visible Subnet.objects.filter(status__in = (SubnetStatus.NotAllocated, SubnetStatus.Subset), visible = False).delete() Subnet.objects.filter(status = SubnetStatus.AnnouncedOk, visible = False).update(status = SubnetStatus.NotAnnounced) for subnet in Subnet.objects.filter(status = SubnetStatus.NotAnnounced, node__visible = True): NodeWarning.create(subnet.node, WarningCode.OwnNotAnnounced, EventSource.Monitor) subnet.node.save() # Remove subnets that were hijacked but are not visible anymore for s in Subnet.objects.filter(status = SubnetStatus.Hijacked, visible = False): Event.create_event(s.node, EventCode.SubnetRestored, '', EventSource.Monitor, data = 'Subnet: %s/%s' % (s.subnet, s.cidr)) s.delete() # Remove invisible unknown nodes for node in Node.objects.filter(status = NodeStatus.Invalid, visible = False).all(): # Create an event since an unknown node has disappeared Event.create_event(node, EventCode.UnknownNodeDisappeared, '', EventSource.Monitor) Node.objects.filter(status__in = (NodeStatus.Invalid, NodeStatus.AwaitingRenumber), visible = False).delete() # Ping the nodes to prepare information for later node processing varsize_results = {} results, dupes = wifi_utils.ping_hosts(10, nodesToPing) for packet_size in (100, 500, 1000, 1480): r, d = wifi_utils.ping_hosts(10, nodesToPing, packet_size - 8) for node_ip in nodesToPing: varsize_results.setdefault(node_ip, []).append(r[node_ip][3] if node_ip in r else None) if getattr(settings, 'MONITOR_DISABLE_MULTIPROCESSING', None): # Multiprocessing is disabled (the MONITOR_DISABLE_MULTIPROCESSING option is usually # used for debug purpuses where a single process is prefered) for node_ip in nodesToPing: process_node(node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip)) # Commit the transaction here since we do everything in the same session transaction.commit() else: # We MUST commit the current transaction here, because we will be processing # some transactions in parallel and must ensure that this transaction that has # modified the nodes is commited. Otherwise this will deadlock! 
transaction.commit() worker_results = [] for node_ip in nodesToPing: worker_results.append( WORKER_POOL.apply_async(process_node, (node_ip, results.get(node_ip), node_ip in dupes, nodes[node_ip].links, varsize_results.get(node_ip))) ) # Wait for all workers to finish processing objects = {} for result in worker_results: try: k, v = result.get() objects[k] = v except Exception, e: logging.warning(format_exc()) # When GC debugging is enabled make some additional computations if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): global _MAX_GC_OBJCOUNT objcount = sum(objects.values()) if '_MAX_GC_OBJCOUNT' not in globals(): _MAX_GC_OBJCOUNT = objcount logging.debug("GC object count: %d %s" % (objcount, "!M" if objcount > _MAX_GC_OBJCOUNT else "")) _MAX_GC_OBJCOUNT = max(_MAX_GC_OBJCOUNT, objcount) # Cleanup all out of date warnings NodeWarning.clear_obsolete_warnings(EventSource.Monitor) | f0fe4f5230205dc028f7b4b978cb8367ae0b2400 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/f0fe4f5230205dc028f7b4b978cb8367ae0b2400/monitor.py |
self.fields['prefix_%s' % subnet.pk] = forms.IntegerField() | self.fields['prefix_%s' % subnet.pk] = forms.IntegerField(required = False, initial = 27) | def __init__(self, user, node, *args, **kwargs): """ Class constructor. """ super(RenumberForm, self).__init__(*args, **kwargs) self.__node = node # Use renumber with subnet only when this is possible self.fields['primary_ip'] = forms.ChoiceField( choices = [ (RenumberAction.SetManually, _("Set manually")) ], initial = RenumberAction.SetManually ) if node.is_primary_ip_in_subnet(): self.fields['primary_ip'].choices.insert(0, (RenumberAction.Renumber, _("Renumber with subnet")) ) self.fields['primary_ip'].initial = RenumberAction.Renumber else: self.fields['primary_ip'].choices.insert(0, (RenumberAction.Keep, _("Keep")), ) self.fields['primary_ip'].initial = RenumberAction.Keep if not user.is_staff: del self.fields['primary_ip'].choices[1] # Setup dynamic form fields, depending on how may subnets a node has primary = node.subnet_set.ip_filter(ip_subnet__contains = "%s/32" % node.ip).filter(allocated = True).exclude(cidr = 0) for subnet in node.subnet_set.filter(allocated = True).order_by('ip_subnet'): pools = [] for pool in node.project.pools.exclude(status = PoolStatus.Full).order_by('network'): pools.append((pool.pk, _("Renumber to %s [%s/%s]") % (pool.description, pool.network, pool.cidr))) choices = [ (RenumberAction.Keep, _("Keep")), (RenumberAction.Remove, _("Remove")) ] # Primary subnets should not be removed if primary and primary[0] == subnet: del choices[1] self.fields['subnet_%s' % subnet.pk] = forms.ChoiceField( choices = choices + pools, initial = RenumberAction.Keep, widget = forms.Select(attrs = { 'class' : 'subnet' }) ) # Field for choosing new subnet prefix size self.fields['prefix_%s' % subnet.pk] = forms.IntegerField() | c4145a241c9ee074d2dc6640b2a2ea71ad2186b1 /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/c4145a241c9ee074d2dc6640b2a2ea71ad2186b1/forms.py |
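The row above makes each per-subnet prefix field optional with /27 as its default, so a blank submission no longer fails validation. A pared-down sketch of the dynamic-field pattern (class name is hypothetical; instantiating it needs a configured Django environment):

from django import forms

class PrefixForm(forms.Form):
    def __init__(self, subnet_pks, *args, **kwargs):
        super(PrefixForm, self).__init__(*args, **kwargs)
        for pk in subnet_pks:
            self.fields['prefix_%s' % pk] = forms.IntegerField(
                required=False,   # blank input is now accepted
                initial=27,       # default prefix length shown in the UI
            )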
return _("If this is not intentional, you are using an old firmware version or it is a bug. In the later case please report it. If it is intentional, please get in contact with network administrators to arrange a configuration option in the firmware for it.") | return _("If this is not intentional, it is a bug. Please report it. If it is intentional, please get in contact with network administrators to arrange a configuration option in the firmware for it.") | def to_help_string(code): """ A helper method for transforming a warning code to a human readable help string. | d98f5d44d05ad34b144d324e2b87938ce0581f1e /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/d98f5d44d05ad34b144d324e2b87938ce0581f1e/models.py |
for key, value in info['solar'].iteritems(): if not value.strip(): info['solar'][key] = None | def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, 
EventCode.VersionChange, '', EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, 
n.numproc = safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None | 536183f8df7a59f56780c77694d525a16f7d300f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/536183f8df7a59f56780c77694d525a16f7d300f/monitor.py |
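The row above belongs to process_node's nodewatcher-parsing section; the visible rem field normalizes whitespace-only solar values to None before they reach the graphing code. A standalone sketch of that idiom (sample data hypothetical; the list() copy guards against mutating the dict mid-iteration, which Python 3 forbids):

def normalize_section(section):
    # Replace whitespace-only string values with None, in place.
    for key, value in list(section.items()):
        if isinstance(value, str) and not value.strip():
            section[key] = None
    return section

info = {'solar': {'batvoltage': '12.4', 'state': '  ', 'load': ''}}
normalize_section(info['solar'])
assert info['solar']['state'] is None
assert info['solar']['load'] is None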
|
states.get(info['solar']['state'], 1), | states.get(info['solar']['state']), | def process_node(node_ip, ping_results, is_duped, peers, varsize_results): """ Processes a single node. @param node_ip: Node's IP address @param ping_results: Results obtained from ICMP ECHO tests @param is_duped: True if duplicate echos received @param peers: Peering info from routing daemon @param varsize_results: Results of ICMP ECHO tests with variable payloads """ transaction.set_dirty() try: n = Node.get_exclusive(ip = node_ip) except Node.DoesNotExist: # This might happen when we were in the middle of a renumbering and # did not yet have access to the node. Then after the node has been # renumbered we gain access, but the IP has been changed. In this # case we must ignore processing of this node. return oldStatus = n.status # Determine node status if ping_results is not None: n.status = NodeStatus.Up n.rtt_min, n.rtt_avg, n.rtt_max, n.pkt_loss = ping_results # Add RTT graph add_graph(n, '', GraphType.RTT, RRARTT, 'Latency', 'latency', n.rtt_avg, n.rtt_min, n.rtt_max) # Add uptime credit if n.uptime_last: n.uptime_so_far = (n.uptime_so_far or 0) + (datetime.now() - n.uptime_last).seconds n.uptime_last = datetime.now() else: n.status = NodeStatus.Visible # Measure packet loss with different packet sizes and generate a graph if ping_results is not None and varsize_results is not None: losses = [n.pkt_loss] + varsize_results add_graph(n, '', GraphType.PacketLoss, RRAPacketLoss, 'Packet Loss', 'packetloss', *losses) if is_duped: n.status = NodeStatus.Duped NodeWarning.create(n, WarningCode.DupedReplies, EventSource.Monitor) # Generate status change events if oldStatus in (NodeStatus.Down, NodeStatus.Pending, NodeStatus.New) and n.status in (NodeStatus.Up, NodeStatus.Visible): if oldStatus in (NodeStatus.New, NodeStatus.Pending): n.first_seen = datetime.now() if n.node_type == NodeType.Mesh: generate_new_node_tweet(n) Event.create_event(n, EventCode.NodeUp, '', EventSource.Monitor) elif oldStatus != NodeStatus.Duped and n.status == NodeStatus.Duped: Event.create_event(n, EventCode.PacketDuplication, '', EventSource.Monitor) # Add olsr peer count graph add_graph(n, '', GraphType.OlsrPeers, RRAOlsrPeers, 'Routing Peers', 'olsrpeers', n.peers) # Add LQ/ILQ graphs if n.peers > 0: lq_avg = ilq_avg = 0.0 for peer in peers: lq_avg += float(peer[1]) ilq_avg += float(peer[2]) lq_graph = add_graph(n, '', GraphType.LQ, RRALinkQuality, 'Average Link Quality', 'lq', lq_avg / n.peers, ilq_avg / n.peers) for peer in n.src.all(): add_graph(n, peer.dst.ip, GraphType.LQ, RRALinkQuality, 'Link Quality to %s' % peer.dst, 'lq_peer_%s' % peer.dst.pk, peer.lq, peer.ilq, parent = lq_graph) n.last_seen = datetime.now() # Check if we have fetched nodewatcher data info = nodewatcher.fetch_node_info(node_ip) if info is not None and 'general' in info: try: oldUptime = n.uptime or 0 oldChannel = n.channel or 0 oldVersion = n.firmware_version n.firmware_version = info['general']['version'] n.local_time = safe_date_convert(info['general']['local_time']) n.bssid = info['wifi']['bssid'] n.essid = info['wifi']['essid'] n.channel = nodewatcher.frequency_to_channel(info['wifi']['frequency']) n.clients = 0 n.uptime = safe_uptime_convert(info['general']['uptime']) if 'uuid' in info['general']: n.reported_uuid = info['general']['uuid'] if n.reported_uuid and n.reported_uuid != n.uuid: NodeWarning.create(n, WarningCode.MismatchedUuid, EventSource.Monitor) if oldVersion != n.firmware_version: Event.create_event(n, EventCode.VersionChange, '', 
EventSource.Monitor, data = 'Old version: %s\n New version: %s' % (oldVersion, n.firmware_version)) if oldUptime > n.uptime: Event.create_event(n, EventCode.UptimeReset, '', EventSource.Monitor, data = 'Old uptime: %s\n New uptime: %s' % (oldUptime, n.uptime)) if oldChannel != n.channel and oldChannel != 0: Event.create_event(n, EventCode.ChannelChanged, '', EventSource.Monitor, data = 'Old channel: %s\n New channel %s' % (oldChannel, n.channel)) if n.has_time_sync_problems(): NodeWarning.create(n, WarningCode.TimeOutOfSync, EventSource.Monitor) # Parse nodogsplash client information oldNdsStatus = n.captive_portal_status if 'nds' in info: if 'down' in info['nds'] and info['nds']['down'] == '1': n.captive_portal_status = False # Create a node warning when captive portal is down and the node has it # selected in its image generator profile if not n.profile or n.profile.use_captive_portal: NodeWarning.create(n, WarningCode.CaptivePortalDown, EventSource.Monitor) else: n.captive_portal_status = True for cid, client in info['nds'].iteritems(): if not cid.startswith('client'): continue try: c = APClient.objects.get(node = n, ip = client['ip']) except APClient.DoesNotExist: c = APClient(node = n) n.clients_so_far += 1 n.clients += 1 c.ip = client['ip'] c.connected_at = safe_date_convert(client['added_at']) c.uploaded = safe_int_convert(client['up']) c.downloaded = safe_int_convert(client['down']) c.last_update = datetime.now() c.save() else: n.captive_portal_status = True # Check for captive portal status change if oldNdsStatus and not n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalDown, '', EventSource.Monitor) elif not oldNdsStatus and n.captive_portal_status: Event.create_event(n, EventCode.CaptivePortalUp, '', EventSource.Monitor) # Generate a graph for number of wifi cells if 'cells' in info['wifi']: add_graph(n, '', GraphType.WifiCells, RRAWifiCells, 'Nearby Wifi Cells', 'wificells', safe_int_convert(info['wifi']['cells']) or 0) # Update node's MAC address on wifi iface if 'mac' in info['wifi']: n.wifi_mac = info['wifi']['mac'] # Update node's RTS and fragmentation thresholds if 'rts' in info['wifi'] and 'frag' in info['wifi']: n.thresh_rts = safe_int_convert(info['wifi']['rts']) or 2347 n.thresh_frag = safe_int_convert(info['wifi']['frag']) or 2347 # Check for VPN statistics if 'vpn' in info: n.vpn_mac = info['vpn']['mac'] # Generate a graph for number of clients add_graph(n, '', GraphType.Clients, RRAClients, 'Connected Clients', 'clients', n.clients) # Check for IP shortage wifiSubnet = n.subnet_set.filter(gen_iface_type = IfaceType.WiFi, allocated = True) if len(wifiSubnet) and n.clients > max(0, ipcalc.Network(wifiSubnet[0].subnet, wifiSubnet[0].cidr).size() - 4): Event.create_event(n, EventCode.IPShortage, '', EventSource.Monitor, data = 'Subnet: %s\n Clients: %s' % (wifiSubnet[0], n.clients)) # Record interface traffic statistics for all interfaces for iid, iface in info['iface'].iteritems(): if iid not in ('wifi0', 'wmaster0'): # Check mappings for known wifi interfaces so we can handle hardware changes while # the node is up and not generate useless intermediate graphs if n.profile: iface_wifi = n.profile.template.iface_wifi if Template.objects.filter(iface_wifi = iid).count() >= 1: iid = iface_wifi add_graph(n, iid, GraphType.Traffic, RRAIface, 'Traffic - %s' % iid, 'traffic_%s' % iid, iface['up'], iface['down']) # Generate load average statistics if 'loadavg' in info['general']: n.loadavg_1min, n.loadavg_5min, n.loadavg_15min, n.numproc = 
safe_loadavg_convert(info['general']['loadavg']) add_graph(n, '', GraphType.LoadAverage, RRALoadAverage, 'Load Average', 'loadavg', n.loadavg_1min, n.loadavg_5min, n.loadavg_15min) add_graph(n, '', GraphType.NumProc, RRANumProc, 'Number of Processes', 'numproc', n.numproc) # Generate free memory statistics if 'memfree' in info['general']: n.memfree = safe_int_convert(info['general']['memfree']) buffers = safe_int_convert(info['general'].get('buffers', 0)) cached = safe_int_convert(info['general'].get('cached', 0)) add_graph(n, '', GraphType.MemUsage, RRAMemUsage, 'Memory Usage', 'memusage', n.memfree, buffers, cached) # Generate solar statistics when available if 'solar' in info and all([x in info['solar'] for x in ('batvoltage', 'solvoltage', 'charge', 'state', 'load')]): states = { 'boost' : 1, 'equalize' : 2, 'absorption' : 3, 'float' : 4 } add_graph(n, '', GraphType.Solar, RRASolar, 'Solar Monitor', 'solar', info['solar']['batvoltage'], info['solar']['solvoltage'], info['solar']['charge'], states.get(info['solar']['state'], 1), info['solar']['load'] ) # Check for installed package versions (every hour) try: last_pkg_update = n.installedpackage_set.all()[0].last_update except: last_pkg_update = None if not last_pkg_update or last_pkg_update < datetime.now() - timedelta(hours = 1): packages = nodewatcher.fetch_installed_packages(n.ip) or {} # Remove removed packages and update existing package versions for package in n.installedpackage_set.all(): if package.name not in packages: package.delete() else: package.version = packages[package.name] package.last_update = datetime.now() package.save() del packages[package.name] # Add added packages for packageName, version in packages.iteritems(): package = InstalledPackage(node = n) package.name = packageName package.version = version package.last_update = datetime.now() package.save() # Check if DNS works if 'dns' in info: old_dns_works = n.dns_works n.dns_works = info['dns']['local'] == '0' and info['dns']['remote'] == '0' if not n.dns_works: NodeWarning.create(n, WarningCode.DnsDown, EventSource.Monitor) if old_dns_works != n.dns_works: # Generate a proper event when the state changes if n.dns_works: Event.create_event(n, EventCode.DnsResolverRestored, '', EventSource.Monitor) else: Event.create_event(n, EventCode.DnsResolverFailed, '', EventSource.Monitor) except: logging.warning(format_exc()) n.save() # When GC debugging is enabled perform some more work if getattr(settings, 'MONITOR_ENABLE_GC_DEBUG', None): gc.collect() return os.getpid(), len(gc.get_objects()) return None, None | 536183f8df7a59f56780c77694d525a16f7d300f /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/536183f8df7a59f56780c77694d525a16f7d300f/monitor.py |
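The one-token fix above removes the default from dict.get, so an unrecognized solar charge state becomes None instead of being silently recorded as 1 ('boost'). A small illustration (the 'U' fallback is an assumption, mirroring RRDtool's convention for unknown samples):

states = {'boost': 1, 'equalize': 2, 'absorption': 3, 'float': 4}

# With a default, a new or misspelled firmware state is misreported:
assert states.get('overcharge', 1) == 1      # indistinguishable from 'boost'

# Without one, the caller can detect the unknown value and act on it:
state = states.get('overcharge')             # None
if state is None:
    state = 'U'                              # record as unknown, not wrong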
result = "%s-%s-%s%s-%s%s" % (d['hostname'], d['router_name'], version, ("-%s" % type if type else ""), filechecksum, ext) | router_name = d['router_name'].replace('-', '') result = "%s-%s-%s%s-%s%s" % (d['hostname'], router_name, version, ("-%s" % type if type else "-all"), filechecksum, ext) | def generate_image(d): """ Generates an image accoording to given configuration. """ logging.debug(repr(d)) if d['imagebuilder'] not in IMAGEBUILDERS: raise Exception("Invalid imagebuilder specified!") x = OpenWrtConfig() x.setUUID(d['uuid']) x.setOpenwrtVersion(d['openwrt_ver']) x.setArch(d['arch']) x.setPortLayout(d['port_layout']) x.setWifiIface(d['iface_wifi'], d['driver'], d['channel']) x.setWifiAnt(d['rx_ant'], d['tx_ant']) x.setLanIface(d['iface_lan']) x.setNodeType("adhoc") x.setPassword(d['root_pass']) x.setHostname(d['hostname']) x.setIp(d['ip']) x.setSSID(d['ssid']) # Add WAN interface and all subnets if d['wan_dhcp']: x.addInterface("wan", d['iface_wan'], init = True) else: x.addInterface("wan", d['iface_wan'], d['wan_ip'], d['wan_cidr'], d['wan_gw'], init = True) for subnet in d['subnets']: x.addSubnet(str(subnet['iface']), str(subnet['network']), subnet['cidr'], subnet['dhcp'], True) x.setCaptivePortal(d['captive_portal']) if d['vpn']: x.setVpn(d['vpn_username'], d['vpn_password'], d['vpn_mac'], d['vpn_limit']) if d['lan_wifi_bridge']: x.enableLanWifiBridge() if d['lan_wan_switch']: x.switchWanToLan() # Add optional packages for package in d['opt_pkg']: x.addPackage(package) # Cleanup stuff from previous builds os.chdir(WORKDIR) os.system("rm -rf build/files/*") os.system("rm -rf build/%s/bin/*" % d['imagebuilder']) os.mkdir("build/files/etc") x.generate("build/files/etc") if d['only_config']: # Just pack configuration and send it prefix = hashlib.md5(os.urandom(32)).hexdigest()[0:16] tempfile = os.path.join(DESTINATION, prefix + "-config.zip") zip = ZipFile(tempfile, 'w', ZIP_DEFLATED) os.chdir('build/files') for root, dirs, files in os.walk("etc"): for file in files: zip.write(os.path.join(root, file)) zip.close() # Generate checksum f = open(tempfile, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() result = "%s-%s-config-%s.zip" % (d['hostname'], d['router_name'], filechecksum) destination = os.path.join(DESTINATION, result) os.rename(tempfile, destination) # Send an e-mail t = loader.get_template('generator/email_config.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'config' : result, 'checksum' : checksum, 'network' : { 'name' : settings.NETWORK_NAME, 'home' : settings.NETWORK_HOME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) }, 'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None) }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Configuration for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) else: # Generate full image x.build("build/%s" % d['imagebuilder']) # Read image version try: f = open(glob('%s/build/%s/build_dir/target-*/root-*/etc/version' % (WORKDIR, d['imagebuilder']))[0], 'r') version = f.read().strip().replace('.', '_') f.close() except: version = 'unknown' # Get resulting image files = [] for file, type in d['imagefiles']: file = str(file) source = "%s/build/%s/bin/%s" % (WORKDIR, 
d['imagebuilder'], file) f = open(source, 'r') checksum = hashlib.md5(f.read()) f.close() # We can take just first 22 characters as checksums are fixed size and we can reconstruct it filechecksum = urlsafe_b64encode(checksum.digest())[:22] checksum = checksum.hexdigest() ext = os.path.splitext(file)[1] result = "%s-%s-%s%s-%s%s" % (d['hostname'], d['router_name'], version, ("-%s" % type if type else ""), filechecksum, ext) destination = os.path.join(DESTINATION, result) os.rename(source, destination) files.append({ 'name' : result, 'checksum' : checksum }) # Send an e-mail t = loader.get_template('generator/email.txt') c = Context({ 'hostname' : d['hostname'], 'ip' : d['ip'], 'username' : d['vpn_username'], 'files' : files, 'network' : { 'name' : settings.NETWORK_NAME, 'home' : settings.NETWORK_HOME, 'contact' : settings.NETWORK_CONTACT, 'description' : getattr(settings, 'NETWORK_DESCRIPTION', None) }, 'images_bindist_url' : getattr(settings, 'IMAGES_BINDIST_URL', None) }) send_mail( settings.EMAIL_SUBJECT_PREFIX + (_("Router images for %s/%s") % (d['hostname'], d['ip'])), t.render(c), settings.EMAIL_IMAGE_GENERATOR_SENDER, [d['email']], fail_silently = False ) | 138bcd7573be01f98360aeb1f0b9ba6c80cc2b1b /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/138bcd7573be01f98360aeb1f0b9ba6c80cc2b1b/gennyd.py |
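The filename fix in this row does two things: it strips '-' from the router name so the dash-separated fields of the result stay parseable, and it substitutes "-all" when no image type is given so the field count stays constant. A self-contained sketch of the naming scheme (function name and sample values hypothetical):

import hashlib
from base64 import urlsafe_b64encode

def image_filename(hostname, router_name, version, img_type, payload, ext):
    digest = hashlib.md5(payload).digest()
    # 22 chars suffice: a 16-byte MD5 digest is 22 base64 chars plus padding
    checksum = urlsafe_b64encode(digest)[:22].decode('ascii')
    router_name = router_name.replace('-', '')   # keep '-' a field separator
    type_part = "-%s" % img_type if img_type else "-all"
    return "%s-%s-%s%s-%s%s" % (hostname, router_name, version,
                                type_part, checksum, ext)

print(image_filename("node1", "wrt54-gl", "8_09_1", "", b"image bytes", ".bin"))
# e.g. node1-wrt54gl-8_09_1-all-<22-char checksum>.bin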
raise TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0]) | raise template.TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0]) | def setcontext(parser, token): """ Sets (updates) current template context with the rendered output of the block inside tags. """ nodelist = parser.parse(('endsetcontext',)) args = list(token.split_contents()) if len(args) != 3 or args[1] != "as": raise TemplateSyntaxError("'%s' tag expected format is 'as name'" % args[0]) variable = args[2] parser.delete_first_token() return SetContextNode(nodelist, variable) | f902f594dd95a58c1ade9ec5956970a29b5a2e5c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/f902f594dd95a58c1ade9ec5956970a29b5a2e5c/misc.py |
raise TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0]) | raise template.TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0]) | def notice(parser, token): """ Renders notice. """ nodelist = parser.parse(('endnotice',)) args = list(token.split_contents()) if len(args) > 3: raise TemplateSyntaxError("'%s' tag requires at most two arguments" % args[0]) classes = args[2] if len(args) > 2 else '""' notice_type = args[1] if len(args) > 1 else '""' parser.delete_first_token() notice_type = parser.compile_filter(notice_type) classes = parser.compile_filter(classes) return NoticeNode(nodelist, notice_type, classes) | f902f594dd95a58c1ade9ec5956970a29b5a2e5c /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/f902f594dd95a58c1ade9ec5956970a29b5a2e5c/misc.py |
form = InfoStickerForm({ | form = InfoStickerForm(initial = { | def sticker(request): """ Display a form for generating an info sticker. """ user = UserAccount.for_user(request.user) # We want disabled error to show only after POST (to be same as image generation behavior) disabled = False if request.method == 'POST': form = InfoStickerForm(request.POST) if form.is_valid(): if getattr(settings, 'STICKERS_ENABLED', None): return HttpResponseRedirect(form.save(user)) else: disabled = True else: form = InfoStickerForm({ 'name' : user.name, 'phone' : user.phone, 'project' : user.project.id if user.project else 0 }) return render_to_response('nodes/sticker.html', { 'form' : form, 'stickers_disabled' : disabled }, context_instance = RequestContext(request) ) | bc4b2a9359248ddb8c6b0b9ce6efcdb4e9c36a5d /local1/tlutelli/issta_data/temp/all_python//python/2010_temp/2010/11345/bc4b2a9359248ddb8c6b0b9ce6efcdb4e9c36a5d/views.py |
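The last row swaps positional data for the initial keyword when constructing the form. Passing a dict positionally binds the form, so the first GET render runs validation and shows 'required' errors; initial= merely pre-fills an unbound form. A minimal illustration (hypothetical two-field form; assumes a configured Django environment):

from django import forms

class InfoStickerForm(forms.Form):
    name = forms.CharField()
    phone = forms.CharField()

defaults = {'name': 'Alice', 'phone': ''}

bound = InfoStickerForm(defaults)            # bound to data
assert bound.is_bound
assert not bound.is_valid()                  # '' fails the required check

unbound = InfoStickerForm(initial=defaults)  # pre-filled, not validated
assert not unbound.is_bound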