bugged (string, lengths 4–228k) | fixed (string, lengths 0–96.3M) | __index_level_0__ (int64, 0–481k)
---|---|---|
def test_suite(): optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS checker = renormalizing.RENormalizing([ (re.compile(r'<zope\.testing\.doctest\.'), '<doctest.'), ]) tests = ['../index.txt', 'table-example.txt', 'README.txt', 'bugs.txt', 'capture.txt'] m = manuel.ignore.Manuel() m += manuel.doctest.Manuel(optionflags=optionflags, checker=checker) m += manuel.codeblock.Manuel() m += manuel.capture.Manuel() return manuel.testing.TestSuite(m, *tests, globs={'path_to_test': os.path.join(here, 'bugs.txt')})
|
def test_suite(): optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS checker = renormalizing.RENormalizing([ (re.compile(r'<zope\.testing\.doctest\.'), '<doctest.'), ]) tests = ['../index.txt', 'table-example.txt', 'README.txt', 'bugs.txt', 'capture.txt'] m = manuel.ignore.Manuel() m += manuel.doctest.Manuel(optionflags=optionflags, checker=checker) m += manuel.codeblock.Manuel() m += manuel.capture.Manuel() return manuel.testing.TestSuite(m, *tests, **dict( globs={'path_to_test': os.path.join(here, 'bugs.txt')}))
| 480,600 |
def testUTF8(self): self.stc.encoding = detectEncoding(self.utf8) print self.stc.encoding self.stc.decodeText(self.utf8) self.stc.prepareEncoding() assert self.utf8 == self.stc.refstc.encoded
|
def testUTF8(self): text = self.utf8 self.stc.encoding, self.stc.refstc.bom = detectEncoding(text) print self.stc.encoding self.stc.decodeText(self.utf8) self.stc.prepareEncoding() assert self.utf8 == self.stc.refstc.encoded
| 480,601 |
def testUTF8(self): self.stc.encoding = detectEncoding(self.utf8) print self.stc.encoding self.stc.decodeText(self.utf8) self.stc.prepareEncoding() assert self.utf8 == self.stc.refstc.encoded
|
def testUTF8(self): self.stc.encoding = detectEncoding(self.utf8) print self.stc.encoding self.stc.decodeText(text) self.stc.prepareEncoding() assert self.utf8 == self.stc.refstc.encoded
| 480,602 |
def testUTF8(self): self.stc.encoding = detectEncoding(self.utf8) print self.stc.encoding self.stc.decodeText(self.utf8) self.stc.prepareEncoding() assert self.utf8 == self.stc.refstc.encoded
|
def testUTF8(self): self.stc.encoding = detectEncoding(self.utf8) print self.stc.encoding self.stc.decodeText(self.utf8) self.stc.prepareEncoding() assert text == self.stc.refstc.encoded def testUTF8BOM(self): text = self.utf8_bom self.stc.encoding, self.stc.refstc.bom = detectEncoding(text) print self.stc.encoding self.stc.decodeText(text) self.stc.prepareEncoding() assert text == self.stc.refstc.encoded
| 480,603 |
def testChangeLatin(self): self.stc.refstc.encoding = detectEncoding(self.utf8) print self.stc.refstc.encoding self.stc.decodeText(self.utf8) unicode1 = self.stc.GetLine(1) print repr(unicode1) start = self.stc.FindText(0, self.stc.GetLength(), 'UTF-8') self.stc.SetTargetStart(start) self.stc.SetTargetEnd(start+5) self.stc.ReplaceTarget('latin-1') self.stc.prepareEncoding() print self.stc.refstc.encoded unicode2 = self.stc.GetLine(1) print repr(unicode2) assert unicode1 == unicode2
|
def testChangeLatin(self): self.stc.refstc.encoding, self.stc.refstc.bom = detectEncoding(self.utf8) print self.stc.refstc.encoding self.stc.decodeText(self.utf8) unicode1 = self.stc.GetLine(1) print repr(unicode1) start = self.stc.FindText(0, self.stc.GetLength(), 'UTF-8') self.stc.SetTargetStart(start) self.stc.SetTargetEnd(start+5) self.stc.ReplaceTarget('latin-1') self.stc.prepareEncoding() print self.stc.refstc.encoded unicode2 = self.stc.GetLine(1) print repr(unicode2) assert unicode1 == unicode2
| 480,604 |
def testLatin1(self): self.stc.refstc.encoding = detectEncoding(self.latin1) print repr(self.latin1) utf8 = unicode(self.latin1, "iso-8859-1").encode('utf-8') print repr(utf8) print repr(utf8.decode('utf-8')) print repr(self.latin1.decode('latin-1')) self.stc.decodeText(self.latin1) print repr(self.stc.GetText()) print self.stc.refstc.encoding self.stc.prepareEncoding() print "encoded: " + repr(self.stc.refstc.encoded) assert self.latin1 == self.stc.refstc.encoded
|
def testLatin1(self): self.stc.refstc.encoding, self.stc.refstc.bom = detectEncoding(self.latin1) print repr(self.latin1) utf8 = unicode(self.latin1, "iso-8859-1").encode('utf-8') print repr(utf8) print repr(utf8.decode('utf-8')) print repr(self.latin1.decode('latin-1')) self.stc.decodeText(self.latin1) print repr(self.stc.GetText()) print self.stc.refstc.encoding self.stc.prepareEncoding() print "encoded: " + repr(self.stc.refstc.encoded) assert self.latin1 == self.stc.refstc.encoded
| 480,605 |
def finishRecordingHook(self, accelerator_text): self.mode.buffer.stc.setRemappedAccelerator(self.action, accelerator_text, self.append) wx.CallAfter(self.mode.resetList)
|
def finishRecordingHook(self, accelerator_text): self.mode.buffer.stc.setRemappedAccelerator(self.action, accelerator_text, self.append) wx.CallAfter(self.mode.resetList)
| 480,606 |
def startupFailureCallback(self, p): dprint("Couldn't run %s" % p.cmd)
|
def startupFailureCallback(self, p): dprint("Couldn't run %s" % p.cmd)
| 480,607 |
def startupFailureCallback(self, p): self.callback(self)
|
def startupFailureCallback(self, p, text): self.callback(self)
| 480,608 |
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
|
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
| 480,609 |
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
|
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
| 480,610 |
def processMinibuffer(self, minibuffer, mode, text): #dprint("Revert to encoding %s" % text) # see if it's a known encoding try: 'test'.encode(text) # if we get here, it's valid self.mode.buffer.revert(text) if text != self.mode.buffer.stc.encoding: self.mode.setStatusText("Failed converting to %s; loaded as binary (probably not what you want)" % text) except LookupError: self.mode.setStatusText("Unknown encoding %s" % text)
|
def processMinibuffer(self, minibuffer, mode, text): #dprint("Revert to encoding %s" % text) # see if it's a known encoding try: 'test'.encode(text) # if we get here, it's valid self.mode.buffer.revert(encoding=text) if text != self.mode.buffer.stc.encoding: self.mode.setStatusText("Failed converting to %s; loaded as binary (probably not what you want)" % text) except LookupError: self.mode.setStatusText("Unknown encoding %s" % text)
| 480,611 |
def _do_callback(self, result, callback): # Ignore encoding errors and return an empty line instead try: result = result.decode(sys.getfilesystemencoding()) except UnicodeDecodeError: result = os.linesep
|
def _do_callback(self, result, callback): # Ignore encoding errors and return an empty line instead try: result = result.decode(sys.getfilesystemencoding()) except UnicodeDecodeError: result = os.linesep
| 480,612 |
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
|
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
| 480,613 |
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
|
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
| 480,614 |
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") try: #dprint(cls.layout[major_mode_keyword]) return cls.layout[major_mode_keyword][str(url)] except KeyError: return {}
|
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") try: #dprint(cls.layout[major_mode_keyword])key = str(url) if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
| 480,615 |
def updateLayoutSubsequent(cls, major_mode_keyword, url, perspective): #dprint("updateLayoutSubsequent") if major_mode_keyword not in cls.layout: cls.layout[major_mode_keyword] = {} cls.layout[major_mode_keyword][str(url)] = perspective
|
def updateLayoutSubsequent(cls, major_mode_keyword, url, perspective): #dprint("updateLayoutSubsequent") if major_mode_keyword not in cls.layout: cls.layout[major_mode_keyword] = {} cls.layout[major_mode_keyword][unicode(url)] = perspective
| 480,616 |
def reportSuccess(self, text, data): import wx wx.CallAfter(self.reportSuccessGUI, text, data)
|
def reportSuccess(self, text, data=None): import wx wx.CallAfter(self.reportSuccessGUI, text, data)
| 480,617 |
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
|
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
| 480,618 |
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
|
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
| 480,619 |
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
|
def run(self): """Run the process until finished or aborted. Don't call this directly instead call self.start() to start the thread else this will run in the context of the current thread. @note: overridden from Thread
| 480,620 |
def scanLanguage(cls, header, modes): """Scan for a pattern match in the first bytes of the file. Determine if there is a 'magic' pattern in the first n bytes of the file that can associate it with a major mode.
|
def scanLanguage(cls, header, modes): """Scan for a pattern match in the first bytes of the file. Determine if there is a 'magic' pattern in the first n bytes of the file that can associate it with a major mode.
| 480,621 |
def getEuclideanDistanceByBand(self,nbins=500): """Calculate the euclidean distance for every pixel in two cubes using bands Fast for BSQ cubes, slow for BIL, and extremely slow for BIP. """ self.heatmap = HSI.createCube('bsq', self.lines, self.samples, 1, self.dtype) data = self.heatmap.getBandRaw(0) working = numpy.zeros((self.lines, self.samples), dtype=numpy.float32) for i in range(self.bands): if self.bbl[i]: band1 = self.cube1.getBand(i) band2 = self.cube2.getBand(i) band = band1 - band2 working += numpy.square(band) data = numpy.sqrt(working) self.dprint(data) return self.euclidean
|
def getEuclideanDistanceByBand(self,nbins=500): """Calculate the euclidean distance for every pixel in two cubes using bands Fast for BSQ cubes, slow for BIL, and extremely slow for BIP. """ self.euclidean = HSI.createCube('bsq', self.lines, self.samples, 1, self.dtype) data = self.euclidean.getBandRaw(0) working = numpy.zeros((self.lines, self.samples), dtype=numpy.float32) for i in range(self.bands): if self.bbl[i]: band1 = self.cube1.getBand(i) band2 = self.cube2.getBand(i) band = band1 - band2 working += numpy.square(band) data = numpy.sqrt(working) self.dprint(data) return self.euclidean
| 480,622 |
def SetLabel(self, value): """ Set the label's current text """ rvalue = self.label.SetLabel(value) self.Refresh(True) return rvalue
|
def SetLabel(self, value): """ Set the label's current text """ rvalue = self.label.SetLabel(value) self.Refresh(True) return rvalue
| 480,623 |
def __init__(self, frame, delay=5000, style=wx.BORDER_SIMPLE): """Creates (but doesn't show) the PopupStatusBar @param frame: the parent frame @kwarg delay: (optional) delay in milliseconds before each message decays """ wx.PopupWindow.__init__(self, frame, style) self.SetBackgroundColour("#B6C1FF") self.stack = wx.BoxSizer(wx.VERTICAL) self.SetSizer(self.stack) self.timer = wx.Timer(self) self.delay = delay self.Bind(wx.EVT_TIMER, self.OnTimer) # Take over the frame's EVT_MENU_HIGHLIGHT frame.Bind(wx.EVT_MENU_HIGHLIGHT, self.OnMenuHighlight) # List of display times self.display_times = [] self.last_is_status = False self.Hide()
|
def __init__(self, frame, delay=5000, style=wx.BORDER_SIMPLE): """Creates (but doesn't show) the PopupStatusBar @param frame: the parent frame @kwarg delay: (optional) delay in milliseconds before each message decays """ PopupClass.__init__(self, frame, style) self.SetBackgroundColour("#B6C1FF") self.stack = wx.BoxSizer(wx.VERTICAL) self.SetSizer(self.stack) self.timer = wx.Timer(self) self.delay = delay self.Bind(wx.EVT_TIMER, self.OnTimer) # Take over the frame's EVT_MENU_HIGHLIGHT frame.Bind(wx.EVT_MENU_HIGHLIGHT, self.OnMenuHighlight) # List of display times self.display_times = [] self.last_is_status = False self.Hide()
| 480,624 |
def OnTimer(self, evt): if not self.display_times: # It is possible the timer could go off after the call to clear(), # so if the list is empty, return without restarting the timer return current = time.time() * 1000 #print("Timer at %f" % current) remove = True if (self.last_is_status and len(self.display_times) == 1): last_time, last_text = self.display_times[0] expires = last_time + self.delay #print("expires at: %d current=%d" % (expires, current)) if last_time + self.delay > current: remove = False if remove: expired_time, expired_text = self.display_times.pop(0) self.stack.Remove(expired_text) expired_text.Destroy() if self.display_times: remaining = self.delay - (current - self.display_times[0][0]) #print("Next timer: %d" % remaining) #print("\n".join("%f" % d[0] for d in self.display_times)) if remaining < 0: remaining = 1 self.timer.Start(remaining, True) self.positionAndShow() else: self.clear()
|
def OnTimer(self, evt): if not self.display_times: # It is possible the timer could go off after the call to clear(), # so if the list is empty, return without restarting the timer return current = time.time() * 1000 #print("Timer at %f" % current) remove = True if (self.last_is_status and len(self.display_times) == 1): last_time, last_text = self.display_times[0] expires = last_time + self.delay #print("expires at: %d current=%d" % (expires, current)) if last_time + self.delay > current: remove = False if remove: expired_time, expired_text = self.display_times.pop(0) self.stack.Remove(expired_text) expired_text.Destroy() if self.display_times: remaining = self.delay - (current - self.display_times[0][0]) #print("Next timer: %d" % remaining) #print("\n".join("%f" % d[0] for d in self.display_times)) if remaining < 0: remaining = 1 self.timer.Start(remaining, True) self.positionAndShow() else: self.clear()
| 480,625 |
def OnTimer(self, evt): if not self.display_times: # It is possible the timer could go off after the call to clear(), # so if the list is empty, return without restarting the timer return current = time.time() * 1000 #print("Timer at %f" % current) remove = True if (self.last_is_status and len(self.display_times) == 1): last_time, last_text = self.display_times[0] expires = last_time + self.delay #print("expires at: %d current=%d" % (expires, current)) if last_time + self.delay > current: remove = False if remove: expired_time, expired_text = self.display_times.pop(0) self.stack.Remove(expired_text) expired_text.Destroy() if self.display_times: remaining = self.delay - (current - self.display_times[0][0]) #print("Next timer: %d" % remaining) #print("\n".join("%f" % d[0] for d in self.display_times)) if remaining < 0: remaining = 1 self.timer.Start(remaining, True) self.positionAndShow() else: self.clear()
|
def OnTimer(self, evt): if not self.display_times: # It is possible the timer could go off after the call to clear(), # so if the list is empty, return without restarting the timer return current = time.time() * 1000 #print("Timer at %f" % current) remove = True if (self.last_is_status and len(self.display_times) == 1): last_time, last_text = self.display_times[0] expires = last_time + self.delay #print("expires at: %d current=%d" % (expires, current)) if last_time + self.delay > current: remove = False if remove: expired_time, expired_text = self.display_times.pop(0) self.stack.Remove(expired_text) expired_text.Destroy() if self.display_times: remaining = self.delay - (current - self.display_times[0][0]) #print("Next timer: %d" % remaining) #print("\n".join("%f" % d[0] for d in self.display_times)) if remaining < 1: remaining = 1 self.timer.Start(remaining, True) self.positionAndShow() else: self.clear()
| 480,626 |
def findNextWord(self, utext, index, length): """Find the next valid word to check. Designed to be overridden in subclasses, this method takes a starting position in an array of text and returns a tuple indicating the next valid word in the string. @param utext: array of unicode chars @param i: starting index within the array to search @param length: length of the text @return: tuple indicating the word start and end indexes, or (-1, -1) indicating that the end of the array was reached and no word was found """ while index < length: if utext[index].isalpha(): end = index + 1 while end < length and utext[end].isalpha(): end += 1 return (index, end) index += 1 return (-1, -1)
|
def findNextWord(self, utext, index, length): """Find the next valid word to check. Designed to be overridden in subclasses, this method takes a starting position in an array of text and returns a tuple indicating the next valid word in the string. @param utext: array of unicode chars @param i: starting index within the array to search @param length: length of the text @return: tuple indicating the word start and end indexes, or (-1, -1) indicating that the end of the array was reached and no word was found """ while index < length: if utext[index].isalpha(): end = index + 1 while end < length and (utext[end].isalpha() or utext[end] == "'"): end += 1 return (index, end) index += 1 return (-1, -1)
| 480,627 |
def checkWord(self, pos=None, atend=False): """Check the word at the current or specified position. @param pos: position of a character in the word (or at the start or end of the word), or None to use the current position @param atend: True if you know the cursor is at the end of the word """ if pos is None: pos = self.stc.GetCurrentPos() if atend: end = pos else: end = self.stc.WordEndPosition(pos, True) start = self.stc.WordStartPosition(pos, True) if self._spelling_debug: print("%d-%d: %s" % (start, end, self.stc.GetTextRange(start, end))) self.checkRange(start, end)
|
def checkWord(self, pos=None, atend=False): """Check the word at the current or specified position. @param pos: position of a character in the word (or at the start or end of the word), or None to use the current position @param atend: True if you know the cursor is at the end of the word """ if pos is None: pos = self.stc.GetCurrentPos() if atend: end = pos else: end = self.getLikelyWordEnd(pos) start = self.getLikelyWordStart(pos) if self._spelling_debug: print("%d-%d: %s" % (start, end, self.stc.GetTextRange(start, end))) self.checkRange(start, end)
| 480,628 |
def processDirtyRanges(self): cursor = self.stc.GetCurrentPos() # Check that the cursor has moved off the current word and if so check # its spelling if self.current_word_start > 0: if cursor < self.current_word_start or cursor > self.current_word_end: self.checkRange(self.current_word_start, self.current_word_end) self.current_word_start = -1 # Check spelling around the region currently being typed if self.current_dirty_start >= 0: range_start, range_end = self.processDirtyRange(self.current_dirty_start, self.current_dirty_end) # If the cursor is in the middle of a word, remove the spelling # markers if cursor >= range_start and cursor <= range_end: word_start = self.stc.WordStartPosition(cursor, True) word_end = self.stc.WordEndPosition(cursor, True) mask = self._spelling_indicator_mask self.stc.StartStyling(word_start, mask) self.stc.SetStyling(word_end - word_start, 0) if word_start != word_end: self.current_word_start = word_start self.current_word_end = word_end else: self.current_word_start = -1 self.current_dirty_start = self.current_dirty_end = -1 # Process a chunk of dirty ranges needed = min(len(self.dirty_ranges), self.dirty_range_count_per_idle) ranges = self.dirty_ranges[0:needed] self.dirty_ranges = self.dirty_ranges[needed:] for start, end in ranges: if self._spelling_debug: print("processing %d-%d" % (start, end)) self.processDirtyRange(start, end)
|
def processDirtyRanges(self): cursor = self.stc.GetCurrentPos() # Check that the cursor has moved off the current word and if so check # its spelling if self.current_word_start > 0: if cursor < self.current_word_start or cursor > self.current_word_end: self.checkRange(self.current_word_start, self.current_word_end) self.current_word_start = -1 # Check spelling around the region currently being typed if self.current_dirty_start >= 0: range_start, range_end = self.processDirtyRange(self.current_dirty_start, self.current_dirty_end) # If the cursor is in the middle of a word, remove the spelling # markers if cursor >= range_start and cursor <= range_end: word_start = self.getLikelyWordStart(cursor) word_end = self.getLikelyWordEnd(cursor) if self._spelling_debug: print("cursor in middle of word, removing styling %d-%d" % (word_start, word_end)) mask = self._spelling_indicator_mask self.stc.StartStyling(word_start, mask) self.stc.SetStyling(word_end - word_start, 0) if word_start != word_end: self.current_word_start = word_start self.current_word_end = word_end else: self.current_word_start = -1 self.current_dirty_start = self.current_dirty_end = -1 # Process a chunk of dirty ranges needed = min(len(self.dirty_ranges), self.dirty_range_count_per_idle) ranges = self.dirty_ranges[0:needed] self.dirty_ranges = self.dirty_ranges[needed:] for start, end in ranges: if self._spelling_debug: print("processing %d-%d" % (start, end)) self.processDirtyRange(start, end)
| 480,629 |
def processDirtyRange(self, start, end): range_start = self.stc.WordStartPosition(start, True) range_end = self.stc.WordEndPosition(end, True) if self._spelling_debug: print("processing dirty range %d-%d (modified from %d-%d): %s" % (range_start, range_end, start, end, repr(self.stc.GetTextRange(range_start, range_end)))) self.checkRange(range_start, range_end) return range_start, range_end
|
def processDirtyRange(self, start, end): range_start = self.getLikelyWordStart(start) range_end = self.getLikelyWordEnd(end) if self._spelling_debug: print("processing dirty range %d-%d (modified from %d-%d): %s" % (range_start, range_end, start, end, repr(self.stc.GetTextRange(range_start, range_end)))) self.checkRange(range_start, range_end) return range_start, range_end
| 480,630 |
def getMatchGenerator(self, url, matcher): if isinstance(url, vfs.Reference): if url.scheme != "file": dprint("vfs not threadsafe; skipping %s" % unicode(url).encode("utf-8")) return url = unicode(url.path).encode("utf-8") fh = open(url, "rb") return matcher.iterMatches(url, fh)
|
def getMatchGenerator(self, url, matcher): if isinstance(url, vfs.Reference): if url.scheme != "file": dprint("vfs not threadsafe; skipping %s" % unicode(url).encode("utf-8")) return url = unicode(url.path).encode("utf-8") try: fh = open(url, "rb") return matcher.iterMatches(url, fh) except: dprint("Failed opening %s" % url) return iter([])
| 480,631 |
def action(self, index=-1, multiplier=1): self.mode.classprefs.print_style = index
|
def action(self, index=-1, multiplier=1): self.mode.classprefs.print_style = index
| 480,632 |
def convertSection(cls, section):
|
def convertSection(cls, section):
| 480,633 |
def getNewLineIndentString(self, stc, col, ind): """Return the number of characters to be indented on a new line @param stc: stc of interest @param col: column position of cursor on line @param ind: indentation in characters of cursor on line """ return stc.GetIndentString(7)
|
def getNewLineIndentString(self, stc, col, ind): """Return the number of characters to be indented on a new line @param stc: stc of interest @param col: column position of cursor on line @param ind: indentation in characters of cursor on line """ return stc.GetIndentString(7)
| 480,634 |
def get_transport(transport_key): t = paramiko.Transport(transport_key) t.start_client() if not t.is_active(): raise OSError("Failure to connect to: '%s'" % str(transport_key)) pub_key = t.get_remote_server_key() return t
|
def get_transport(transport_key): t = paramiko.Transport(transport_key) t.start_client() if not t.is_active(): raise OSError("Failure to connect to: '%s'" % str(transport_key)) pub_key = t.get_remote_server_key() return t
| 480,635 |
def _get_client(cls, ref): newref = cls._copy_root_reference_without_username(ref) if newref in cls.connection_cache: client = cls.connection_cache[newref] if cls.debug: dprint("Found cached sftp connection: %s" % client) else: client = cls._get_sftp(ref) if cls.debug: dprint("Creating sftp connection: %s" % client) cls.connection_cache[newref] = client return client
|
def _get_client(cls, ref): newref = cls._copy_root_reference_without_username(ref) if newref in cls.connection_cache: client = cls.connection_cache[newref] if cls.debug: dprint("Found cached sftp connection: %s" % client) transport = client.get_channel().get_transport() if not transport.is_active(): dprint("Cached channel closed. Opening new connection for %s" % ref) client = None if client is None: client = cls._get_sftp(ref) if cls.debug: dprint("Creating sftp connection: %s" % client) cls.connection_cache[newref] = client return client
| 480,636 |
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url).encode("utf-8") if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if ukey != key and key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: layout = cls.layout[major_mode_keyword][ukey] except KeyError: layout = {} cls.dprint("%s: layout=%s" % (url, layout)) return layout
|
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url).encode("utf-8") if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if ukey != key and key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: layout = cls.layout[major_mode_keyword][ukey] except KeyError: layout = {} cls.dprint("%s: layout=%s" % (unicode(url), unicode(layout))) return layout
| 480,637 |
def __cmp__(self, other): return cmp(self.name, other.name)
|
def __cmp__(self, other): return cmp(self.name, other.name)
| 480,638 |
def run(self, text=""): assert self.dprint("Running %s in %s" % (self.cmd, self.working_dir)) savecwd = os.getcwd() try: os.chdir(self.working_dir) self.process = wx.Process(self.handler) self.process.Redirect(); self.pid = wx.Execute(self.cmd, wx.EXEC_ASYNC, self.process) finally: os.chdir(savecwd) if self.pid==0: assert self.dprint("startup failed") self.process = None wx.CallAfter(self.jobout.startupFailureCallback, self) else: wx.CallAfter(self.jobout.startupCallback, self) size = len(text) fh = self.process.GetOutputStream() assert self.dprint("sending text size=%d to %s" % (size,fh)) # sending large chunks of text to a process's stdin would sometimes # freeze, but breaking up into < 1024 byte pieces seemed to work # on all platforms if size > 1000: for i in range(0,size,1000): last = i+1000 if last>size: last=size assert self.dprint("sending text[%d:%d] to %s" % (i,last,fh)) fh.write(text[i:last]) assert self.dprint("last write = %s" % str(fh.LastWrite())) elif len(text) > 0: fh.write(text) self.process.CloseOutput() self.stdout = self.process.GetInputStream() self.stderr = self.process.GetErrorStream()
|
def run(self, text=""): assert self.dprint("Running %s in %s" % (self.cmd, self.working_dir)) savecwd = os.getcwd() try: os.chdir(self.working_dir) self.process = wx.Process(self.handler) self.process.Redirect(); if wx.Platform != '__WXMSW__': flag = wx.EXEC_ASYNC else: flag = wx.EXEC_NOHIDE self.pid = wx.Execute(self.cmd, flag, self.process) finally: os.chdir(savecwd) if self.pid==0: assert self.dprint("startup failed") self.process = None wx.CallAfter(self.jobout.startupFailureCallback, self) else: wx.CallAfter(self.jobout.startupCallback, self) size = len(text) fh = self.process.GetOutputStream() assert self.dprint("sending text size=%d to %s" % (size,fh)) # sending large chunks of text to a process's stdin would sometimes # freeze, but breaking up into < 1024 byte pieces seemed to work # on all platforms if size > 1000: for i in range(0,size,1000): last = i+1000 if last>size: last=size assert self.dprint("sending text[%d:%d] to %s" % (i,last,fh)) fh.write(text[i:last]) assert self.dprint("last write = %s" % str(fh.LastWrite())) elif len(text) > 0: fh.write(text) self.process.CloseOutput() self.stdout = self.process.GetInputStream() self.stderr = self.process.GetErrorStream()
| 480,639 |
def get_mtime(cls, ref): attrs = cls._stat(ref) return attrs.st_mtime
|
def get_mtime(cls, ref): attrs = cls._stat(ref) return attrs.st_mtime
| 480,640 |
def get_atime(cls, ref): attrs = cls._stat(ref) return attrs.st_atime
|
def get_atime(cls, ref): attrs = cls._stat(ref) return attrs.st_atime
| 480,641 |
def __init__(self, stc, *args, **kwargs): """Mixin must be initialized using this constructor. Keyword arguments are also available instead of calling the convenience functions. For L{setIndicator}, use C{indicator}, C{indicator_color}, and {indicator_style}; for L{setLanguage}, use C{language}; and for L{setMinimumWordSize}, use C{min_word_size}. See the descriptions of those methods for more info. @kwarg language: default language string recognized by enchant (e.g. "en_US", "kr_KR", etc. If a default language isn't explicitly here, the default language is taken from the class method L{setDefaultLanguage} @kwarg check_region: optional function to specify if the region should be spell checked. Function should return True if the position should be spell-checked; False if it doesn't make sense to spell check that part of the document. Function should be a bound method to the STC. @kwarg idle_count: number of idle events that have to occur before an idle event is actually processed. This reduces processor usage by only processing one out of every idle_count events. """ self.stc = stc self.setIndicator(kwargs.get('indicator', 2), kwargs.get('indicator_color', "#FF0000"), kwargs.get('indicator_style', wx.stc.STC_INDIC_SQUIGGLE)) self.setMinimumWordSize(kwargs.get('min_word_size', 3)) if 'language' in kwargs: self.setDefaultLanguage(kwargs['language']) if 'check_region' in kwargs: self._spell_check_region = kwargs['check_region'] else: self._spell_check_region = lambda s: True if 'idle_count' in kwargs: self._num_idle_ticks = kwargs['idle_count'] else: self._num_idle_ticks = 10 self._idle_ticks = 0 self._spelling_debug = True self._spelling_last_idle_line = -1 self.dirty_range_count_per_idle = 5 self._no_update = False self._last_block = -1 self.clearDirtyRanges()
|
def __init__(self, stc, *args, **kwargs): """Mixin must be initialized using this constructor. Keyword arguments are also available instead of calling the convenience functions. For L{setIndicator}, use C{indicator}, C{indicator_color}, and {indicator_style}; for L{setLanguage}, use C{language}; and for L{setMinimumWordSize}, use C{min_word_size}. See the descriptions of those methods for more info. @kwarg language: default language string recognized by enchant (e.g. "en_US", "kr_KR", etc. If a default language isn't explicitly here, the default language is taken from the class method L{setDefaultLanguage} @kwarg check_region: optional function to specify if the region should be spell checked. Function should return True if the position should be spell-checked; False if it doesn't make sense to spell check that part of the document. Function should be a bound method to the STC. @kwarg idle_count: number of idle events that have to occur before an idle event is actually processed. This reduces processor usage by only processing one out of every idle_count events. """ self.stc = stc self.setIndicator(kwargs.get('indicator', 2), kwargs.get('indicator_color', "#FF0000"), kwargs.get('indicator_style', wx.stc.STC_INDIC_SQUIGGLE)) self.setMinimumWordSize(kwargs.get('min_word_size', 3)) if 'language' in kwargs: self.setDefaultLanguage(kwargs['language']) if 'check_region' in kwargs: self._spell_check_region = kwargs['check_region'] else: self._spell_check_region = lambda s: True if 'idle_count' in kwargs: self._num_idle_ticks = kwargs['idle_count'] else: self._num_idle_ticks = 10 self._idle_ticks = 0 self._spelling_debug = False self._spelling_last_idle_line = -1 self.dirty_range_count_per_idle = 5 self._no_update = False self._last_block = -1 self.clearDirtyRanges()
| 480,642 |
def createColumns(self, list): list.InsertSizedColumn(0, "URL", min=100, greedy=False) list.InsertSizedColumn(1, "Line", min=10, greedy=False) list.InsertSizedColumn(2, "Match", min=300, greedy=True)
|
def createColumns(self, list): list.InsertSizedColumn(0, "File", min=100, greedy=False) list.InsertSizedColumn(1, "Line", min=10, greedy=False) list.InsertSizedColumn(2, "Match", min=300, greedy=True)
| 480,643 |
def getItemRawValues(self, index, item): return (unicode(item.url), item.line, unicode(item.text))
|
def getItemRawValues(self, index, item): return (unicode(item.short), item.line, unicode(item.text), item.url)
| 480,644 |
def OnItemActivated(self, evt): index = evt.GetIndex() orig_index = self.list.GetItemData(index) values = self.list.itemDataMap[orig_index] dprint(values) self.frame.open(values[0], options={'line':values[1] - 1})
|
def OnItemActivated(self, evt): index = evt.GetIndex() orig_index = self.list.GetItemData(index) values = self.list.itemDataMap[orig_index] dprint(values) self.frame.open(values[0], options={'line':values[1] - 1})
| 480,645 |
def action(self, index=-1, multiplier=1): s = self.mode # FIXME: Because the autoindenter depends on the styling information, # need to make sure the document is up to date. But, is this call to # style the entire document fast enough in practice, or will it have # to be optimized? s.Colourise(0, s.GetTextLength())
|
def action(self, index=-1, multiplier=1): s = self.mode # FIXME: Because the autoindenter depends on the styling information, # need to make sure the document is up to date. But, is this call to # style the entire document fast enough in practice, or will it have # to be optimized? s.Colourise(0, s.GetTextLength())
| 480,646 |
def action(self, index=-1, multiplier=1): s = self.mode # FIXME: Because the autoindenter depends on the styling information, # need to make sure the document is up to date. But, is this call to # style the entire document fast enough in practice, or will it have # to be optimized? s.Colourise(0, s.GetTextLength())
|
def action(self, index=-1, multiplier=1): s = self.mode # FIXME: Because the autoindenter depends on the styling information, # need to make sure the document is up to date. But, is this call to # style the entire document fast enough in practice, or will it have # to be optimized? s.Colourise(0, s.GetTextLength())
| 480,647 |
def __init__(self, string, match_case): try: if not match_case: flags = re.IGNORECASE else: flags = 0 self.cre = re.compile(string, flags) except re.error: self.cre = None self.last_match = None
|
def __init__(self, string, match_case): try: if not match_case: flags = re.IGNORECASE else: flags = 0 self.cre = re.compile(string, flags) self.error = "" except re.error, errmsg: self.cre = None self.last_match = None
| 480,648 |
def isValid(self): return bool(self.cre)
|
def isValid(self): return bool(self.cre)
| 480,649 |
def OnStartSearch(self, evt): if not self.isSearchRunning(): self.showSearchButton(True) method = self.buffer.stc.search_method.option if method.isValid(): status = SearchStatus(self) matcher = self.buffer.stc.search_type.option.getStringMatcher(self.search_text.GetValue()) ignorer = WildcardListIgnorer(self.ignore_filenames.GetValue()) if matcher.isValid(): self.buffer.stc.clearSearchResults() self.buffer.stc.setPrefix(method.getPrefix()) self.resetList() self.status_info.startProgress("Searching...") self.thread = SearchThread(self.buffer.stc, matcher, ignorer, status) self.thread.start() else: self.setStatusText("Invalid search string.") else: self.setStatusText(method.getErrorString())
|
def OnStartSearch(self, evt): if not self.isSearchRunning(): self.showSearchButton(True) method = self.buffer.stc.search_method.option if method.isValid(): status = SearchStatus(self) matcher = self.buffer.stc.search_type.option.getStringMatcher(self.search_text.GetValue()) ignorer = WildcardListIgnorer(self.ignore_filenames.GetValue()) if matcher.isValid(): self.buffer.stc.clearSearchResults() self.buffer.stc.setPrefix(method.getPrefix()) self.resetList() self.status_info.startProgress("Searching...") self.thread = SearchThread(self.buffer.stc, matcher, ignorer, status) self.thread.start() else: if hasattr(matcher, "getErrorString"): error = matcher.getErrorString() else: error = "Invalid search string." self.setStatusText(error) self.showSearchButton(False) else: self.setStatusText(method.getErrorString())
| 480,650 |
def getPrefix(self): prefix = unicode(self.pathname) if not prefix.endswith("/"): prefix += "/" return prefix
|
def getPrefix(self): prefix = unicode(self.pathname) if not prefix.endswith(os.sep): prefix += os.sep return prefix
| 480,651 |
def OnFocus(self, evt): """Callback used to pop down any springtabs. When the major mode loses keyboard focus, the springtabs should be cleared to allow the new focus receiver to display itself. This fails when the major mode never takes keyboard focus at all, in which case a focus-lost event is never generated and this method never gets called. """ self.wrapper.spring.clearRadio() self.frame.spring.clearRadio() evt.Skip()
|
def OnFocus(self, evt): """Callback used to pop down any springtabs. When the major mode loses keyboard focus, the springtabs should be cleared to allow the new focus receiver to display itself. This fails when the major mode never takes keyboard focus at all, in which case a focus-lost event is never generated and this method never gets called. """ try: self.wrapper.spring.clearRadio() self.frame.spring.clearRadio() except wx.PyDeadObjectError: pass evt.Skip()
| 480,652 |
def action(self, index=-1, multiplier=1): filename = self.frame.showSaveAs("Save Image as ENVI", wildcard="BIL (*.bil)|*.bil|BIP (*.bip)|*.bip|BSQ (*.bsq)|*.bsq") if filename: root, ext = os.path.splitext(filename) ext = ext.lower() if ext in ['.bil', '.bip', '.bsq']: handler = HyperspectralFileFormat.getHandlerByName("ENVI") if handler: try: self.mode.showBusy(True) self.mode.status_info.startProgress("Exporting to %s" % filename) wx.GetApp().cooperativeYield() if self.endian: options = {'byte_order': self.endian} handler.export(filename, self.mode.cube, options=options, progress=self.updateProgress) self.mode.status_info.stopProgress("Saved %s" % filename) wx.GetApp().cooperativeYield() finally: self.mode.showBusy(False) else: self.mode.setStatusText("Can't find ENVI handler") else: self.frame.showErrorDialog("Unrecognized file format %s\n\nThe filename extension determines the\ninterleave format. Use a filename extension of\n.bip, .bil, or .bsq" % filename)
|
def action(self, index=-1, multiplier=1): filename = self.frame.showSaveAs("Save Image as ENVI", wildcard="BIL (*.bil)|*.bil|BIP (*.bip)|*.bip|BSQ (*.bsq)|*.bsq") if filename: root, ext = os.path.splitext(filename) ext = ext.lower() if ext in ['.bil', '.bip', '.bsq']: handler = HyperspectralFileFormat.getHandlerByName("ENVI") if handler: try: self.mode.showBusy(True) self.mode.status_info.startProgress("Exporting to %s" % filename) wx.GetApp().cooperativeYield() if self.endian: options['byte_order'] = self.endian handler.export(filename, self.mode.cube, options=options, progress=self.updateProgress) self.mode.status_info.stopProgress("Saved %s" % filename) wx.GetApp().cooperativeYield() finally: self.mode.showBusy(False) else: self.mode.setStatusText("Can't find ENVI handler") else: self.frame.showErrorDialog("Unrecognized file format %s\n\nThe filename extension determines the\ninterleave format. Use a filename extension of\n.bip, .bil, or .bsq" % filename)
| 480,653 |
def createColumns(self, list): list.InsertSizedColumn(0, "File", min=100, greedy=False) list.InsertSizedColumn(1, "Line", min=10, greedy=False) list.InsertSizedColumn(2, "Match", min=300, greedy=True)
|
def createColumns(self, list): list.InsertSizedColumn(0, "File", min=100, max=250, greedy=False) list.InsertSizedColumn(1, "Line", min=10, greedy=False) list.InsertSizedColumn(2, "Match", min=300, greedy=True)
| 480,654 |
def createColumns(self, list): list.InsertSizedColumn(0, "File", min=100, greedy=False) list.InsertSizedColumn(1, "Line", min=10, greedy=False) list.InsertSizedColumn(2, "Match", min=300, greedy=True)
|
def createColumns(self, list): list.InsertSizedColumn(0, "File", min=100, greedy=False) list.InsertSizedColumn(1, "Line", min=10, greedy=False) list.InsertSizedColumn(2, "Match", min=300, greedy=True)
| 480,655 |
def save(self, url): if self.mmap: self.mmap.flush() self.mmap.sync() else: self.raw.tofile(str(url.path))
|
def save(self, url): if self.mmap: self.mmap.flush() self.mmap.sync() else: filename = unicode(url.path) try: self.raw.tofile(filename) except ValueError: fd = open(filename, "wb") flat = self.raw.ravel() size = flat.size start = 0 while start < size: last = start + 10000 if last > size: last = size fd.write(flat[start:last].tostring()) start = last
| 480,656 |
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
|
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url).encode("utf-8") if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
| 480,657 |
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
|
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if ukey != key and key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
| 480,658 |
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
|
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: layout = cls.layout[major_mode_keyword][ukey] except KeyError: return {}
| 480,659 |
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: return {}
|
def getLayoutSubsequent(cls, major_mode_keyword, url): #dprint("getLayoutSubsequent") ukey = unicode(url) if major_mode_keyword in cls.layout: try: key = str(url) # Convert old style string keyword to unicode keyword if key in cls.layout[major_mode_keyword]: cls.layout[major_mode_keyword][ukey] = cls.layout[major_mode_keyword][key] del cls.layout[major_mode_keyword][key] except UnicodeEncodeError: pass try: return cls.layout[major_mode_keyword][ukey] except KeyError: layout = {} cls.dprint("%s: layout=%s" % (url, layout)) return layout
| 480,660 |
def showError(self, message=None): data = message.data if isinstance(data, tuple) or isinstance(data, list): frame = data[0] text = data[1] else: frame = wx.GetApp().GetTopWindow() text = data if self.frame == frame: paneinfo = frame._mgr.GetPane(self) if self.classprefs.unhide_on_message: if not paneinfo.IsShown(): paneinfo.Show(True) frame._mgr.Update() if message.topic[-1] == 'wrap': columns = 72 import textwrap text = textwrap.fill(text, columns) self.addMessage(text)
|
def showError(self, message=None): data = message.data if isinstance(data, tuple) or isinstance(data, list): frame = data[0] text = data[1] else: frame = wx.GetApp().GetTopWindow() text = data if not self.frame: dprint("Frame has been deleted!!! Message was:") dprint(message) dlg = wx.MessageDialog(wx.GetApp().GetTopWindow(), message, "Error message for deleted frame!!!", wx.OK | wx.ICON_EXCLAMATION ) retval=dlg.ShowModal() dlg.Destroy() elif self.frame == frame: paneinfo = frame._mgr.GetPane(self) if self.classprefs.unhide_on_message: if not paneinfo.IsShown(): paneinfo.Show(True) frame._mgr.Update() if message.topic[-1] == 'wrap': columns = 72 import textwrap text = textwrap.fill(text, columns) self.addMessage(text)
| 480,661 |
def action(self, index=-1, multiplier=1): self.frame.open("about:sample.dot")
|
def action(self, index=-1, multiplier=1): self.frame.open("about:sample.dot")
| 480,662 |
def action(self, index=-1, multiplier=1): self.frame.open("about:sample.dot")
|
def action(self, index=-1, multiplier=1): self.frame.open("about:sample.dot")
| 480,663 |
def action(self, index=-1, multiplier=1): self.frame.open("about:sample.dot")
|
def action(self, index=-1, multiplier=1): self.frame.open("about:sample.dot")
| 480,664 |
def startupCallback(self, job): self.process = job self.busy(True) self.preview = StringIO()
|
def startupCallback(self, job): self.process = job self.preview = StringIO()
| 480,665 |
def finishedCallback(self, job): assert self.dprint() self.process = None self.busy(False) self.createImage() # Don't call evt.Skip() here because it causes a crash
|
def finishedCallback(self, job): assert self.dprint() self.process = None self.busy(False) self.createImage() # Don't call evt.Skip() here because it causes a crash
| 480,666 |
def getMajorModes(self): yield GraphvizMode
|
def getMajorModes(self): yield GraphvizMode
| 480,667 |
def download(self, url, path): """ Download url and store it at path """ f = None g = None try: f = urllib.urlopen(url) g = open(path, 'wb') copyobj(f, g) finally: if f: f.close() if g: g.close()
|
def download(url, path): """ Download url and store it at path """ f = None g = None try: f = urllib.urlopen(url) g = open(path, 'wb') copyobj(f, g) finally: if f: f.close() if g: g.close()
| 480,668 |
def main(): home = config.get_home() default_cache = os.path.join(home,"var","cache") default_cache = os.getenv("MULE_CACHE", default_cache) parser = OptionParser() parser.add_option("-f", "--foreground", action="store_true", dest="foreground", default=False, help="Do not fork [default: fork]") parser.add_option("-r", "--rls", action="store", dest="rls", default=DEFAULT_RLS, metavar="HOST", help="RLS host [def: %default]") parser.add_option("-c", "--cache", action="store", dest="cache", default=DEFAULT_CACHE, metavar="DIR", help="Cache directory [def: %default]") (options, args) = parser.parse_args() if len(args) > 0: parser.error("Invalid argument") if not options.rls: parser.error("Specify --rls or MULE_RLS environment") if os.path.isfile(options.cache): parser.error("--cache argument is file") if not os.path.isdir(options.cache): os.makedirs(options.cache) # Fork if not options.foreground: util.daemonize() os.chdir(config.get_home()) # Configure logging (after the fork) log.configure() l = log.get_log("agent") try: a = Agent(options.rls, options.cache) a.run() except Exception, e: l.exception(e) sys.exit(1)
|
def main(): home = config.get_home() default_cache = os.path.join(home,"var","cache") default_cache = os.getenv("MULE_CACHE", default_cache) parser = OptionParser() parser.add_option("-f", "--foreground", action="store_true", dest="foreground", default=False, help="Do not fork [default: fork]") parser.add_option("-r", "--rls", action="store", dest="rls", default=DEFAULT_RLS, metavar="HOST", help="RLS host [def: %default]") parser.add_option("-c", "--cache", action="store", dest="cache", default=DEFAULT_CACHE, metavar="DIR", help="Cache directory [def: %default]") (options, args) = parser.parse_args() if len(args) > 0: parser.error("Invalid argument") if not options.rls: parser.error("Specify --rls or MULE_RLS environment") if os.path.isfile(options.cache): parser.error("--cache argument is file") if not os.path.isdir(options.cache): os.makedirs(options.cache) # Fork if not options.foreground: util.daemonize() os.chdir(config.get_home()) # Configure logging (after the fork) log.configure() l = log.get_log("agent") try: a = Agent(options.rls, options.cache) a.run() except Exception, e: l.exception(e) sys.exit(1)
| 480,669 |
def usage(): sys.stderr.write( "Usage: %s COMMAND\n" % os.path.basename(sys.argv[0])) sys.stderr.write( """
|
def usage(): sys.stderr.write( "Usage: %s COMMAND\n" % os.path.basename(sys.argv[0])) sys.stderr.write( """
| 480,670 |
def usage(): sys.stderr.write( "Usage: %s COMMAND\n" % os.path.basename(sys.argv[0])) sys.stderr.write( """
|
def usage(): sys.stderr.write( "Usage: %s COMMAND\n" % os.path.basename(sys.argv[0])) sys.stderr.write( """
| 480,671 |
def main(): if len(sys.argv) < 2: usage() cmd = sys.argv[1] args = sys.argv[2:] if cmd in ['get']: parser = OptionParser("Usage: %prog get LFN PATH") parser.add_option("-s", "--symlink", action="store_true", dest="symlink", default=SYMLINK, help="symlink PATH to cached file [default: %default]") (options, args) = parser.parse_args(args=args) if len(args) != 2: parser.error("Specify LFN and PATH") lfn = args[0] path = args[1] get(lfn, path, options.symlink) elif cmd in ['put']: parser = OptionParser("Usage: %prog put PATH LFN") parser.add_option("-r", "--rename", action="store_true", dest="rename", default=RENAME, help="rename PATH to cached file [default: %default]") (options, args) = parser.parse_args(args=args) if len(args) != 2: pasrser.error("Specify PATH and LFN") path = args[0] lfn = args[1] put(path, lfn, options.rename) elif cmd in ['remove','rm']: parser = OptionParser("Usage: %prog remove [options] LFN") parser.add_option("-f", "--force", action="store_true", dest="force", default=False, help="Force LFN to be removed from cache [default: %default]") (options, args) = parser.parse_args(args=args) if len(args) != 1: parser.error("Specify LFN") lfn = args[0] remove(lfn, options.force) elif cmd in ['list','ls']: parser = OptionParser("Usage: %prog list") (options, args) = parser.parse_args(args=args) if len(args) > 0: parser.error("Invalid argument") ls() elif cmd in ['rls_add','add']: parser = OptionParser("Usage: %prog rls_add LFN PFN") (options, args) = parser.parse_args(args=args) if len(args) != 2: parser.error("Specify LFN and PFN") lfn = args[0] pfn = args[1] rls_add(lfn, pfn) elif cmd in ['rls_lookup','rls_lu','lookup','lu']: parser = OptionParser("Usage: %prog rls_lookup LFN") (options, args) = parser.parse_args(args=args) if len(args) != 1: parser.error("Specify LFN") lfn = args[0] rls_lookup(lfn) elif cmd in ['rls_delete','rls_del','delete','del']: parser = OptionParser("Usage: %prog rls_del LFN [PFN]") (options, args) = parser.parse_args(args=args) if len(args) not in [1,2]: parser.error("Specify LFN and/or PFN") lfn = args[0] if len(args) > 1: pfn = args[1] else: pfn = None rls_delete(lfn, pfn) elif cmd in ['-h','help','-help','--help']: usage() else: sys.stderr.write("Unrecognized argument: %s\n" % cmd)
|
def main(): if len(sys.argv) < 2: usage() cmd = sys.argv[1] args = sys.argv[2:] if cmd in ['get']: parser = OptionParser("Usage: %prog get LFN PATH") parser.add_option("-s", "--symlink", action="store_true", dest="symlink", default=SYMLINK, help="symlink PATH to cached file [default: %default]") (options, args) = parser.parse_args(args=args) if len(args) != 2: parser.error("Specify LFN and PATH") lfn = args[0] path = args[1] get(lfn, path, options.symlink) elif cmd in ['put']: parser = OptionParser("Usage: %prog put PATH LFN") parser.add_option("-r", "--rename", action="store_true", dest="rename", default=RENAME, help="rename PATH to cached file [default: %default]") (options, args) = parser.parse_args(args=args) if len(args) != 2: pasrser.error("Specify PATH and LFN") path = args[0] lfn = args[1] put(path, lfn, options.rename) elif cmd in ['remove','rm']: parser = OptionParser("Usage: %prog remove [options] LFN") parser.add_option("-f", "--force", action="store_true", dest="force", default=False, help="Force LFN to be removed from cache [default: %default]") (options, args) = parser.parse_args(args=args) if len(args) != 1: parser.error("Specify LFN") lfn = args[0] remove(lfn, options.force) elif cmd in ['list','ls']: parser = OptionParser("Usage: %prog list") (options, args) = parser.parse_args(args=args) if len(args) > 0: parser.error("Invalid argument") ls() elif cmd in ['rls_add','add']: parser = OptionParser("Usage: %prog rls_add LFN PFN") (options, args) = parser.parse_args(args=args) if len(args) != 2: parser.error("Specify LFN and PFN") lfn = args[0] pfn = args[1] rls_add(lfn, pfn) elif cmd in ['rls_lookup','rls_lu','lookup','lu']: parser = OptionParser("Usage: %prog rls_lookup LFN") (options, args) = parser.parse_args(args=args) if len(args) != 1: parser.error("Specify LFN") lfn = args[0] rls_lookup(lfn) elif cmd in ['rls_delete','rls_del','delete','del']: parser = OptionParser("Usage: %prog rls_del LFN [PFN]") (options, args) = parser.parse_args(args=args) if len(args) not in [1,2]: parser.error("Specify LFN and/or PFN") lfn = args[0] if len(args) > 1: pfn = args[1] else: pfn = None rls_delete(lfn, pfn) elif cmd in ['-h','help','-help','--help']: usage() else: sys.stderr.write("Unrecognized argument: %s\n" % cmd)
| 480,672 |
def with_transaction(self, *args, **kwargs): deadlocks = 0 while True: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except DBLockDeadlockError, e: txn.abort() deadlocks += 1 if deadlocks < retries: self.log.info("Deadlock detected, retrying") continue else: self.log.error("Deadlock detected, aborting") raise except: txn.abort() raise
|
def with_transaction(self, *args, **kwargs): deadlocks = 0 while True: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except bdb.DBLockDeadlockError, e: txn.abort() deadlocks += 1 if deadlocks < retries: self.log.info("Deadlock detected, retrying") continue else: self.log.error("Deadlock detected, aborting") raise except: txn.abort() raise
| 480,673 |
def list(self): cur = self.db.cursor(txn) try: result = [] current = cur.first() while current is not None: rec = pickle.loads(current[1]) rec['lfn'] = current[0] result.append(rec) current = cur.next() return result finally: cur.close()
|
def list(self, txn): cur = self.db.cursor(txn) try: result = [] current = cur.first() while current is not None: rec = pickle.loads(current[1]) rec['lfn'] = current[0] result.append(rec) current = cur.next() return result finally: cur.close()
| 480,674 |
def with_transaction(method): def with_transaction(self, *args, **kwargs): if len(args)>0 and isinstance(args[0],Database): return method(self, *args, **kwargs) else: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except: txn.abort() raise return with_transaction
|
def with_transaction(method, retries=3): def with_transaction(self, *args, **kwargs): if len(args)>0 and isinstance(args[0],Database): return method(self, *args, **kwargs) else: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except: txn.abort() raise return with_transaction
| 480,675 |
def with_transaction(self, *args, **kwargs): if len(args)>0 and isinstance(args[0],Database): return method(self, *args, **kwargs) else: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except: txn.abort() raise
|
def with_transaction(self, *args, **kwargs): deadlocks = 0 while True: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except: txn.abort() raise
| 480,676 |
def with_transaction(self, *args, **kwargs): if len(args)>0 and isinstance(args[0],Database): return method(self, *args, **kwargs) else: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except: txn.abort() raise
|
def with_transaction(self, *args, **kwargs): if len(args)>0 and isinstance(args[0],Database): return method(self, *args, **kwargs) else: txn = self.env.txn_begin() try: result = method(self, txn, *args, **kwargs) txn.commit() return result except: txn.abort() raise
| 480,677 |
def __init__(self, env, interval=300): Thread.__init__(self) self.setDaemon(True) self.log = log.get_log("ckpt_thread") self.env = env self.interval = interval
|
def __init__(self, env, interval=300): Thread.__init__(self) self.setDaemon(True) self.log = log.get_log("bdb manager") self.env = env self.interval = interval
| 480,678 |
def __init__(self, path, name, duplicates=False): self.path = path self.dbpath = os.path.join(self.path, name) if not os.path.isdir(self.path): os.makedirs(self.path) self.env = bdb.DBEnv() self.env.set_tx_max(self.max_txns) self.env.set_lk_max_lockers(self.max_txns*2) self.env.set_lk_max_locks(self.max_txns*2) self.env.set_lk_max_objects(self.max_txns*2) self.env.set_flags(bdb.DB_TXN_NOSYNC, True) if bdb.version() > (4,7): self.env.log_set_config(bdb.DB_LOG_AUTO_REMOVE, True) self.env.open(self.path, bdb.DB_CREATE | bdb.DB_INIT_LOCK | bdb.DB_INIT_LOG | bdb.DB_INIT_MPOOL | bdb.DB_INIT_TXN | bdb.DB_RECOVER | bdb.DB_THREAD) self.db = bdb.DB(self.env) if duplicates: self.db.set_flags(bdb.DB_DUPSORT) if bdb.version() > (4,1): txn = self.env.txn_begin() self.db.open(self.dbpath, name, flags=bdb.DB_CREATE|bdb.DB_THREAD, dbtype=bdb.DB_BTREE, txn=txn) txn.commit() else: self.db.open(self.dbpath, name, flags=bdb.DB_CREATE|bdb.DB_THREAD, dbtype=bdb.DB_BTREE)
|
def __init__(self, path, name, duplicates=False): self.path = path self.dbpath = os.path.join(self.path, name) if not os.path.isdir(self.path): os.makedirs(self.path) self.env = bdb.DBEnv() self.env.set_tx_max(self.max_txns) self.env.set_lk_max_lockers(self.max_txns*2) self.env.set_lk_max_locks(self.max_txns*2) self.env.set_lk_max_objects(self.max_txns*2) self.env.set_flags(bdb.DB_TXN_NOSYNC, True) if hasattr(self.env, "log_set_config"): self.env.log_set_config(bdb.DB_LOG_AUTO_REMOVE, True) self.env.open(self.path, bdb.DB_CREATE | bdb.DB_INIT_LOCK | bdb.DB_INIT_LOG | bdb.DB_INIT_MPOOL | bdb.DB_INIT_TXN | bdb.DB_RECOVER | bdb.DB_THREAD) self.db = bdb.DB(self.env) if duplicates: self.db.set_flags(bdb.DB_DUPSORT) if bdb.version() > (4,1): txn = self.env.txn_begin() self.db.open(self.dbpath, name, flags=bdb.DB_CREATE|bdb.DB_THREAD, dbtype=bdb.DB_BTREE, txn=txn) txn.commit() else: self.db.open(self.dbpath, name, flags=bdb.DB_CREATE|bdb.DB_THREAD, dbtype=bdb.DB_BTREE)
| 480,679 |
def delete(self, txn, lfn, pfn=None): cur = self.db.cursor(txn) try: if pfn is None: current = cur.set(lfn) while current is not None: cur.delete() current = cur.next_dup() else: current = cur.set_both(lfn, pfn) if current is not None: cur.delete() finally: cur.close()
|
def delete(self, txn, lfn, pfn=None): cur = self.db.cursor(txn) try: if pfn is None: current = cur.set(lfn) while current is not None: cur.delete() current = cur.next_dup() else: current = cur.set_both(lfn, pfn) if current is not None: cur.delete() finally: cur.close()
| 480,680 |
def get(self, lfn): current = self.db.get(lfn) if current is not None: return pickle.loads(current) else: return None
|
@with_transaction def get(self, txn, lfn): current = self.db.get(lfn, txn) if current is not None: return pickle.loads(current) else: return None
| 480,681 |
def remove(self, txn, lfn): self.db.delete(lfn, txn)
|
@with_transaction def remove(self, txn, lfn): self.db.delete(lfn, txn)
| 480,682 |
def list(self): cur = self.db.cursor() try: result = [] current = cur.first() while current is not None: rec = pickle.loads(current[1]) rec['lfn'] = current[0] result.append(rec) current = cur.next() return result finally: cur.close()
|
def list(self): cur = self.db.cursor(txn) try: result = [] current = cur.first() while current is not None: rec = pickle.loads(current[1]) rec['lfn'] = current[0] result.append(rec) current = cur.next() return result finally: cur.close()
| 480,683 |
def lookup(lfn): cur = conn.cursor() cur.execute("select pfn from map where lfn=?",(lfn,)) pfns = [] for row in cur.fetchall(): pfns.append(row['pfn']) cur.close() return pfns
|
def lookup(self, conn, lfn): cur = conn.cursor() cur.execute("select pfn from map where lfn=?",(lfn,)) pfns = [] for row in cur.fetchall(): pfns.append(row['pfn']) cur.close() return pfns
| 480,684 |
def get(self, txn, lfn): current = self.db.get(lfn, txn) if current is not None: return pickle.loads(current) else: return None
|
def get(self, txn, lfn): current = self.db.get(lfn, None, txn) if current is not None: return pickle.loads(current) else: return None
| 480,685 |
def __repr__(self): return '<MailmanRESTClient: %s>' % self.host
|
def __repr__(self): return '<MailmanRESTClient: %s>' % self.host
| 480,686 |
def _http_request(self, path, data=None, method=None, **kwargs): """Send an HTTP request. :param path: the path to send the request to :type path: string :param data: POST, PUT or PATCH data to send :type data: dict :param method: the HTTP method; defaults to GET or POST (if data is not None) :type method: string :return: the request content or a status code, depending on the method and if the request was successful :rtype: int, list or dict """ url = self.host + path # Include general header information headers = { 'User-Agent': 'MailmanRESTClient', 'Accept': 'text/plain', } if data is not None: data = urlencode(data) if method is None: if data is None: method = 'GET' else: method = 'POST' method = method.upper() if method == 'POST': headers['Content-type'] = "application/x-www-form-urlencoded" response, content = Http().request(url, method, data, headers) if method == 'GET': if response.status // 100 != 2: return response.status else: return json.loads(content) else: return response.status
|
def _http_request(self, path, data=None, method=None, **kwargs): """Send an HTTP request. :param path: the path to send the request to :type path: string :param data: POST oder PUT data to send :type data: dict :param method: the HTTP method; defaults to GET or POST (if data is not None) :type method: string :return: the request content or a status code, depending on the method and if the request was successful :rtype: int, list or dict """ url = self.host + path # Include general header information headers = { 'User-Agent': 'MailmanRESTClient', 'Accept': 'text/plain', } if data is not None: data = urlencode(data) if method is None: if data is None: method = 'GET' else: method = 'POST' method = method.upper() if method == 'POST': headers['Content-type'] = "application/x-www-form-urlencoded" response, content = Http().request(url, method, data, headers) if method == 'GET': if response.status // 100 != 2: return response.status else: return json.loads(content) else: return response.status
| 480,687 |
def _http_request(self, path, data=None, method=None, **kwargs): """Send an HTTP request. :param path: the path to send the request to :type path: string :param data: POST, PUT or PATCH data to send :type data: dict :param method: the HTTP method; defaults to GET or POST (if data is not None) :type method: string :return: the request content or a status code, depending on the method and if the request was successful :rtype: int, list or dict """ url = self.host + path # Include general header information headers = { 'User-Agent': 'MailmanRESTClient', 'Accept': 'text/plain', } if data is not None: data = urlencode(data) if method is None: if data is None: method = 'GET' else: method = 'POST' method = method.upper() if method == 'POST': headers['Content-type'] = "application/x-www-form-urlencoded" response, content = Http().request(url, method, data, headers) if method == 'GET': if response.status // 100 != 2: return response.status else: return json.loads(content) else: return response.status
|
def _http_request(self, path, data=None, method=None, **kwargs): """Send an HTTP request. :param path: the path to send the request to :type path: string :param data: POST, PUT or PATCH data to send :type data: dict :param method: the HTTP method; defaults to GET or POST (if data is not None) :type method: string :return: the request content or a status code, depending on the method and if the request was successful :rtype: int, list or dict """ url = self.host + path # Include general header information headers = { 'User-Agent': 'MailmanRESTClient', 'Accept': 'text/plain', } if data is not None: data = urlencode(data, doseq=True) headers['Content-type'] = "application/x-www-form-urlencoded" if method is None: if data is None: method = 'GET' else: method = 'POST' method = method.upper() if method == 'POST': headers['Content-type'] = "application/x-www-form-urlencoded" response, content = Http().request(url, method, data, headers) if method == 'GET': if response.status // 100 != 2: return response.status else: return json.loads(content) else: return response.status
| 480,688 |
def _http_request(self, path, data=None, method=None, **kwargs): """Send an HTTP request. :param path: the path to send the request to :type path: string :param data: POST, PUT or PATCH data to send :type data: dict :param method: the HTTP method; defaults to GET or POST (if data is not None) :type method: string :return: the request content or a status code, depending on the method and if the request was successful :rtype: int, list or dict """ url = self.host + path # Include general header information headers = { 'User-Agent': 'MailmanRESTClient', 'Accept': 'text/plain', } if data is not None: data = urlencode(data) if method is None: if data is None: method = 'GET' else: method = 'POST' method = method.upper() if method == 'POST': headers['Content-type'] = "application/x-www-form-urlencoded" response, content = Http().request(url, method, data, headers) if method == 'GET': if response.status // 100 != 2: return response.status else: return json.loads(content) else: return response.status
|
def _http_request(self, path, data=None, method=None, **kwargs): """Send an HTTP request. :param path: the path to send the request to :type path: string :param data: POST, PUT or PATCH data to send :type data: dict :param method: the HTTP method; defaults to GET or POST (if data is not None) :type method: string :return: the request content or a status code, depending on the method and if the request was successful :rtype: int, list or dict """ url = self.host + path # Include general header information headers = { 'User-Agent': 'MailmanRESTClient', 'Accept': 'text/plain', } if data is not None: data = urlencode(data) if method is None: if data is None: method = 'GET' else: method = 'POST' method = method.upper() response, content = Http().request(url, method, data, headers) if method == 'GET': if response.status // 100 != 2: return response.status else: return json.loads(content) else: return response.status
| 480,689 |
def get_member(self, email_address, fqdn_listname): """Return a member object. :param email_adresses: the email address used :type email_address: string :param fqdn_listname: the mailing list :type fqdn_listname: string :return: a member object :rtype: _Member """ return _Member(self.host, email_address, fqdn_listname)
|
def get_member(self, email_address, fqdn_listname): """Return a member object. :param email_adresses: the email address used :type email_address: string :param fqdn_listname: the mailing list :type fqdn_listname: string :return: a member object :rtype: _Member """ return _Member(self.host, email_address, fqdn_listname)
| 480,690 |
def delete_list(self, list_name): fqdn_listname = list_name + '@' + self.info['email_host'] return self._http_request('/3.0/lists/' + fqdn_listname, None, 'DELETE')
|
def delete_list(self, list_name): fqdn_listname = list_name + '@' + self.info['email_host'] return self._http_request('/3.0/lists/' + fqdn_listname, None, 'DELETE')
| 480,691 |
def get_members(self): """Get a list of all list members.
|
def get_members(self): """Get a list of all list members.
| 480,692 |
def avg (l): sum=0 for e in l: sum=sum+e; return sum/len(l)
|
def avg (l): sum=0 for e in l: sum=sum+e; return sum/len(l)
| 480,693 |
def correl_split_weighted( X , Y , segments ): # expects segments = [(0,i1-1),(i1-1,i2-1),(i2,len-1)] correl = list(); interv = list(); # regr. line coeffs and range glob_corr=0 sum_nb_val=0 for (start,stop) in segments: sum_nb_val = sum_nb_val + stop - start; #if start==stop : # return 0 S_XY= cov( X [start:stop+1], Y [start:stop+1] ) S_X2 = variance( X [start:stop+1] ) S_Y2 = variance( Y [start:stop+1] ) # to compute correlation if S_X2*S_Y2 == 0: return (0,[]) c = S_XY/(sqrt(S_X2)*sqrt(S_Y2)) a = S_XY/S_X2 # regr line coeffs b= avg ( Y[start:stop+1] ) - a * avg( X[start:stop+1] ) print(" range [%d,%d] corr=%f, coeff det=%f [a=%f, b=%f]" % (X[start],X[stop],c,c**2,a, b)) correl.append( (c, stop-start) ); # store correl. coef + number of values (segment length) interv.append( (a,b, X[start],X[stop]) ); for (c,l) in correl: glob_corr = glob_corr + (l/sum_nb_val)*c # weighted product of correlation print('-- %f * %f' % (c,l/sum_nb_val)) print("-> glob_corr=%f\n" % glob_corr) return (glob_corr,interv);
|
def correl_split_weighted( X , Y , segments ): # expects segments = [(0,i1-1),(i1-1,i2-1),(i2,len-1)] correl = list(); interv = list(); # regr. line coeffs and range glob_corr=0 sum_nb_val=0 for (start,stop) in segments: sum_nb_val = sum_nb_val + stop - start; #if start==stop : # return 0 S_XY= cov( X [start:stop+1], Y [start:stop+1] ) S_X2 = variance( X [start:stop+1] ) S_Y2 = variance( Y [start:stop+1] ) # to compute correlation if S_X2*S_Y2 == 0: return (0,[]) c = S_XY/(sqrt(S_X2)*sqrt(S_Y2)) a = S_XY/S_X2 # regr line coeffs b= avg ( Y[start:stop+1] ) - a * avg( X[start:stop+1] ) print(" range [%d,%d] corr=%f, coeff det=%f [a=%f, b=%f]" % (X[start],X[stop],c,c**2,a, b)) correl.append( (c, stop-start) ); # store correl. coef + number of values (segment length) interv.append( (a,b, X[start],X[stop]) ); for (c,l) in correl: glob_corr = glob_corr + (l/sum_nb_val)*c # weighted product of correlation print('-- %f * %f' % (c,l/sum_nb_val)) print("-> glob_corr={}\n".format(glob_corr)) return (glob_corr,interv);
| 480,694 |
def correl_split( X , Y , segments ): # expects segments = [(0,i1-1),(i1-1,i2-1),(i2,len-1)] correl = list(); interv = list(); # regr. line coeffs and range glob_corr=1 for (start,stop) in segments: #if start==stop : # return 0 S_XY= cov( X [start:stop+1], Y [start:stop+1] ) S_X2 = variance( X [start:stop+1] ) S_Y2 = variance( Y [start:stop+1] ) # to compute correlation if S_X2*S_Y2 == 0: return (0,[]) c = S_XY/(sqrt(S_X2)*sqrt(S_Y2)) a = S_XY/S_X2 # regr line coeffs b= avg ( Y[start:stop+1] ) - a * avg( X[start:stop+1] ) print(" range [%d,%d] corr=%f, coeff det=%f [a=%f, b=%f]" % (X[start],X[stop],c,c**2,a, b)) correl.append( (c, stop-start) ); # store correl. coef + number of values (segment length) interv.append( (a,b, X[start],X[stop]) ); for (c,l) in correl: glob_corr = glob_corr * c # product of correlation coeffs print("-> glob_corr=%f\n" % glob_corr) return (glob_corr,interv);
|
def correl_split( X , Y , segments ): # expects segments = [(0,i1-1),(i1-1,i2-1),(i2,len-1)] correl = list(); interv = list(); # regr. line coeffs and range glob_corr=1 for (start,stop) in segments: #if start==stop : # return 0 S_XY= cov( X [start:stop+1], Y [start:stop+1] ) S_X2 = variance( X [start:stop+1] ) S_Y2 = variance( Y [start:stop+1] ) # to compute correlation if S_X2*S_Y2 == 0: return (0,[]) c = S_XY/(sqrt(S_X2)*sqrt(S_Y2)) a = S_XY/S_X2 # regr line coeffs b= avg ( Y[start:stop+1] ) - a * avg( X[start:stop+1] ) print(" range [%d,%d] corr=%f, coeff det=%f [a=%f, b=%f]" % (X[start],X[stop],c,c**2,a, b)) correl.append( (c, stop-start) ); # store correl. coef + number of values (segment length) interv.append( (a,b, X[start],X[stop]) ); for (c,l) in correl: glob_corr = glob_corr * c # product of correlation coeffs print("-> glob_corr=%f\n" % glob_corr) return (glob_corr,interv);
| 480,695 |
def correl_split( X , Y , segments ): # expects segments = [(0,i1-1),(i1-1,i2-1),(i2,len-1)] correl = list(); interv = list(); # regr. line coeffs and range glob_corr=1 for (start,stop) in segments: #if start==stop : # return 0 S_XY= cov( X [start:stop+1], Y [start:stop+1] ) S_X2 = variance( X [start:stop+1] ) S_Y2 = variance( Y [start:stop+1] ) # to compute correlation if S_X2*S_Y2 == 0: return (0,[]) c = S_XY/(sqrt(S_X2)*sqrt(S_Y2)) a = S_XY/S_X2 # regr line coeffs b= avg ( Y[start:stop+1] ) - a * avg( X[start:stop+1] ) print(" range [%d,%d] corr=%f, coeff det=%f [a=%f, b=%f]" % (X[start],X[stop],c,c**2,a, b)) correl.append( (c, stop-start) ); # store correl. coef + number of values (segment length) interv.append( (a,b, X[start],X[stop]) ); for (c,l) in correl: glob_corr = glob_corr * c # product of correlation coeffs print("-> glob_corr=%f\n" % glob_corr) return (glob_corr,interv);
|
def correl_split( X , Y , segments ): # expects segments = [(0,i1-1),(i1-1,i2-1),(i2,len-1)] correl = list(); interv = list(); # regr. line coeffs and range glob_corr=1 for (start,stop) in segments: #if start==stop : # return 0 S_XY= cov( X [start:stop+1], Y [start:stop+1] ) S_X2 = variance( X [start:stop+1] ) S_Y2 = variance( Y [start:stop+1] ) # to compute correlation if S_X2*S_Y2 == 0: return (0,[]) c = S_XY/(sqrt(S_X2)*sqrt(S_Y2)) a = S_XY/S_X2 # regr line coeffs b= avg ( Y[start:stop+1] ) - a * avg( X[start:stop+1] ) print(" range [%d,%d] corr=%f, coeff det=%f [a=%f, b=%f]" % (X[start],X[stop],c,c**2,a, b)) correl.append( (c, stop-start) ); # store correl. coef + number of values (segment length) interv.append( (a,b, X[start],X[stop]) ); for (c,l) in correl: glob_corr = glob_corr * c # product of correlation coeffs print("-> glob_corr=%f\n" % glob_corr) return (glob_corr,interv);
| 480,696 |
def cov (X, Y): assert len(X) == len(Y) n = len(X) # n=len(X)=len(Y) avg_X = avg(X) avg_Y = avg(Y) S_XY = 0.0 for i in xrange(n): S_XY += (X[i] - avg_X) * (Y[i] - avg_Y) return (S_XY / n)
|
def cov (X, Y): assert len(X) == len(Y) n = len(X) # n=len(X)=len(Y) avg_X = avg(X) avg_Y = avg(Y) S_XY = 0.0 for i in range(n): S_XY += (X[i] - avg_X) * (Y[i] - avg_Y) return (S_XY / n)
| 480,697 |
def variance (X): n = len(X) avg_X = avg (X) S_X2 = 0.0 for i in xrange(n): S_X2 += (X[i] - avg_X) ** 2 return (S_X2 / n)
|
def variance (X): n = len(X) avg_X = avg (X) S_X2 = 0.0 for i in range(n): S_X2 += (X[i] - avg_X) ** 2 return (S_X2 / n)
| 480,698 |
def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)
|
def calibrate (links, latency, bandwidth, sizes, timings): assert len(sizes) == len(timings) if len(sizes) < 2: return None S_XY = cov(sizes, timings) S_X2 = variance(sizes) a = S_XY / S_X2 b = avg(timings) - a * avg(sizes) return (b * 1e-6) / (latency * links), 1e6 / (a * bandwidth)
| 480,699 |