Dataset columns: function (string, lengths 11 to 56k), repo_name (string, lengths 5 to 60), features (sequence)
import base64


def b64_decode(data):
  """Decodes standard unpadded base64 encoded string."""
  mod = len(data) % 4
  if mod:
    data += '=' * (4 - mod)
  return base64.b64decode(data)
luci/luci-py
[ 70, 40, 70, 82, 1427740754 ]
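A quick sanity check of b64_decode (the sample string is illustrative): the stock decoder rejects unpadded input, while the helper restores the '=' padding before decoding.

# 'aGVsbG8gd29ybGQ' is base64 for b'hello world' with its trailing '=' stripped.
# base64.b64decode would raise binascii.Error ("Incorrect padding") on it directly.
assert b64_decode('aGVsbG8gd29ybGQ') == b'hello world'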
def save_paired_items(request, user, formset, Model, item_name, counterpart_name):
    """
    Handle saving skills or links to the database.
    """
    paired_items = []
    for form in formset:
        if form.is_valid():
            item = form.cleaned_data.get(item_name, None)
            counterpart = form.cleaned_data.get(counterpart_name, None)
            if item and counterpart:
                model_instance = Model(user=user)
                setattr(model_instance, item_name, item)
                setattr(model_instance, counterpart_name, counterpart)
                paired_items.append(model_instance)

    # Replace old pairs with new.
    # Do this in a transaction to avoid a case where we delete the old
    # but cannot save the new.
    try:
        with transaction.atomic():
            Model.objects.filter(user=user).delete()
            Model.objects.bulk_create(paired_items)
    except IntegrityError:
        messages.error(request, _('There was an error updating your profile.'))
        return redirect(reverse('accounts:profile-settings'))
nlhkabu/connect
[ 47, 25, 47, 18, 1433260168 ]
def save_links(request, user, formset):
    """Wrapper function to save paired link anchors and URLs."""
    save_paired_items(request, user, formset, UserLink, 'anchor', 'url')
nlhkabu/connect
[ 47, 25, 47, 18, 1433260168 ]
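Given the generic signature of save_paired_items, other pair types plug in the same way. A hypothetical skills counterpart (UserSkill, 'skill' and 'proficiency' are illustrative names, not taken from the source):

def save_skills(request, user, formset):
    """Wrapper function to save paired skills and proficiency levels (illustrative)."""
    save_paired_items(request, user, formset, UserSkill, 'skill', 'proficiency')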
def __init__(self, *args, **kwargs):
    """
    Initialize the endpoint
    """
    super(AutomationActions, self).__init__(*args, **kwargs)
    self.endpoint = 'automations'
    self.workflow_id = None
charlesthk/python-mailchimp
[ 460, 138, 460, 34, 1441895803 ]
def pause(self, workflow_id):
    """
    Pause all emails in a specific Automation workflow.

    :param workflow_id: The unique id for the Automation workflow.
    :type workflow_id: :py:class:`str`
    """
    self.workflow_id = workflow_id
    return self._mc_client._post(url=self._build_path(workflow_id, 'actions/pause-all-emails'))
charlesthk/python-mailchimp
[ 460, 138, 460, 34, 1441895803 ]
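Assuming this endpoint is mounted on a client in the usual python-mailchimp layout (the client wiring is not shown in this excerpt, so treat the package name and attribute path as assumptions), pausing a workflow would look roughly like:

from mailchimp3 import MailChimp  # package name assumed

client = MailChimp(mc_api='your-api-key-us1')  # key is illustrative
# POSTs to automations/{workflow_id}/actions/pause-all-emails
client.automations.actions.pause('4e3da78a41')  # workflow id is illustrative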
def from_tree(tree):
    pass
N-Parsons/exercism-python
[ 1, 1, 1, 3, 1506170251 ]
def set_value(self):
    pass
N-Parsons/exercism-python
[ 1, 1, 1, 3, 1506170251 ]
def set_left(self):
    pass
N-Parsons/exercism-python
[ 1, 1, 1, 3, 1506170251 ]
def set_right(self):
    pass
N-Parsons/exercism-python
[ 1, 1, 1, 3, 1506170251 ]
def __init__(self, input_capture_file_path, capture_cmd, wireshark_pipe):

    self.input_capture_file_path = input_capture_file_path
    self.capture_cmd = capture_cmd
    self.wireshark_pipe = wireshark_pipe
    self.process = None
    self.pipe = None
    QtCore.QThread.__init__(self)
GNS3/gns3-legacy
[ 13, 10, 13, 1, 1380163967 ]
def run(self):

    try:
        in_file = open(self.input_capture_file_path, 'rb')
    except IOError as e:
        # NOTE: the original applied % to debug()'s return value, so the
        # error text never reached the message; formatting fixed here.
        debug("Cannot open capture file: %s" % unicode(e))
        self.exit()
        return

    try:
        self.process = subprocess.Popen(self.capture_cmd.strip())
    except (OSError, IOError) as e:
        debug("Cannot start Wireshark: %s" % unicode(e))
        self.exit()
        return

    try:
        self.pipe = win32pipe.CreateNamedPipe(self.wireshark_pipe,
                                              win32pipe.PIPE_ACCESS_OUTBOUND,
                                              win32pipe.PIPE_TYPE_MESSAGE | win32pipe.PIPE_WAIT,
                                              1, 65536, 65536, 300, None)
        win32pipe.ConnectNamedPipe(self.pipe, None)
    except win32pipe.error:
        debug("Error while creating and connecting the pipe ...")
        win32file.CloseHandle(self.pipe)
        self.exit()
        return

    while True:
        data = in_file.read()
        if not self.process or self.process.returncode is not None:
            win32file.CloseHandle(self.pipe)
            debug("Wireshark is not running, deleting pipe ...")
            self.exit()
            return
        if data:
            try:
                win32file.WriteFile(self.pipe, data)
            except:
                win32file.CloseHandle(self.pipe)
                debug("Wireshark has been closed, deleting pipe ...")
                self.exit()
                return
        else:
            time.sleep(0.5)  # FIXME: find a better way to wake up the thread only when there is data to read
GNS3/gns3-legacy
[ 13, 10, 13, 1, 1380163967 ]
def PathToTestFile( *args ):
  dir_of_current_script = os.path.dirname( os.path.abspath( __file__ ) )
  return os.path.join( dir_of_current_script, 'testdata', *args )
puremourning/ycmd-1
[ 3, 2, 3, 1, 1423398207 ]
def tearDownModule():
  global shared_app, shared_filepaths
  for filepath in shared_filepaths.get( shared_app, [] ):
    StopCompleterServer( shared_app, 'cs', filepath )
puremourning/ycmd-1
[ 3, 2, 3, 1, 1423398207 ]
def Wrapper( test_case_instance, *args, **kwargs ):
  ClearCompletionsCache()
  with IgnoreExtraConfOutsideTestsFolder():
    return test( test_case_instance, shared_app, *args, **kwargs )
puremourning/ycmd-1
[ 3, 2, 3, 1, 1423398207 ]
def IsolatedYcmd( custom_options = {} ):
  def Decorator( test ):
    @functools.wraps( test )
    def Wrapper( test_case_instance, *args, **kwargs ):
      with IsolatedApp( custom_options ) as app:
        try:
          test( test_case_instance, app, *args, **kwargs )
        finally:
          global shared_filepaths
          for filepath in shared_filepaths.get( app, [] ):
            StopCompleterServer( app, 'cs', filepath )
    return Wrapper
  return Decorator
puremourning/ycmd-1
[ 3, 2, 3, 1, 1423398207 ]
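A sketch of how the decorator above would be applied; the class name, test body, and option key are illustrative, but the shape (a decorated method receiving the isolated app) follows directly from Wrapper's signature.

class CsCompleterTest( TestCase ):
  @IsolatedYcmd( { 'max_diagnostics_to_display': 0 } )  # option key illustrative
  def test_something( self, app ):
    # `app` is the throwaway ycmd application created by IsolatedApp;
    # any C# completer servers it started are stopped in the finally block.
    ...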
def ReadFile( filepath, fileposition ):
  with open( filepath, encoding = 'utf8' ) as f:
    if fileposition:
      f.seek( fileposition )
    return f.read(), f.tell()
puremourning/ycmd-1
[ 3, 2, 3, 1, 1423398207 ]
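Because ReadFile returns both the content and the final offset, callers can tail a growing file incrementally, which is exactly how the shared log indexes in the next snippet use it. A minimal sketch (path illustrative):

position = 0
content, position = ReadFile( '/tmp/omnisharp.log', position )      # whole file
# ... after the server writes more ...
new_content, position = ReadFile( '/tmp/omnisharp.log', position )  # only the delta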
def WrapOmniSharpServer( app, filepath ):
  global shared_filepaths
  global shared_log_indexes

  if filepath not in shared_filepaths.setdefault( app, [] ):
    GetDiagnostics( app, filepath )
    shared_filepaths[ app ].append( filepath )
  WaitUntilCsCompleterIsReady( app, filepath )

  logfiles = []
  response = GetDebugInfo( app, filepath )
  for server in response[ 'completer' ][ 'servers' ]:
    logfiles.extend( server[ 'logfiles' ] )

  try:
    yield
  finally:
    for logfile in logfiles:
      if os.path.isfile( logfile ):
        log_content, log_end_position = ReadFile(
            logfile, shared_log_indexes.get( logfile, 0 ) )
        shared_log_indexes[ logfile ] = log_end_position
        sys.stdout.write( f'Logfile { logfile }:\n\n' )
        sys.stdout.write( log_content )
        sys.stdout.write( '\n' )
puremourning/ycmd-1
[ 3, 2, 3, 1, 1423398207 ]
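Since WrapOmniSharpServer yields exactly once and prints the accumulated logs in its finally clause, it is shaped for contextlib.contextmanager; the decoration is not visible in this excerpt, so the wrapping below is an assumption about how it is used.

import contextlib

WrapOmniSharpServer = contextlib.contextmanager( WrapOmniSharpServer )

with WrapOmniSharpServer( app, filepath ):
    ...  # exercise the C# completer; new logfile output is printed on exit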
def __init__(self, fileOrPath):
    self._resources = OrderedDict()
    if hasattr(fileOrPath, 'read'):
        self.file = fileOrPath
    else:
        try:
            # try reading from the resource fork (only works on OS X)
            self.file = self.openResourceFork(fileOrPath)
            self._readFile()
            return
        except (ResourceError, IOError):
            # if it fails, use the data fork
            self.file = self.openDataFork(fileOrPath)
    self._readFile()
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def openResourceFork(path):
    with open(path + '/..namedfork/rsrc', 'rb') as resfork:
        data = resfork.read()
    infile = BytesIO(data)
    infile.name = path
    return infile
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def openDataFork(path):
    with open(path, 'rb') as datafork:
        data = datafork.read()
    infile = BytesIO(data)
    infile.name = path
    return infile
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def _read(self, numBytes, offset=None):
    if offset is not None:
        try:
            self.file.seek(offset)
        except OverflowError:
            raise ResourceError("Failed to seek offset ('offset' is too large)")
        if self.file.tell() != offset:
            raise ResourceError('Failed to seek offset (reached EOF)')
    try:
        data = self.file.read(numBytes)
    except OverflowError:
        raise ResourceError("Cannot read resource ('numBytes' is too large)")
    if len(data) != numBytes:
        raise ResourceError('Cannot read resource (not enough data)')
    return data
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def _readTypeList(self):
    absTypeListOffset = self.absTypeListOffset
    numTypesData = self._read(2, absTypeListOffset)
    self.numTypes, = struct.unpack('>H', numTypesData)
    absTypeListOffset2 = absTypeListOffset + 2
    # Counts in the Mac resource map are stored minus one, hence the "+ 1"s.
    for i in range(self.numTypes + 1):
        resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
        resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
        item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
        resType = tostr(item['type'], encoding='mac-roman')
        refListOffset = absTypeListOffset + item['refListOffset']
        numRes = item['numRes'] + 1
        resources = self._readReferenceList(resType, refListOffset, numRes)
        self._resources[resType] = resources
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def __getitem__(self, resType):
    return self._resources[resType]
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def __setitem__(self, resType, resources):
    self._resources[resType] = resources
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def __iter__(self):
    return iter(self._resources)
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def types(self):
    return list(self._resources.keys())
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def getIndices(self, resType):
    numRes = self.countResources(resType)
    if numRes:
        return list(range(1, numRes + 1))
    else:
        return []
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def getIndResource(self, resType, index):
    """Return resource of given type located at an index ranging from 1
    to the number of resources for that type, or None if not found.
    """
    if index < 1:
        return None
    try:
        res = self[resType][index - 1]
    except (KeyError, IndexError):
        return None
    return res
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
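Note the 1-based index convention (matching the classic Mac Resource Manager) and the None-instead-of-raise contract. A short usage sketch; the reader construction and file name are hypothetical:

reader = ResourceReader('Emoji.suit')            # hypothetical resource file
first = reader.getIndResource('sfnt', 1)         # first resource, not index 0
assert reader.getIndResource('sfnt', 0) is None  # out-of-range returns None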
def close(self):
    if not self.file.closed:
        self.file.close()
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def __init__(self, resType=None, resData=None, resID=None, resName=None,
             resAttr=None):
    self.type = resType
    self.data = resData
    self.id = resID
    self.name = resName
    self.attr = resAttr
MitchTalmadge/Emoji-Tools
[ 103, 16, 103, 14, 1431903482 ]
def __init__(self, model=None):
    super(ContainerStatus, self).__init__()
    self._name = None
    self._state = None
    self._last_state = None
    self._ready = None
    self._restart_count = None
    self._image = None
    self._image_id = None
    self._container_id = None
    if model is not None:
        m = filter_model(model)
        self._build_with_model(m)
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def name(self):
    return self._name
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def name(self, name=None):
    if not is_valid_string(name):
        raise SyntaxError("ContainerStatus: name: [ {0} ] is invalid".format(name))
    self._name = name
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
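The getter/setter pairs in this class read like @property accessors whose decorators were dropped during extraction; reassembled, the name pair above would plausibly look like the following (decorators assumed, not visible in the rows). The same pattern repeats for state, last_state, ready, restart_count, image, image_id, and container_id below.

@property
def name(self):
    return self._name

@name.setter
def name(self, name=None):
    if not is_valid_string(name):
        raise SyntaxError("ContainerStatus: name: [ {0} ] is invalid".format(name))
    self._name = name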
def state(self):
    return self._state
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def state(self, state=None):
    if not isinstance(state, ContainerState):
        raise SyntaxError("ContainerStatus: state: [ {0} ] is invalid".format(state))
    self._state = state
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def last_state(self):
    return self._last_state
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def last_state(self, state=None):
    if not isinstance(state, ContainerState):
        raise SyntaxError("ContainerStatus: last_state: [ {0} ] is invalid".format(state))
    self._last_state = state
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def ready(self):
    return self._ready
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def ready(self, ready=None):
    if not isinstance(ready, bool):
        raise SyntaxError("ContainerStatus: ready: [ {0} ] is invalid".format(ready))
    self._ready = ready
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def restart_count(self):
    return self._restart_count
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def restart_count(self, count=None):
    if not isinstance(count, int):
        raise SyntaxError("ContainerStatus: restart_count: [ {0} ] is invalid".format(count))
    self._restart_count = count
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def image(self):
    return self._image
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def image(self, image=None):
    if not is_valid_string(image):
        raise SyntaxError("ContainerStatus: image: [ {0} ] is invalid".format(image))
    self._image = image
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def image_id(self):
    return self._image_id
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def image_id(self, iid=None):
    if not is_valid_string(iid):
        raise SyntaxError("ContainerStatus: image_id: [ {0} ] is invalid".format(iid))
    self._image_id = iid
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def container_id(self):
    return self._container_id
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def container_id(self, cid=None):
    if not is_valid_string(cid):
        raise SyntaxError("ContainerStatus: container_id: [ {0} ] is invalid".format(cid))
    self._container_id = cid
mnubo/kubernetes-py
[ 122, 48, 122, 12, 1452712930 ]
def test_SmokeTest(self):
    "ipc - Should be derived from object."
    self.failUnless(isinstance(ipc(), object),
                    "IPC objects should also be instances of object.")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test___init__SmokeTest_NoArguments(self):
    "newComponent.__init__ - Should work without problems."
    nc = newComponent()
    self.failUnless(isinstance(nc, ipc),
                    "newComponent should be derived from ipc class")
    self.failUnless(len(nc.components()) == 0,
                    "There should be no components in the message if the constructor was called with no arguments.")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_components(self):
    "newComponent.components - Returns a tuple of components that need to be added to the run queue/activated. Same test as for __init__ as they are counterparts."
    nc = newComponent("ba", "da", "bing")
    self.failUnless(nc.components() == ("ba", "da", "bing"),
                    "components returned something other than expected.")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test___init__SmokeTest_NoArguments(self):
    "shutdownMicroprocess.__init__ - Should work without problems."
    sm = shutdownMicroprocess()
    self.failUnless(isinstance(sm, ipc),
                    "shutdownMicroprocess should be derived from ipc")
    self.failUnless(sm.microprocesses() == (),
                    "Microprocess tuple not empty as expected.")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_microprocesses(self):
    "shutdownMicroprocess.microprocesses - Returns the list of microprocesses that need to be shutdown. This is essentially the counterpart to the __init__ test."
    sm = shutdownMicroprocess("ba", "da", "bing")
    self.failUnless(sm.microprocesses() == ("ba", "da", "bing"),
                    "Returned tuple not as expected.")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_SmokeTest_NoArguments(self):
    "notify.__init__ - Called without arguments fails."
    self.failUnlessRaises(TypeError, notify)
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_SmokeTest_NoArguments(self):
    "status.__init__ - Called without arguments fails."
    self.failUnlessRaises(TypeError, status)
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_status(self):
    "status.status - Returns the status message stored inside the status object. Counterpart to __init__ test."
    s = status("Status message.")
    self.failUnless(s.status() == "Status message.",
                    "Status message not stored properly.")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_SmokeTest_NoArguments(self):
    "wouldblock.__init__ - Called without arguments fails."
    self.failUnlessRaises(TypeError, wouldblock)
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_SmokeTest_NoArguments(self):
    "producerFinished.__init__ - Called without arguments defaults to a caller of None, message of None. Checks producerFinished is a subclass of ipc"
    pf = producerFinished()
    self.failUnless(isinstance(pf, ipc),
                    "producerFinished should be derived from ipc.")
    self.failUnless(pf.caller == None, "caller does not default to None")
    self.failUnless(pf.message == None, "message does not default to None")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_SmokeTest_NoArguments(self):
    "errorInformation.__init__ - Called without arguments fails - must include caller."
    self.failUnlessRaises(TypeError, errorInformation)
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_SmokeTest_MinSensibleArguments(self):
    "errorInformation.__init__ - An exception & message (any object) in addition to the caller to provide a more meaningful errorInformation message where appropriate. ttbw"
    ei = errorInformation("caller", "exception", "message")
    self.failUnless(ei.caller == "caller", "Caller is not set properly by position.")
    self.failUnless(ei.message == "message", "Message is not set properly by position.")
    self.failUnless(ei.exception == "exception", "Exception is not set properly by position.")

    ei = errorInformation(exception="exception", message="message", caller="caller")
    self.failUnless(ei.caller == "caller", "Caller is not set properly by name.")
    self.failUnless(ei.message == "message", "Message is not set properly by name.")
    self.failUnless(ei.exception == "exception", "Exception is not set properly by name.")
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def test_gamma():
    t = np.linspace(0, 30, 5000)
    # make up some numbers
    pk_t = 5.0
    fwhm = 6.0
    # get the estimated parameters
    shape, scale, coef = gamma_params(pk_t, fwhm)
    # get distribution function
    g_exp = gamma_expr(pk_t, fwhm)
    # make matching standard distribution
    gf = gamma(shape, scale=scale).pdf
    # get values
    L1t = gf(t)
    L2t = lambdify_t(g_exp)(t)
    # they are the same bar a scaling factor
    nz = np.abs(L1t) > 1e-15
    sf = np.mean(L1t[nz] / L2t[nz])
    assert_almost_equal(L1t, L2t * sf)
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def test_spm_hrf_octave():
    # Test SPM hrf against output from SPM code running in Octave
    my_path = dirname(__file__)
    # mat file resulting from make_hrfs.m
    hrfs_path = pjoin(my_path, 'spm_hrfs.mat')
    hrfs_mat = sio.loadmat(hrfs_path, squeeze_me=True)
    params = hrfs_mat['params']
    hrfs = hrfs_mat['hrfs']
    for i, pvec in enumerate(params):
        dt, ppk, upk, pdsp, udsp, rat = pvec
        t_vec = np.arange(0, 32.1, dt)
        our_hrf = spm_hrf_compat(t_vec,
                                 peak_delay=ppk,
                                 peak_disp=pdsp,
                                 under_delay=upk,
                                 under_disp=udsp,
                                 p_u_ratio=rat)
        # Normalize integral to match SPM
        assert_almost_equal(our_hrf, hrfs[i])
    # Test basis functions
    # mat file resulting from get_td_dd.m
    bases_path = pjoin(my_path, 'spm_bases.mat')
    bases_mat = sio.loadmat(bases_path, squeeze_me=True)
    dt = bases_mat['dt']
    t_vec = np.arange(0, 32 + dt, dt)
    # SPM function divides by sum of values - revert with dt
    assert_almost_equal(spmt(t_vec), bases_mat['hrf'] / dt, 4)
    assert_almost_equal(dspmt(t_vec), bases_mat['dhrf'] / dt, 4)
    assert_almost_equal(ddspmt(t_vec), bases_mat['ddhrf'] / dt, 4)
alexis-roche/nipy
[ 1, 1, 1, 1, 1272833701 ]
def setUp(self):
    if os.path.exists(TESTFN):
        mode = 'r+b'
    else:
        mode = 'w+b'

    with self.open(TESTFN, mode) as f:
        current_size = os.fstat(f.fileno())[stat.ST_SIZE]
        if current_size == size + 1:
            return

        if current_size == 0:
            f.write(b'z')

        f.seek(0)
        f.seek(size)
        f.write(b'a')
        f.flush()
        self.assertEqual(os.fstat(f.fileno())[stat.ST_SIZE], size + 1)
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def tearDownClass(cls):
    with cls.open(TESTFN, 'wb'):
        pass
    if not os.stat(TESTFN)[stat.ST_SIZE] == 0:
        raise cls.failureException('File was not truncated by opening '
                                   'with mode "wb"')
    unlink(TESTFN2)
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def test_large_read(self, _size):
    # bpo-24658: Test that a read greater than 2GB does not fail.
    with self.open(TESTFN, "rb") as f:
        self.assertEqual(len(f.read()), size + 1)
        self.assertEqual(f.tell(), size + 1)
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def test_seek_read(self):
    with self.open(TESTFN, 'rb') as f:
        self.assertEqual(f.tell(), 0)
        self.assertEqual(f.read(1), b'z')
        self.assertEqual(f.tell(), 1)
        f.seek(0)
        self.assertEqual(f.tell(), 0)
        f.seek(0, 0)
        self.assertEqual(f.tell(), 0)
        f.seek(42)
        self.assertEqual(f.tell(), 42)
        f.seek(42, 0)
        self.assertEqual(f.tell(), 42)
        f.seek(42, 1)
        self.assertEqual(f.tell(), 84)
        f.seek(0, 1)
        self.assertEqual(f.tell(), 84)
        f.seek(0, 2)  # seek from the end
        self.assertEqual(f.tell(), size + 1 + 0)
        f.seek(-10, 2)
        self.assertEqual(f.tell(), size + 1 - 10)
        f.seek(-size - 1, 2)
        self.assertEqual(f.tell(), 0)
        f.seek(size)
        self.assertEqual(f.tell(), size)
        # the 'a' that was written at the end of file above
        self.assertEqual(f.read(1), b'a')
        f.seek(-size - 1, 1)
        self.assertEqual(f.read(1), b'z')
        self.assertEqual(f.tell(), 1)
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def test_truncate(self):
    with self.open(TESTFN, 'r+b') as f:
        if not hasattr(f, 'truncate'):
            raise unittest.SkipTest("open().truncate() not available "
                                    "on this system")
        f.seek(0, 2)
        # else we've lost track of the true size
        self.assertEqual(f.tell(), size + 1)
        # Cut it back via seek + truncate with no argument.
        newsize = size - 10
        f.seek(newsize)
        f.truncate()
        self.assertEqual(f.tell(), newsize)  # else pointer moved
        f.seek(0, 2)
        self.assertEqual(f.tell(), newsize)  # else wasn't truncated
        # Ensure that truncate(smaller than true size) shrinks
        # the file.
        newsize -= 1
        f.seek(42)
        f.truncate(newsize)
        self.assertEqual(f.tell(), 42)
        f.seek(0, 2)
        self.assertEqual(f.tell(), newsize)
        # XXX truncate(larger than true size) is ill-defined
        # across platform; cut it waaaaay back
        f.seek(0)
        f.truncate(1)
        self.assertEqual(f.tell(), 0)  # else pointer moved
        f.seek(0)
        self.assertEqual(len(f.read()), 1)  # else wasn't truncated
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def skip_no_disk_space(path, required):
    def decorator(fun):
        def wrapper(*args, **kwargs):
            if shutil.disk_usage(os.path.realpath(path)).free < required:
                hsize = int(required / 1024 / 1024)
                raise unittest.SkipTest(
                    f"required {hsize} MiB of free disk space")
            return fun(*args, **kwargs)
        return wrapper
    return decorator
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
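Usage of the decorator is the standard two-level closure pattern; applied to a test method it would look like the following (the method name and size figure are illustrative):

@skip_no_disk_space(TESTFN, 4 * 1024 ** 3)  # need ~4 GiB free
def test_large_copy(self):
    ...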
def test_it(self):
    # Internally shutil.copyfile() can use "fast copy" methods like
    # os.sendfile().
    size = os.path.getsize(TESTFN)
    shutil.copyfile(TESTFN, TESTFN2)
    self.assertEqual(os.path.getsize(TESTFN2), size)
    with open(TESTFN2, 'rb') as f:
        self.assertEqual(f.read(5), b'z\x00\x00\x00\x00')
        f.seek(size - 5)
        self.assertEqual(f.read(), b'\x00\x00\x00\x00a')
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def setUp(self):
    super().setUp()
    self.thread = None
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def tcp_server(self, sock):
    def run(sock):
        with sock:
            conn, _ = sock.accept()
            conn.settimeout(self.timeout)
            with conn, open(TESTFN2, 'wb') as f:
                event.wait(self.timeout)
                while True:
                    chunk = conn.recv(65536)
                    if not chunk:
                        return
                    f.write(chunk)

    event = threading.Event()
    sock.settimeout(self.timeout)
    self.thread = threading.Thread(target=run, args=(sock, ))
    self.thread.start()
    event.set()
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def test_it(self):
    port = socket_helper.find_unused_port()
    with socket.create_server(("", port)) as sock:
        self.tcp_server(sock)
        with socket.create_connection(("127.0.0.1", port)) as client:
            with open(TESTFN, 'rb') as f:
                client.sendfile(f)
    self.tearDown()

    size = os.path.getsize(TESTFN)
    self.assertEqual(os.path.getsize(TESTFN2), size)
    with open(TESTFN2, 'rb') as f:
        self.assertEqual(f.read(5), b'z\x00\x00\x00\x00')
        f.seek(size - 5)
        self.assertEqual(f.read(), b'\x00\x00\x00\x00a')
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def tearDownModule():
    unlink(TESTFN)
    unlink(TESTFN2)
brython-dev/brython
[ 5954, 494, 5954, 33, 1410764301 ]
def __init__(self, name: str) -> None:
    """Initialize the sensor."""
    self._attr_name = name
home-assistant/home-assistant
[ 58698, 22318, 58698, 2794, 1379402988 ]
def test():
    # Presence of comments should not affect contents of tags. (In old pyxl,
    # this led to differences in whitespace handling.)
    assert str(get_frag1()) == str(get_frag2())
dropbox/pyxl
[ 519, 58, 519, 2, 1361664830 ]
def InitializeMacNowFunction(plat):
  """Sets a monotonic clock for the Mac platform.

  Args:
    plat: Platform that is being run on. Unused in GetMacNowFunction.
        Passed for consistency between initializers.
  """
  del plat  # Unused
  global _CLOCK  # pylint: disable=global-statement
  global _NOW_FUNCTION  # pylint: disable=global-statement
  _CLOCK = _MAC_CLOCK
  libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)

  class MachTimebaseInfoData(ctypes.Structure):
    """System timebase info. Defined in <mach/mach_time.h>."""
    _fields_ = (('numer', ctypes.c_uint32),
                ('denom', ctypes.c_uint32))

  mach_absolute_time = libc.mach_absolute_time
  mach_absolute_time.restype = ctypes.c_uint64

  timebase = MachTimebaseInfoData()
  libc.mach_timebase_info(ctypes.byref(timebase))
  # mach_timebase_info converts ticks to nanoseconds as
  # ns = ticks * numer / denom, so the tick rate in Hz is
  # denom / numer * 1e9 (on Intel Macs numer == denom == 1, so the
  # original numer/denom ordering happened to give the same result there).
  ticks_per_second = timebase.denom / float(timebase.numer) * 1.0e9

  def MacNowFunctionImpl():
    return mach_absolute_time() / ticks_per_second

  _NOW_FUNCTION = MacNowFunctionImpl
cricketclubucd/davisdragons
[ 3, 1, 3, 12, 1499574730 ]
def InitializeLinuxNowFunction(plat):
  """Sets a monotonic clock for linux platforms.

  Args:
    plat: Platform that is being run on.
  """
  global _CLOCK  # pylint: disable=global-statement
  global _NOW_FUNCTION  # pylint: disable=global-statement
  _CLOCK = _LINUX_CLOCK
  clock_monotonic = GetClockGetTimeClockNumber(plat)
  try:
    # Attempt to find clock_gettime in the C library.
    clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'),
                                use_errno=True).clock_gettime
  except AttributeError:
    # If not able to find it in the C library, look in rt library.
    clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'),
                                use_errno=True).clock_gettime

  class Timespec(ctypes.Structure):
    """Time specification, as described in clock_gettime(3)."""
    _fields_ = (('tv_sec', ctypes.c_long),
                ('tv_nsec', ctypes.c_long))

  def LinuxNowFunctionImpl():
    ts = Timespec()
    if clock_gettime(clock_monotonic, ctypes.pointer(ts)):
      errno = ctypes.get_errno()
      raise OSError(errno, os.strerror(errno))
    return ts.tv_sec + ts.tv_nsec / 1.0e9

  _NOW_FUNCTION = LinuxNowFunctionImpl
cricketclubucd/davisdragons
[ 3, 1, 3, 12, 1499574730 ]
def InitializeWinNowFunction(plat):
  """Sets a monotonic clock for windows platforms.

  Args:
    plat: Platform that is being run on.
  """
  global _CLOCK  # pylint: disable=global-statement
  global _NOW_FUNCTION  # pylint: disable=global-statement

  if IsQPCUsable():
    _CLOCK = _WIN_HIRES
    qpc_return = ctypes.c_int64()
    qpc_frequency = ctypes.c_int64()
    ctypes.windll.Kernel32.QueryPerformanceFrequency(
        ctypes.byref(qpc_frequency))
    qpc_frequency = float(qpc_frequency.value)
    qpc = ctypes.windll.Kernel32.QueryPerformanceCounter

    def WinNowFunctionImpl():
      qpc(ctypes.byref(qpc_return))
      return qpc_return.value / qpc_frequency

  else:
    _CLOCK = _WIN_LORES
    kernel32 = (ctypes.cdll.kernel32
                if plat.startswith(_PLATFORMS['cygwin'])
                else ctypes.windll.kernel32)
    get_tick_count_64 = getattr(kernel32, 'GetTickCount64', None)

    # Windows Vista or newer
    if get_tick_count_64:
      get_tick_count_64.restype = ctypes.c_ulonglong

      def WinNowFunctionImpl():
        return get_tick_count_64() / 1000.0

    else:  # Pre Vista.
      get_tick_count = kernel32.GetTickCount
      get_tick_count.restype = ctypes.c_uint32
      get_tick_count_lock = threading.Lock()

      def WinNowFunctionImpl():
        global GET_TICK_COUNT_LAST_NOW  # pylint: disable=global-statement
        global GET_TICK_COUNT_WRAPAROUNDS  # pylint: disable=global-statement
        with get_tick_count_lock:
          current_sample = get_tick_count()
          if current_sample < GET_TICK_COUNT_LAST_NOW:
            GET_TICK_COUNT_WRAPAROUNDS += 1
          GET_TICK_COUNT_LAST_NOW = current_sample
          final_ms = GET_TICK_COUNT_WRAPAROUNDS << 32
          final_ms += GET_TICK_COUNT_LAST_NOW
          return final_ms / 1000.0

  _NOW_FUNCTION = WinNowFunctionImpl
cricketclubucd/davisdragons
[ 3, 1, 3, 12, 1499574730 ]
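The pre-Vista branch compensates for GetTickCount's 32-bit millisecond counter, which wraps roughly every 49.7 days; each observed wrap contributes 2**32 ms. A toy check of that arithmetic, with made-up sample values:

wraparounds = 1                     # one observed 32-bit rollover
last_now = 5000                     # ms reported since the rollover
final_ms = (wraparounds << 32) + last_now
assert final_ms == 2 ** 32 + 5000   # about 49.7 days plus 5 seconds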
def Now():
  return _NOW_FUNCTION() * 1e6  # convert from seconds to microseconds
cricketclubucd/davisdragons
[ 3, 1, 3, 12, 1499574730 ]
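A minimal timing sketch on top of the module API, assuming one of the platform initializers above has already run (the workload function is hypothetical):

start = Now()                 # microseconds on the selected monotonic clock
do_work()                     # hypothetical workload
elapsed_us = Now() - start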
def find_ntoskrnl(version_modules):
    for entry in version_modules:
        e_type = entry[0]
        if e_type == 'r':
            e_data = entry[1]
            if e_data['pdb'] in NT_KRNL_PDB:
                return (e_data['pdb'], e_data['guid'])
    raise RuntimeError('Cannot find {} with version_modules '
                       'plugin'.format(NT_KRNL_PDB))
libvmi/libvmi
[ 609, 225, 609, 85, 1357164372 ]
def format_config(domain, config, old_format=False):
    if not old_format:
        formatted_config = """
libvmi/libvmi
[ 609, 225, 609, 85, 1357164372 ]
def main(args):
    # delete rekall's BasicConfig
    # we want to configure the root logger
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)

    debug = args['--debug']
    # configure root logger
    log_level = logging.INFO
    if debug:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level)
    logging.debug(args)

    domain_name = args['<domain>']
    uri = args['--uri']
    old_format = args['--old']
    url = args['<url>']

    config = None
    if not url:
        # take temporary memory dump
        # we need to create our own tmp_dir
        # otherwise the dumpfile will be owned by libvirt
        # and we don't have the permission to remove it in /tmp
        with TemporaryDirectory() as tmp_dir:
            with NamedTemporaryFile(dir=tmp_dir) as ram_dump:
                # chmod to be r/w by everyone
                # before libvirt takes ownership
                os.chmod(ram_dump.name,
                         stat.S_IRUSR | stat.S_IWUSR |
                         stat.S_IRGRP | stat.S_IWGRP |
                         stat.S_IROTH | stat.S_IWOTH)
                con = libvirt.open(uri)
                domain = con.lookupByName(domain_name)
                # take dump
                logging.info('Dumping %s physical memory to %s',
                             domain.name(), ram_dump.name)
                flags = libvirt.VIR_DUMP_MEMORY_ONLY
                dumpformat = libvirt.VIR_DOMAIN_CORE_DUMP_FORMAT_RAW
                domain.coreDumpWithFormat(ram_dump.name, dumpformat, flags)
                ram_dump.flush()
                # extract offsets
                config = extract_offsets(domain.name(), ram_dump.name)
    else:
        config = extract_offsets(domain_name, url)

    formatted_config = format_config(domain_name, config, old_format)
    logging.info(formatted_config)
libvmi/libvmi
[ 609, 225, 609, 85, 1357164372 ]
def conv_block(
    data,
    name,
    channels,
    kernel_size=(3, 3),
    strides=(1, 1),
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
dmlc/tvm
[ 9142, 2938, 9142, 595, 1476310828 ]
def separable_conv_block(
    data,
    name,
    depthwise_channels,
    pointwise_channels,
    kernel_size=(3, 3),
    downsample=False,
    padding=(1, 1),
    epsilon=1e-5,
    layout="NCHW",
    dtype="float32",
dmlc/tvm
[ 9142, 2938, 9142, 595, 1476310828 ]
def mobile_net(
    num_classes=1000,
    data_shape=(1, 3, 224, 224),
    dtype="float32",
    alpha=1.0,
    is_shallow=False,
    layout="NCHW",
dmlc/tvm
[ 9142, 2938, 9142, 595, 1476310828 ]
def get_workload(
    batch_size=1, num_classes=1000, image_shape=(3, 224, 224), dtype="float32", layout="NCHW"
dmlc/tvm
[ 9142, 2938, 9142, 595, 1476310828 ]
def ready(self):
    # To override the settings before loading social_django.
    if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH', False):
        self._enable_third_party_auth()
edx/edx-platform
[ 6290, 3437, 6290, 280, 1369945238 ]
def __init__(self, stringbinding):
    match = DCERPCStringBinding.parser.match(stringbinding)
    self.__uuid = match.group(1)
    self.__ps = match.group(2)
    self.__na = match.group(3)
    options = match.group(4)
    if options:
        options = options.split(',')
        self.__endpoint = options[0]
        try:
            self.__endpoint.index('endpoint=')
            self.__endpoint = self.__endpoint[len('endpoint='):]
        except ValueError:
            # no explicit 'endpoint=' prefix; keep the option as-is
            pass
        self.__options = options[1:]
    else:
        self.__endpoint = ''
        self.__options = []
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def get_protocol_sequence(self):
    return self.__ps
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def get_endpoint(self):
    return self.__endpoint
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def __str__(self):
    return DCERPCStringBindingCompose(self.__uuid, self.__ps, self.__na,
                                      self.__endpoint, self.__options)
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def DCERPCTransportFactory(stringbinding):
    sb = DCERPCStringBinding(stringbinding)

    na = sb.get_network_address()
    ps = sb.get_protocol_sequence()
    if 'ncadg_ip_udp' == ps:
        port = sb.get_endpoint()
        if port:
            return UDPTransport(na, int(port))
        else:
            return UDPTransport(na)
    elif 'ncacn_ip_tcp' == ps:
        port = sb.get_endpoint()
        if port:
            return TCPTransport(na, int(port))
        else:
            return TCPTransport(na)
    elif 'ncacn_http' == ps:
        port = sb.get_endpoint()
        if port:
            return HTTPTransport(na, int(port))
        else:
            return HTTPTransport(na)
    elif 'ncacn_np' == ps:
        named_pipe = sb.get_endpoint()
        if named_pipe:
            named_pipe = named_pipe[len(r'\pipe'):]
            return SMBTransport(na, filename=named_pipe)
        else:
            return SMBTransport(na)
    else:
        raise Exception("Unknown protocol sequence.")
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
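The factory dispatches purely on the protocol sequence at the front of the DCE/RPC string binding; a couple of illustrative bindings (the addresses and pipe name are made up):

tcp = DCERPCTransportFactory('ncacn_ip_tcp:192.168.0.10[135]')        # TCPTransport, port 135
smb = DCERPCTransportFactory(r'ncacn_np:192.168.0.10[\pipe\svcctl]')  # SMBTransport on the svcctl pipe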
def __init__(self, dstip, dstport):
    self.__dstip = dstip
    self.__dstport = dstport
    self._max_send_frag = None
    self._max_recv_frag = None
    self.set_credentials('', '', '', '')
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def send(self, data=0, forceWriteAndx=0, forceRecv=0):
    raise RuntimeError('virtual function')
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def disconnect(self):
    raise RuntimeError('virtual function')
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def get_dip(self):
    return self.__dstip
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def get_dport(self):
    return self.__dstport
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def get_addr(self):
    return (self.get_dip(), self.get_dport())
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def set_max_fragment_size(self, send_fragment_size):
    # -1 is default fragment size: 0 (don't fragment)
    #  0 is don't fragment
    # other values are max fragment size
    if send_fragment_size == -1:
        self.set_default_max_fragment_size()
    else:
        self._max_send_frag = send_fragment_size
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def get_credentials(self):
    return (self._username, self._password, self._nt_hash, self._lm_hash)
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]
def __init__(self, dstip, dstport=135):
    DCERPCTransport.__init__(self, dstip, dstport)
    self.__socket = 0
pwnieexpress/pwn_plug_sources
[ 124, 97, 124, 24, 1321552607 ]