function: string (lengths 11 to 56k)
repo_name: string (lengths 5 to 60)
features: sequence
def RunSteps(api):
    target = api.target('fuchsia-arm64')
    assert not target.is_win
    assert not target.is_linux
    assert not target.is_mac
    assert api.target.host.is_host
    assert target != api.target.host
    assert target != 'foo'
    step_result = api.step('platform things', cmd=None)
    step_result.presentation.logs['name'] = [target.os]
    step_result.presentation.logs['arch'] = [target.arch]
    step_result.presentation.logs['platform'] = [target.platform]
    step_result.presentation.logs['triple'] = [target.triple]
    step_result.presentation.logs['string'] = [str(target)]
ric2b/Vivaldi-browser
[ 131, 27, 131, 3, 1490828945 ]
def end_headers(self):
    self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
    self.send_header("Pragma", "no-cache")
    self.send_header("Expires", "0")
    super().end_headers()
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __init__(self, *args: Any) -> None:
    super().__init__(*args)
    self.papi = PublishAPISection("http://test/")
    self.maxDiff = None
gopythongo/aptly-api-client
[ 19, 16, 19, 7, 1496180979 ]
def test_update(self, *, rmock: requests_mock.Mocker) -> None:
    rmock.put("http://test/api/publish/s3%3Aaptly-repo%3Atest_xyz__1/test",
              text='{"AcquireByHash":false,"Architectures":["amd64"],"Distribution":"test","Label":"",'
                   '"Origin":"","Prefix":"test/xyz_1","SkipContents":false,'
                   '"SourceKind":"local","Sources":[{"Component":"main","Name":"aptly-repo"}],'
                   '"Storage":"s3:aptly-repo"}')
    self.assertEqual(
        self.papi.update(
            prefix="s3:aptly-repo:test/xyz_1",
            distribution="test",
            sign_batch=True,
            sign_gpgkey="A16BE921",
            sign_passphrase="123456",
        ),
        PublishEndpoint(
            storage='s3:aptly-repo',
            prefix='test/xyz_1',
            distribution='test',
            source_kind='local',
            sources=[{
                'Name': 'aptly-repo',
                'Component': 'main'
            }],
            architectures=['amd64'],
            label='',
            origin='',
            acquire_by_hash=False
        )
    )
gopythongo/aptly-api-client
[ 19, 16, 19, 7, 1496180979 ]
def test_update_no_sign(self, *, rmock: requests_mock.Mocker) -> None:
    rmock.put("http://test/api/publish/s3%3Aaptly-repo%3Atest_xyz__1/test",
              text='{"AcquireByHash":false,"Architectures":["amd64"],"Distribution":"test","Label":"",'
                   '"Origin":"","Prefix":"test/xyz_1","SkipContents":false,'
                   '"SourceKind":"local","Sources":[{"Component":"main","Name":"aptly-repo"}],'
                   '"Storage":"s3:aptly-repo"}')
    self.assertEqual(
        self.papi.update(
            prefix="s3:aptly-repo:test/xyz_1",
            distribution="test",
            sign_skip=True,
        ),
        PublishEndpoint(
            storage='s3:aptly-repo',
            prefix='test/xyz_1',
            distribution='test',
            source_kind='local',
            sources=[{
                'Name': 'aptly-repo',
                'Component': 'main'
            }],
            architectures=['amd64'],
            label='',
            origin='',
            acquire_by_hash=False
        )
    )
gopythongo/aptly-api-client
[ 19, 16, 19, 7, 1496180979 ]
def test_publish(self, *, rmock: requests_mock.Mocker) -> None:
    rmock.post("http://test/api/publish/s3%3Amyendpoint%3Atest_a__1",
               text='{"AcquireByHash":false,"Architectures":["amd64"],"Distribution":"test","Label":"test",'
                    '"Origin":"origin","Prefix":"test/a_1","SkipContents":false,'
                    '"SourceKind":"local","Sources":[{"Component":"main","Name":"aptly-repo"}],'
                    '"Storage":"s3:myendpoint"}')
    self.assertEqual(
        self.papi.publish(
            sources=[{'Name': 'aptly-repo'}],
            architectures=['amd64'],
            prefix='s3:myendpoint:test/a_1',
            distribution='test',
            label='test',
            origin='origin',
            sign_batch=True,
            sign_gpgkey='A16BE921',
            sign_passphrase='*********',
            force_overwrite=True,
            sign_keyring="/etc/gpg-managed-keyring/pubring.pub",
            sign_secret_keyring="/etc/gpg-managed-keyring/secring.gpg",
            acquire_by_hash=False
        ),
        PublishEndpoint(
            storage='s3:myendpoint',
            prefix='test/a_1',
            distribution='test',
            source_kind='local',
            sources=[{'Component': 'main', 'Name': 'aptly-repo'}],
            architectures=['amd64'],
            label='test',
            origin='origin',
            acquire_by_hash=False
        )
    )
gopythongo/aptly-api-client
[ 19, 16, 19, 7, 1496180979 ]
def test_publish_no_sign(self, *, rmock: requests_mock.Mocker) -> None:
    rmock.post("http://test/api/publish/s3%3Amyendpoint%3Atest_a__1",
               text='{"AcquireByHash":false,"Architectures":["amd64"],"Distribution":"test","Label":"test",'
                    '"Origin":"origin","Prefix":"test/a_1","SkipContents":false,'
                    '"SourceKind":"local","Sources":[{"Component":"main","Name":"aptly-repo"}],'
                    '"Storage":"s3:myendpoint"}')
    self.assertEqual(
        self.papi.publish(
            sources=[{'Name': 'aptly-repo'}],
            architectures=['amd64'],
            prefix='s3:myendpoint:test/a_1',
            distribution='test',
            label='test',
            origin='origin',
            sign_skip=True,
            acquire_by_hash=False
        ),
        PublishEndpoint(
            storage='s3:myendpoint',
            prefix='test/a_1',
            distribution='test',
            source_kind='local',
            sources=[{'Component': 'main', 'Name': 'aptly-repo'}],
            architectures=['amd64'],
            label='test',
            origin='origin',
            acquire_by_hash=False
        )
    )
gopythongo/aptly-api-client
[ 19, 16, 19, 7, 1496180979 ]
def test_update_snapshot_default_key(self, *, rmock: requests_mock.Mocker) -> None:
    rmock.put("http://test/api/publish/s3%3Aaptly-repo%3Atest_xyz__1/test",
              text='{"AcquireByHash":false,"Architectures":["amd64"],"Distribution":"test","Label":"",'
                   '"Origin":"","Prefix":"test/xyz_1","SkipContents":false,'
                   '"SourceKind":"snapshot","Sources":[{"Component":"main","Name":"aptly-repo-1"}],'
                   '"Storage":"s3:aptly-repo"}')
    self.assertEqual(
        self.papi.update(
            prefix="s3:aptly-repo:test/xyz_1",
            distribution="test",
            snapshots=[{"Name": "aptly-repo-1"}],
            force_overwrite=True,
            sign_batch=True,
            sign_passphrase="123456",
            sign_keyring="/etc/gpg-managed-keyring/pubring.pub",
            sign_secret_keyring="/etc/gpg-managed-keyring/secring.gpg",
            skip_cleanup=True
        ),
        PublishEndpoint(
            storage='s3:aptly-repo',
            prefix='test/xyz_1',
            distribution='test',
            source_kind='snapshot',
            sources=[{
                'Name': 'aptly-repo-1',
                'Component': 'main',
            }],
            architectures=['amd64'],
            label='',
            origin='',
            acquire_by_hash=False
        )
    )
gopythongo/aptly-api-client
[ 19, 16, 19, 7, 1496180979 ]
def test_no_name(self, *, rmock: requests_mock.Mocker) -> None:
    with self.assertRaises(AptlyAPIException):
        self.papi.publish(sources=[{'nope': 'nope'}], architectures=['amd64'],
                          prefix='s3:myendpoint:test/a_1', distribution='test',
                          sign_skip=False, sign_gpgkey='A16BE921',
                          sign_passphrase="*******")
    with self.assertRaises(AptlyAPIException):
        self.papi.update(snapshots=[{'nope': 'nope'}],
                         prefix='s3:myendpoint:test/a_1', distribution='test',
                         sign_skip=False, sign_gpgkey='A16BE921',
                         sign_passphrase="*******")
gopythongo/aptly-api-client
[ 19, 16, 19, 7, 1496180979 ]
def __init__(self, model, color = 'BLUE'):
    self.node_radius = 10        # Radius of a node
    self.node_color = 'GREEN'    # TODO not currently used
    self.node_outline = 'BLACK'  # TODO not currently used
    # Setting this flag prevents drawing this node and links while dragging
    self.dragging = False
    self.model = model
    # Now setup the node's bitmap so we can just blit to the screen
    # rather than having to re-draw every time.
    #self.bmp = wx.EmptyBitmap(2 * self.node_radius + 4, 2 * self.node_radius + 4)
    self.bmp = wx.EmptyBitmap(2 * self.node_radius, 3 * self.node_radius)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def HitTest(self, point):
    rect = self.GetRect()
    return rect.InsideXY(point.x, point.y)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def Erase(self, dc):
    if self.dragging:
        return
    dc.SetBrush(wx.Brush("WHITE"))
    dc.SetPen(wx.Pen("WHITE"))
    x, y = self.model.GetPosition()
    #dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
    #                 self.node_radius * 2 + 4, self.node_radius * 2 + 4)
    dc.DrawRectangle(x-self.node_radius, y-self.node_radius,
                     2 * self.node_radius, 3 * self.node_radius)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def Update(self):
    #self.led = state
    # create a DC for drawing in to the bitmap memory
    bdc = wx.MemoryDC()
    bdc.SelectObject(self.bmp)
    # First clear the background
    #bdc.SetBrush(wx.Brush("WHITE"))
    #bdc.SetPen(wx.Pen("WHITE"))
    #bdc.DrawRectangle(0, 0, self.node_radius * 2 + 4, self.node_radius * 2 + 4)
    # Now draw our default node
    #bdc.SetBrush(wx.Brush(self.node_color))
    #if self.model.GetLedState() == 1:
    #    bdc.SetPen(wx.Pen(self.node_outline, 4))
    #else:
    #    bdc.SetPen(wx.Pen("RED", 4))
    #bdc.DrawEllipse(0, 0, self.node_radius * 2, self.node_radius * 2)
    bdc.SetBrush(wx.Brush("DARKGREEN"))
    bdc.SetPen(wx.Pen("DARKGREEN"))
    bdc.DrawRectangle(0, 0, 2 * self.node_radius, 3 * self.node_radius)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def __str__(self):
    return 'node_view:'+str(self.model.id)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def __init__(self, src, dst):
    self.src = src
    self.dst = dst
    self.flashcount = 0
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def Draw(self, dc, op = wx.COPY):
    if self.src.dragging or self.dst.dragging:
        return
    if self.flashcount:
        pen = wx.Pen("GOLD")
    else:
        pen = wx.Pen("BLUE")
    pen.SetWidth(4)
    dc.SetPen(pen)
    dc.DrawLine(self.src.model.pos[0], self.src.model.pos[1],
                self.dst.model.pos[0], self.dst.model.pos[1])
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def __init__(self):
    self.lock = thread.allocate_lock()
    self.list = []
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def put(self, obj):
    "Add an object to the queue atomically."
    self.lock.acquire()
    self.list.append(obj)
    self.lock.release()
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def get(self):
    "Return the entire queue as a list and clear the queue atomically."
    self.lock.acquire()
    list = self.list
    self.list = []
    self.lock.release()
    return list
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
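The put/get pair above is a classic "swap and drain" queue: producers append under a lock, and the consumer atomically takes the whole backlog. A minimal Python 3 re-creation of the same pattern (the original uses the Python 2 `thread` module; `threading.Lock` is the modern equivalent, and all names here are illustrative):

import threading

class EventQueue:
    """Atomic put / drain-all queue, mirroring the event_queue above."""

    def __init__(self):
        self._lock = threading.Lock()
        self._items = []

    def put(self, obj):
        # Append one item under the lock.
        with self._lock:
            self._items.append(obj)

    def get(self):
        # Swap the whole list out under the lock and return it.
        with self._lock:
            items, self._items = self._items, []
        return items

q = EventQueue()
q.put(("node_added", 1))
q.put(("led_changed", 2))
assert q.get() == [("node_added", 1), ("led_changed", 2)]
assert q.get() == []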
def __init__(self, parent, id, model):
    wx.ScrolledWindow.__init__(self, parent, id,
                               style=wx.NO_FULL_REPAINT_ON_RESIZE)
    self.model = model
    self.node_dict = {}
    self.link_dict = {}
    self.node_size = 25
    self.dragNode = None
    self.dragImage = None
    self.queue = event_queue()
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def DispatchEvent(self, callback, *args):
    """Queue a net event to be handled on the GUI thread."""
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def FindNode(self, point):
    "Return the node that contains the point."
    for n in self.node_dict.itervalues():
        if n.HitTest(point):
            return n
    return None
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def OnLeftUp(self, evt):
    if not self.dragImage or not self.dragNode:
        self.dragImage = None
        self.dragNode = None
        return
    # Hide the image, end dragging, and nuke out the drag image.
    self.dragImage.Hide()
    self.dragImage.EndDrag()
    self.dragImage = None
    dc = wx.ClientDC(self)
    # reposition and draw the shape
    self.dragNode.model.pos = (
        self.dragNode.model.pos[0] + evt.GetPosition()[0] - self.dragStartPos[0],
        self.dragNode.model.pos[1] + evt.GetPosition()[1] - self.dragStartPos[1]
    )
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def OnRightDown(self, event): pass
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def OnMotion(self, evt):
    # Ignore mouse movement if we're not dragging.
    if not self.dragNode or not evt.Dragging() or not evt.LeftIsDown():
        return
    # if we have a node, but haven't started dragging yet
    if self.dragNode and not self.dragImage:
        # only start the drag after having moved a couple pixels
        tolerance = 2
        pt = evt.GetPosition()
        dx = abs(pt.x - self.dragStartPos.x)
        dy = abs(pt.y - self.dragStartPos.y)
        if dx <= tolerance and dy <= tolerance:
            return
        # Create a DragImage to draw this node while it is moving
        # (The drag image will update even as the bitmap is updating. Magical!)
        self.dragImage = wx.DragImage(self.dragNode.bmp,
                                      wx.StockCursor(wx.CURSOR_HAND))
        hotspot = self.dragStartPos - self.dragNode.model.pos \
                  + [self.dragNode.node_radius, self.dragNode.node_radius]
        self.dragImage.BeginDrag(hotspot, self, False)
        self.dragImage.Move(pt)
        # erase the node since it will be drawn by the DragImage now
        dc = wx.ClientDC(self)
        for link in self.dragNode.model.incoming.itervalues():
            if link not in self.link_dict:
                continue
            l = self.link_dict[link]
            l.Erase(dc)
            l.src.Draw(dc)
        for link in self.dragNode.model.outgoing.itervalues():
            if link not in self.link_dict:
                continue
            l = self.link_dict[link]
            l.Erase(dc)
            l.dst.Draw(dc)
        self.dragNode.Erase(dc)
        self.dragNode.dragging = True
        self.dragImage.Show()
    # if we have node and image then move it
    elif self.dragNode and self.dragImage:
        self.dragImage.Move(evt.GetPosition())
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def OnIdle(self, event):
    """Handle queued network events. See net_view.DispatchEvent()."""
    for callback, args in self.queue.get():
        callback(*args)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def Draw(self, dc):
    dc.BeginDrawing()  # for Windows compatibility
    # Since we are a scrolling window we need to prepare the DC
    self.PrepareDC(dc)
    dc.SetBackground(wx.Brush(self.GetBackgroundColour()))
    dc.Clear()
    for link in self.link_dict.itervalues():
        link.Draw(dc)
    for node in self.node_dict.itervalues():
        node.Draw(dc)
    dc.EndDrawing()
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def add_node(self, nodemodel, color = 'BLUE'):
    n = node_view(nodemodel, color)
    self.node_dict[nodemodel] = n
    nodemodel.Bind(net_model.LED_CHANGED, self.DispatchEvent,
                   self.node_state_changed)
    n.Update()
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def del_node(self, node):
    if self.node_dict.has_key(node):
        dc = wx.ClientDC(self)
        self.node_dict[node].Erase(dc)
        del self.node_dict[node]
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def node_state_changed(self, node):
    if self.node_dict.has_key(node):
        n = self.node_dict[node]
        n.Update()
        dc = wx.ClientDC(self)
        n.Draw(dc)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def del_radio_link(self, link):
    if self.link_dict.has_key(link):
        l = self.link_dict[link]
        dc = wx.ClientDC(self)
        l.Erase(dc)
        l.src.Draw(dc)
        l.dst.Draw(dc)
        del self.link_dict[link]
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def new_network(self, model):
    self.node_dict.clear()
    self.link_dict.clear()
    self.dragNode = None
    self.dragImage = None
    dummy = self.queue.get()  # empties the list
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def forward_radio_packet(self, link):
    if link in self.link_dict:
        l = self.link_dict[link]
        l.flashcount += 1
        # Return the link to its original color after a delay.
        wx.FutureCall(500, self.flash_link_off, l, link)
turon/mantis
[ 3, 2, 3, 1, 1271876993 ]
def threaded_encode_job(job):
    """
    Given a job, run it through its encoding workflow in a non-blocking
    manner.
    """
    # Update the timestamp for when the node last did something so it
    # won't terminate itself.
    NodeStateManager.i_did_something()
    job.nommer.onomnom()
duointeractive/media-nommer
[ 24, 4, 24, 2, 1292949659 ]
def threaded_heartbeat():
    """
    Fires off a threaded task to check in with feederd via SimpleDB_.
    There is a domain that contains all of the running EC2_ instances and
    their unique IDs, along with some state data.
    """
duointeractive/media-nommer
[ 24, 4, 24, 2, 1292949659 ]
def task_heartbeat():
    """
    Checks in with feederd in a non-blocking manner via
    :py:meth:`threaded_heartbeat`.
    """
duointeractive/media-nommer
[ 24, 4, 24, 2, 1292949659 ]
def test_blog_page_entries(self, browser, site_url):
    browser.visit(site_url + '/blog/')
    entries = browser.find_by_css('.page-content')
    assert browser.status_code == 200
    assert len(entries) > 0
APSL/puput
[ 541, 148, 541, 18, 1438122609 ]
def test_entry_page_author(self, browser, site_url):
    browser.visit(site_url + '/blog/author/admin/')
    entries = browser.find_by_css('.page-content')
    assert browser.status_code == 200
    assert browser.is_text_present('Entries for author')
    assert len(entries) > 0
APSL/puput
[ 541, 148, 541, 18, 1438122609 ]
def test_entry_page_tag(self, browser, site_url):
    browser.visit(site_url + '/blog/tag/test/')
    entries = browser.find_by_css('.page-content')
    assert browser.status_code == 200
    assert browser.is_text_present('Entries for tag')
    assert len(entries) > 0
APSL/puput
[ 541, 148, 541, 18, 1438122609 ]
def __init__(self, model, input_record, num_to_collect,
             name='last_n_window_collector', **kwargs):
    super(LastNWindowCollector, self).__init__(
        model, name, input_record, **kwargs)
    assert num_to_collect > 0
    self.num_to_collect = num_to_collect
    assert isinstance(input_record, schema.Scalar), \
        "Got {!r}".format(input_record)
    self.last_n = self.create_param(param_name='last_n',
                                    shape=[0],
                                    initializer=('ConstantFill', {}),
                                    optimizer=model.NoOptim)
    self.next_blob = self.create_param(
        param_name='next',
        shape=[],
        initializer=('ConstantFill',
                     {'value': 0, 'dtype': core.DataType.INT32}),
        optimizer=model.NoOptim
    )
    self.mutex = self.create_param(
        param_name='mutex',
        shape=None,
        initializer=('CreateMutex',),
        optimizer=model.NoOptim,
    )
    self.num_visited_blob = self.create_param(
        param_name='num_visited',
        shape=[],
        initializer=('ConstantFill', {
            'value': 0,
            'dtype': core.DataType.INT64,
        }),
        optimizer=model.NoOptim,
    )
    self.output_schema = schema.Struct(
        (
            'last_n',
            schema.from_blob_list(input_record, [self.last_n])
        ),
        ('num_visited', schema.Scalar(blob=self.num_visited_blob)),
        ('mutex', schema.Scalar(blob=self.mutex)),
    )
ryfeus/lambda-packs
[ 1086, 234, 1086, 13, 1476901359 ]
def __init__(self):
    Hardware.__init__(self, CLASS_NAME, CLASS_ID, VENDOR_ID, DEVICES, PRIORITY)
DecisionSystemsGroup/DSGos
[ 2, 1, 2, 1, 1427035979 ]
def get_packages():
    pkgs = ["catalyst-hook", "catalyst-libgl", "catalyst-utils", "acpid", "qt4"]
    if os.uname()[-1] == "x86_64":
        pkgs.extend(["lib32-catalyst-libgl", "lib32-catalyst-utils",
                     "lib32-opencl-catalyst"])
    return pkgs
DecisionSystemsGroup/DSGos
[ 2, 1, 2, 1, 1427035979 ]
def add_repositories(path):
    """ Adds [xorg116] and [catalyst-hd234k] repos to pacman.conf """
    with open(path, 'r') as pacman_conf:
        lines = pacman_conf.readlines()
    with open(path, "w") as pacman_conf:
        for line in lines:
            # xorg11x needs to be present before core repository
            if "[core]" in line:
                line = "[xorg116]\n"
                line += "Server = http://catalyst.wirephire.com/repo/xorg116/$arch\n"
                line += "SigLevel = Optional TrustAll\n"
                line += "## Mirrors, if the primary server does not work or is too slow:\n"
                line += "#Server = http://mirror.rts-informatique.fr/archlinux-catalyst/repo/xorg116/$arch\n"
                line += "#Server = http://mirror.hactar.bz/Vi0L0/xorg116/$arch\n\n"
                line += "[catalyst-hd234k]\n"
                # pacman.conf needs the "Server = " prefix (missing in the original)
                line += "Server = http://catalyst.wirephire.com/repo/catalyst-hd234k/$arch\n"
                line += "SigLevel = Optional TrustAll\n"
                line += "## Mirrors, if the primary server does not work or is too slow:\n"
                line += "#Server = http://70.239.162.206/catalyst-mirror/repo/catalyst-hd234k/$arch\n"
                line += "#Server = http://mirror.rts-informatique.fr/archlinux-catalyst/repo/catalyst-hd234k/$arch\n"
                line += "#Server = http://mirror.hactar.bz/Vi0L0/catalyst-hd234k/$arch\n\n"
                line += "[core]\n"
            pacman_conf.write(line)
DecisionSystemsGroup/DSGos
[ 2, 1, 2, 1, 1427035979 ]
def post_install(self, dest_dir):
    # Add repos to user's pacman.conf
    path = os.path.join(dest_dir, "etc/pacman.conf")
    self.add_repositories(path)
    super().chroot(["systemctl", "enable", "atieventsd"])
    super().chroot(["systemctl", "enable", "catalyst-hook"])
    super().chroot(["systemctl", "enable", "temp-links-catalyst"])
    super().chroot(["aticonfig", "--initial"], dest_dir)
DecisionSystemsGroup/DSGos
[ 2, 1, 2, 1, 1427035979 ]
def __init__(self): pass
inventree/InvenTree
[ 2517, 401, 2517, 134, 1490233450 ]
def plugin_name(self):
    """ Name of plugin """
    return self.PLUGIN_NAME
inventree/InvenTree
[ 2517, 401, 2517, 134, 1490233450 ]
def plugin_title(self):
    """ Title of plugin """
    if self.PLUGIN_TITLE:
        return self.PLUGIN_TITLE
    else:
        return self.plugin_name()
inventree/InvenTree
[ 2517, 401, 2517, 134, 1490233450 ]
def is_active(self):
    """ Return True if this plugin is currently active """
    cfg = self.plugin_config()
    if cfg:
        return cfg.active
    else:
        return False
inventree/InvenTree
[ 2517, 401, 2517, 134, 1490233450 ]
def Replace_BaseRF_Gap_to_AxisField_Nodes(accLattice, z_step, dir_location="", accSeq_Names=[], cavs_Names=[]):
    """
    Function will replace BaseRF_Gap nodes by AxisFieldRF_Gap.
    It is assumed that AxisFieldRF_Gap nodes do not overlap any other
    nodes (except Drifts). The replacement will be performed only for the
    specified sequences. If the cavities list is empty, all of them will
    be replaced! If you want to replace the nodes in a particular cavity,
    please specify it! The dir_location is the location of the directory
    with the axis field files.
    """
PyORBIT-Collaboration/py-orbit
[ 19, 36, 19, 5, 1481146278 ]
def Make_AxisFieldRF_Gaps_and_Find_Neihbor_Nodes(rf_length_tolerance, accLattice, accSeq, dir_location, cavs):
    """
    It returns (af_rf_gap_dict,rf_gap_ind_up_down_arr).
    This function analyzes the nodes in the accSeq and creates a dictionary
    and an array: af_rf_gap_dict[rf_gap] = AxisFieldRF_Gap(rf_gap) and
    rf_gap_ind_up_down_arr[[rf_gap,gap_ind,drift_down_ind,drift_up_ind],...]
    where rf_gap is a BaseRF_Gap instance, and indexes drift_down_ind and
    drift_up_ind are the indexes covering the edges of the axis field of the
    particular AxisFieldRF_Gap.
    """
    rank = orbit_mpi.MPI_Comm_rank(orbit_mpi.mpi_comm.MPI_COMM_WORLD)
    nodes = accSeq.getNodes()
    node_pos_dict = accLattice.getNodePositionsDict()
    #--------------------------------------------------
    #---- let's create the new AxisFieldRF_Gap instances
    af_rf_gap_dict = {}
    for cav in cavs:
        rf_gaps = cav.getRF_GapNodes()
        for rf_gap in rf_gaps:
            af_rf_gap = AxisFieldRF_Gap(rf_gap)
            af_rf_gap.readAxisFieldFile(dir_location)
            af_rf_gap_dict[rf_gap] = af_rf_gap
    #--------------------------------------------------
    #---- Let's fix the length of the axis field of the first gap if it goes
    #---- beyond the beginning of the sequence
    if(len(cavs) > 0):
        accSeq_pos = accSeq.getPosition()
        cav = cavs[0]
        rf_gap = cav.getRF_GapNodes()[0]
        (gap_pos_start,gap_pos_end) = node_pos_dict[rf_gap]
        (z_min,z_max) = af_rf_gap_dict[rf_gap].getZ_Min_Max()
        field_start = gap_pos_start - accSeq_pos + z_min
        if(field_start < rf_length_tolerance):
            z_min_new = z_min + abs(field_start) + rf_length_tolerance
            func = af_rf_gap_dict[rf_gap].getAxisFieldFunction()
            func_new = RenormalizeFunction(func,z_min_new,z_max)
            af_rf_gap_dict[rf_gap].setAxisFieldFunction(func_new)
            (z_min_new,z_max_new) = (func_new.getMinX(),func_new.getMaxX())
            af_rf_gap_dict[rf_gap].setZ_Min_Max(z_min_new,z_max_new)
            msg  = "debug =============== WARNING START ================ RF Gap="+rf_gap.getName()
            msg += os.linesep
            msg += "Inside the Replace_BaseRF_Gap_to_AxisField_Nodes Python function. "
            msg += os.linesep
            msg += "The RF gap field goes outside the start of the AccSequence = " + accSeq.getName()
            msg += os.linesep
            msg += "The RF gap = " + rf_gap.getName()
            msg += os.linesep
            msg += "That is wrong! The field will be cut shorter and re-normalized!"
            msg += os.linesep
            msg += "rf_gap (pos_start,pos_end) = " + str((gap_pos_start - accSeq_pos,gap_pos_end - accSeq_pos))
            msg += os.linesep
            msg += "old rf gap (z_min,z_max) = " + str((z_min,z_max))
            msg += os.linesep
            msg += "new rf gap (z_min,z_max) = " + str((z_min_new,z_max_new))
            msg += os.linesep
            msg += "debug =============== WARNING END ================"
            if(rank == 0): print msg
    #--------------------------------------------------
    #---- Let's fix the length of the axis fields to avoid the fields overlapping
    for cav in cavs:
        rf_gaps = cav.getRF_GapNodes()
        for gap_ind in range(len(rf_gaps) - 1):
            rf_gap0 = rf_gaps[gap_ind]
            rf_gap1 = rf_gaps[gap_ind+1]
            (gap0_pos_start,gap0_pos_end) = node_pos_dict[rf_gap0]
            (gap1_pos_start,gap1_pos_end) = node_pos_dict[rf_gap1]
            gap0_pos_end += af_rf_gap_dict[rf_gap0].getZ_Min_Max()[1]
            gap1_pos_start += af_rf_gap_dict[rf_gap1].getZ_Min_Max()[0]
            delta_z = gap0_pos_end - gap1_pos_start
            if(math.fabs(delta_z) < rf_length_tolerance):
                (z_min,z_max) = af_rf_gap_dict[rf_gap0].getZ_Min_Max()
                z_max -= delta_z
                af_rf_gap_dict[rf_gap0].setZ_Min_Max(z_min,z_max)
                #print "debug gap0=",rf_gap0.getName()," gap1=",rf_gap1.getName()," delta=",delta_z
            else:
                if(delta_z > 0.):
                    msg  = "The Replace_BaseRF_Gap_to_AxisField_Nodes Python function. "
                    msg += os.linesep
                    msg += "The RF gap field overlaps more than rf_length_tolerance[mm]= "+str(1000.*rf_length_tolerance)
                    msg += os.linesep
                    msg = msg + "RF gap 0 = " + rf_gap0.getName()
                    msg = msg + os.linesep
                    msg = msg + "RF gap 1 = " + rf_gap1.getName()
                    msg = msg + os.linesep
                    (z_min,z_max) = af_rf_gap_dict[rf_gap0].getZ_Min_Max()
                    (pos_start,pos_stop) = (gap0_pos_start+z_min,gap0_pos_start+z_max)
                    msg = msg + "Gap 0 (pos_start,pos_stop)= " + str((pos_start,pos_stop))
                    msg = msg + os.linesep
                    (z_min,z_max) = af_rf_gap_dict[rf_gap1].getZ_Min_Max()
                    (pos_start,pos_stop) = (gap1_pos_end+z_min,gap1_pos_end+z_max)
                    msg = msg + "Gap 1 (pos_start,pos_stop)= " + str((pos_start,pos_stop))
                    msg = msg + os.linesep
                    msg = msg + "Gap 0 limits (z_min,z_max)= " + str(af_rf_gap_dict[rf_gap0].getZ_Min_Max())
                    msg = msg + os.linesep
                    msg = msg + "Gap 1 limits (z_min,z_max)= " + str(af_rf_gap_dict[rf_gap1].getZ_Min_Max())
                    msg = msg + os.linesep
                    msg = msg + "Overlapping delta= " + str(delta_z)
                    msg = msg + os.linesep
                    orbitFinalize(msg)
    #--------------------------------------------------------------------------------------
    #---- array with [rf_gap, drift indexes for the gap and drifts before and after the gap]
    #---- Here we will go through all rf gaps and find indexes of the drifts before (down)
    #---- and after (up). These drifts should be replaced with the shorter drifts.
    #---- The drifts that are covered by the RF gap field completely should be removed.
    #---------------------------------------------------------------------------------------
    #---- to speed up indexing let's build rf gaps vs. index dictionary
    rf_gap_ind_dict = {}
    for node_ind in range(len(nodes)):
        node = nodes[node_ind]
        if(isinstance(node,BaseRF_Gap)):
            rf_gap_ind_dict[node] = node_ind
    #-------------------------------------
    rf_gap_ind_up_down_arr = []
    for cav in cavs:
        rf_gaps = cav.getRF_GapNodes()
        for rf_gap in rf_gaps:
            gap_ind = rf_gap_ind_dict[rf_gap]
            (gap_pos_start,gap_pos_end) = node_pos_dict[rf_gap]
            drift_down_ind = gap_ind
            drift_up_ind = gap_ind
            (z_min,z_max) = af_rf_gap_dict[rf_gap].getZ_Min_Max()
            #---- let's find the next upstream node covering the edge of the field
            drift_down_ind = gap_ind - 1
            node = nodes[drift_down_ind]
            (node_pos_start,node_pos_end) = node_pos_dict[node]
            while(node_pos_end > gap_pos_start + z_min):
                drift_down_ind = drift_down_ind - 1
                #--- if it is the beginning of sequence - we are done!
                if(drift_down_ind < 0):
                    if(gap_pos_start + z_min < node_pos_start):
                        node = nodes[drift_down_ind+1]
                        #---- by default gap_pos_start=gap_pos_end for rf gap with length=0
                        (gap_pos_start,gap_pos_end) = node_pos_dict[rf_gap]
                        (z_min,z_max) = af_rf_gap_dict[rf_gap].getZ_Min_Max()
                        (gap_pos_start,gap_pos_end) = (gap_pos_start+z_min,gap_pos_end+z_max)
                        (pos_start,pos_end) = node_pos_dict[node]
                        func = af_rf_gap_dict[rf_gap].getAxisFieldFunction()
                        delta_cut = pos_start - gap_pos_start
                        func_new = RenormalizeFunction(func,z_min+delta_cut,z_max)
                        af_rf_gap_dict[rf_gap].setAxisFieldFunction(func_new)
                        (z_min_new,z_max_new) = (func_new.getMinX(),func_new.getMaxX())
                        af_rf_gap_dict[rf_gap].setZ_Min_Max(z_min_new,z_max_new)
                        msg  = "debug =============== WARNING START ================ RF Gap="+rf_gap.getName()
                        msg += os.linesep
                        msg += "Inside the Replace_BaseRF_Gap_to_AxisField_Nodes Python function. "
                        msg += os.linesep
                        msg += "The RF gap field overlaps the first element in AccSequence."
                        msg += os.linesep
                        msg += "It means that the field goes outside the AccSequence."
                        msg += os.linesep
                        msg += "That is wrong! The field will be cut shorter and re-normalized!"
                        msg += os.linesep
                        msg += "node = " + node.getName()
                        msg += os.linesep
                        msg += "node (pos_start,pos_end) = " + str((pos_start,pos_end))
                        msg += os.linesep
                        msg += "rf_gap (pos_start,pos_end) = " + str((gap_pos_start,gap_pos_end))
                        msg += os.linesep
                        msg += "old rf gap (z_min,z_max) = " + str((z_min,z_max))
                        msg += os.linesep
                        msg += "new rf gap (z_min,z_max) = " + str((z_min_new,z_max_new))
                        msg += os.linesep
                        msg += "debug =============== WARNING END ================"
                        orbitFinalize(msg)
                    else:
                        break
                #---------------------------------------------------------------------
                node = nodes[drift_down_ind]
                (node_pos_start,node_pos_end) = node_pos_dict[node]
            drift_down_ind = drift_down_ind + 1
            #---------------------------------
            drift_up_ind = gap_ind + 1
            node = nodes[drift_up_ind]
            (node_pos_start,node_pos_end) = node_pos_dict[node]
            while(node_pos_start < gap_pos_start + z_max):
                drift_up_ind = drift_up_ind + 1
                #--- if it is the end of sequence - we are done!
                if(drift_up_ind > len(nodes) - 1):
                    if(gap_pos_start + z_max > node_pos_end):
                        node = nodes[drift_up_ind-1]
                        msg  = "The Replace_BaseRF_Gap_to_AxisField_Nodes Python function. "
                        msg += os.linesep
                        msg += "The RF gap field overlaps the last element in AccSequence."
                        msg += os.linesep
                        msg += "It means that the field goes outside the AccSequence."
                        msg += os.linesep
                        msg += "That is wrong! Stop! Please check the lattice!"
                        msg += os.linesep
                        msg += "RF gap = " + rf_gap.getName()
                        msg += os.linesep
                        msg += "node = " + node.getName()
                        msg += os.linesep
                        (gap_pos_start,gap_pos_end) = node_pos_dict[rf_gap]
                        (z_min,z_max) = af_rf_gap_dict[rf_gap].getZ_Min_Max()
                        (gap_pos_start,gap_pos_end) = (gap_pos_end+z_min,gap_pos_end+z_max)
                        (pos_start,pos_end) = node_pos_dict[node]
                        msg += "node (pos_start,pos_end) = " + str((pos_start,pos_end))
                        msg += os.linesep
                        msg += "rf_gap (pos_start,pos_end) = " + str((gap_pos_start,gap_pos_end))
                        msg += os.linesep
                        orbitFinalize(msg)
                    else:
                        break
                node = nodes[drift_up_ind]
                (node_pos_start,node_pos_end) = node_pos_dict[node]
            drift_up_ind = drift_up_ind - 1
            rf_gap_ind_up_down_arr.append([rf_gap,gap_ind,drift_down_ind,drift_up_ind])
    #----------------------------------------------------------------------
    """
    #---- Debug printing part of the code ---------------START-------------
    for node_ind in range(len(nodes)):
        node = nodes[node_ind]
        if(af_rf_gap_dict.has_key(node)):
            (pos_start,pos_stop) = node_pos_dict[node]
            pos_start += af_rf_gap_dict[node].getZ_Min_Max()[0]
            pos_stop += af_rf_gap_dict[node].getZ_Min_Max()[1]
            print "debug node_ind=",node_ind," node=",node.getName()," (pos_start,pos_end)=",(pos_start,pos_stop)
        else:
            print "debug node_ind=",node_ind," node=",node.getName()," (pos_start,pos_end)=",node_pos_dict[node]
    for [rf_gap,gap_ind,drift_down_ind,drift_up_ind] in rf_gap_ind_up_down_arr:
        print "debug gap=",rf_gap.getName()," gap_ind=",gap_ind," drift_down_ind=",drift_down_ind," drift_up_ind=",drift_up_ind
    #---- Debug printing part of the code ---------------STOP--------------
    """
    #----------------------------------------------------------------------
    return (af_rf_gap_dict,rf_gap_ind_up_down_arr)
PyORBIT-Collaboration/py-orbit
[ 19, 36, 19, 5, 1481146278 ]
def RenormalizeFunction(func, z_min, z_max):
    """
    It re-normalizes the Function in the new limits (z_min,z_max).
    We assume that the region of the function definition will be cut,
    not extended.
    """
    spline = SplineCH()
    spline.compile(func)
    integrator = GaussLegendreIntegrator(500)
    integrator.setLimits(z_min, z_max)
    integral = integrator.integral(spline)
    n_points = func.getSize()
    step = (z_max - z_min)/(n_points - 1)
    new_func = Function()
    for i in range(n_points):
        x = z_min + step*i
        y = spline.getY(x)/integral
        new_func.add(x, y)
    new_func.setConstStep(1)
    return new_func
PyORBIT-Collaboration/py-orbit
[ 19, 36, 19, 5, 1481146278 ]
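RenormalizeFunction divides the sampled values by the integral over the new limits, so the cut function integrates to one on (z_min, z_max). A self-contained numpy sketch of the same idea, with np.interp and np.trapz standing in for PyORBIT's SplineCH and GaussLegendreIntegrator (which only exist inside PyORBIT; all names here are illustrative):

import numpy as np

def renormalize(x, y, z_min, z_max, n_points=200):
    """Cut a sampled function to [z_min, z_max] and rescale to unit integral."""
    xs = np.linspace(z_min, z_max, n_points)
    ys = np.interp(xs, x, y)           # spline stand-in
    return xs, ys / np.trapz(ys, xs)   # Gauss-Legendre integrator stand-in

x = np.linspace(-1.0, 1.0, 401)
y = np.exp(-x**2 / 0.1)                # a bell-shaped "axis field"
xs, ys = renormalize(x, y, -0.5, 1.0)
print(np.trapz(ys, xs))                # ~1.0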
def create(kernel):
    result = Tangible()
    result.template = "object/tangible/mission/quest_item/shared_slooni_jong_q1_needed.iff"
    result.attribute_template_id = -1
    result.stfName("loot_tals_n","slooni_jong_q1_needed")
    # without this return the factory gets None back
    return result
anhstudios/swganh
[ 62, 37, 62, 37, 1297996365 ]
def get_printable_location_up(block_method):
    from som.vmobjects.method_bc import BcAbstractMethod
    assert isinstance(block_method, BcAbstractMethod)
    return "to:do: " + block_method.merge_point_string()
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def get_printable_location_down(block_method):
    from som.vmobjects.method_bc import BcAbstractMethod
    assert isinstance(block_method, BcAbstractMethod)
    return "downTo:do: " + block_method.merge_point_string()
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def _to_do_int(i, by_increment, top, block, block_method):
    assert isinstance(i, int)
    assert isinstance(top, int)
    while i <= top:
        jitdriver_int.jit_merge_point(block_method=block_method)
        block_method.invoke_2(block, Integer(i))
        i += by_increment
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def _to_do(rcvr, limit, block):
    block_method = block.get_method()
    i = rcvr.get_embedded_integer()
    if isinstance(limit, Double):
        _to_do_double(i, 1, limit.get_embedded_double(), block, block_method)
    else:
        _to_do_int(i, 1, limit.get_embedded_integer(), block, block_method)
    return rcvr
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def _down_to_do_int(i, by_increment, bottom, block, block_method):
    assert isinstance(i, int)
    assert isinstance(bottom, int)
    while i >= bottom:
        jitdriver_int_down.jit_merge_point(block_method=block_method)
        block_method.invoke_2(block, Integer(i))
        i -= by_increment
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def _down_to_do(rcvr, limit, block):
    block_method = block.get_method()
    i = rcvr.get_embedded_integer()
    if isinstance(limit, Double):
        _down_to_do_double(i, 1, limit.get_embedded_double(), block, block_method)
    else:
        _down_to_do_int(i, 1, limit.get_embedded_integer(), block, block_method)
    return rcvr
SOM-st/PySOM
[ 24, 4, 24, 5, 1382259745 ]
def __init__(self, _tagLst, _attrName, _attrVal, _data):
    self.tagList = _tagLst
    self.attrName = _attrName
    self.attrVal = _attrVal
    self.dataToCheck = _data
    self.status_baseline = False
    self.status_superior = False
    self.status_exemplary = False
    self.__assistant = JudgeAssistant.JudgeAssistant()
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def JudgeBaseline(self, context):
    # No step should crash
    self.__assistant.CheckCrashes(context)
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def JudgeSuperior(self, context):
    self.status_superior = self.status_baseline
    return self.status_superior
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def JudgeExemplary(self, context):
    if (self.status_superior == False):
        self.status_exemplary = self.status_superior
    return self.status_exemplary
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def __init__(
    self,
    queue: q.Queue = None,
    burst_limit: int = 30,
    time_limit_ms: int = 1000,
    exc_route: Callable[[Exception], None] = None,
    autostart: bool = True,
    name: str = None,
tzpBingo/github-trending
[ 42, 20, 42, 1, 1504755582 ]
def run(self) -> None:
    """
    Do not use the method except for unthreaded testing purposes, the method
    normally is automatically called by autostart argument.
    """
    times: List[float] = []  # used to store each callable processing time
    while True:
        item = self._queue.get()
        if self.__exit_req:
            return  # shutdown thread
        # delay routine
        now = time.perf_counter()
        t_delta = now - self.time_limit  # calculate early to improve perf.
        if times and t_delta > times[-1]:
            # if last call was before the limit time-window
            # used to impr. perf. in long-interval calls case
            times = [now]
        else:
            # collect last in current limit time-window
            times = [t for t in times if t >= t_delta]
            times.append(now)
            if len(times) >= self.burst_limit:  # if throughput limit was hit
                time.sleep(times[1] - t_delta)
        # finally process one
        try:
            func, args, kwargs = item
            func(*args, **kwargs)
        except Exception as exc:  # re-route any exceptions
            self.exc_route(exc)   # to prevent thread exit
tzpBingo/github-trending
[ 42, 20, 42, 1, 1504755582 ]
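The delay routine in run() keeps a sliding window of call timestamps: entries older than time_limit fall out of the window, and once burst_limit timestamps accumulate inside it, the thread sleeps until the second-oldest one expires. A stripped-down, single-threaded sketch of just that bookkeeping (no queue or worker thread; the constants are illustrative):

import time

BURST_LIMIT = 3   # max calls per window
TIME_LIMIT = 1.0  # window length in seconds

times = []

def throttle():
    global times
    now = time.perf_counter()
    t_delta = now - TIME_LIMIT
    if times and t_delta > times[-1]:
        times = [now]                       # window went stale: start over
    else:
        times = [t for t in times if t >= t_delta]
        times.append(now)
        if len(times) >= BURST_LIMIT:       # throughput limit hit
            time.sleep(times[1] - t_delta)  # wait until a slot frees up

for i in range(6):
    throttle()                              # later calls get delayed
    print(i, round(time.perf_counter(), 2))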
def _default_exception_handler(exc: Exception) -> NoReturn:
    """
    Dummy exception handler which re-raises exception in thread. Could be
    possibly overwritten by subclasses.
    """
    raise exc
tzpBingo/github-trending
[ 42, 20, 42, 1, 1504755582 ]
def __init__(
    self,
    all_burst_limit: int = 30,
    all_time_limit_ms: int = 1000,
    group_burst_limit: int = 20,
    group_time_limit_ms: int = 60000,
    exc_route: Callable[[Exception], None] = None,
    autostart: bool = True,
tzpBingo/github-trending
[ 42, 20, 42, 1, 1504755582 ]
def start(self) -> None:
    """Method is used to manually start the ``MessageQueue`` processing."""
    self._all_delayq.start()
    self._group_delayq.start()
tzpBingo/github-trending
[ 42, 20, 42, 1, 1504755582 ]
def __call__(self, promise: Callable, is_group_msg: bool = False) -> Callable:
    """
    Processes callables in throughput-limiting queues to avoid hitting limits
    (specified with :attr:`burst_limit` and :attr:`time_limit`).

    Args:
        promise (:obj:`callable`): Mainly the ``telegram.utils.promise.Promise``
            (see Notes for other callables), that is processed in delay queues.
        is_group_msg (:obj:`bool`, optional): Defines whether ``promise`` would
            be processed in *group* + *all* ``DelayQueue``s (if set to
            :obj:`True`), or only through the *all* ``DelayQueue`` (if set to
            :obj:`False`), resulting in needed delays to avoid hitting
            specified limits. Defaults to :obj:`False`.

    Note:
        Method is designed to accept ``telegram.utils.promise.Promise`` as
        ``promise`` argument, but other callables could be used too. For
        example, lambdas or simple functions could be used to wrap the original
        func to be called with needed args. In that case, be sure that either
        the wrapper func does not raise outside exceptions or the proper
        :attr:`exc_route` handler is provided.

    Returns:
        :obj:`callable`: Used as ``promise`` argument.
    """
    if not is_group_msg:  # ignore middle group delay
        self._all_delayq(promise)
    else:  # use middle group delay
        self._group_delayq(self._all_delayq, promise)
    return promise
tzpBingo/github-trending
[ 42, 20, 42, 1, 1504755582 ]
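Per the docstring, plain callables work in place of Promise objects, which makes the routing easy to demonstrate. A hedged sketch, assuming the two classes above are importable as in python-telegram-bot v12 (`telegram.ext.messagequeue`; the module was removed in later releases, so treat the import and `stop()` call as assumptions about that era's API):

from telegram.ext.messagequeue import MessageQueue

mq = MessageQueue(all_burst_limit=29, group_burst_limit=19)
notify = lambda: print("sent")  # stands in for telegram.utils.promise.Promise
mq(notify)                      # private chat: routed through *all* queue only
mq(notify, is_group_msg=True)   # group chat: *group* queue first, then *all*
mq.stop()                       # shut the worker threads down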
def wrapped(self: 'Bot', *args: object, **kwargs: object) -> object:
    # pylint: disable=W0212
    queued = kwargs.pop(
        'queued', self._is_messages_queued_default  # type: ignore[attr-defined]
    )
    isgroup = kwargs.pop('isgroup', False)
    if queued:
        prom = Promise(method, (self,) + args, kwargs)
        return self._msg_queue(prom, isgroup)  # type: ignore[attr-defined]
    return method(self, *args, **kwargs)
tzpBingo/github-trending
[ 42, 20, 42, 1, 1504755582 ]
def __init__(self, _tagLst, _attrName, _attrVal, _data):
    self.tagList = _tagLst
    self.attrName = _attrName
    self.attrVal = _attrVal
    self.dataToCheck = _data
    self.status_baseline = False
    self.status_superior = False
    self.status_exemplary = False
    self.__assistant = JudgeAssistant.JudgeAssistant()
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def JudgeBaseline(self, context):
    # No step should crash
    self.__assistant.CheckCrashes(context)
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def JudgeSuperior(self, context):
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def JudgeExemplary(self, context):
    self.status_exemplary = self.status_superior
    return self.status_exemplary
KhronosGroup/COLLADA-CTS
[ 30, 9, 30, 11, 1336571488 ]
def forwards(self, orm):
    # Adding model 'Review'
    db.create_table(u'review_review', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
        ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[USER_MODEL['orm_label']], null=True, blank=True)),
        ('content', self.gf('django.db.models.fields.TextField')(max_length=1024, blank=True)),
        ('language', self.gf('django.db.models.fields.CharField')(max_length=5, blank=True)),
        ('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
    ))
    db.send_create_signal(u'review', ['Review'])

    # Adding model 'ReviewExtraInfo'
    db.create_table(u'review_reviewextrainfo', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('type', self.gf('django.db.models.fields.CharField')(max_length=256)),
        ('review', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['review.Review'])),
        ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
        ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
    ))
    db.send_create_signal(u'review', ['ReviewExtraInfo'])

    # Adding model 'RatingCategory'
    db.create_table(u'review_ratingcategory', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
    ))
    db.send_create_signal(u'review', ['RatingCategory'])

    # Adding model 'RatingCategoryTranslation'
    db.create_table(u'review_ratingcategorytranslation', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
        ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['review.RatingCategory'])),
        ('language', self.gf('django.db.models.fields.CharField')(max_length=2)),
    ))
    db.send_create_signal(u'review', ['RatingCategoryTranslation'])

    # Adding model 'Rating'
    db.create_table(u'review_rating', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('value', self.gf('django.db.models.fields.CharField')(max_length=20)),
        ('review', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ratings', to=orm['review.Review'])),
        ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['review.RatingCategory'])),
    ))
    db.send_create_signal(u'review', ['Rating'])
bitmazk/django-review
[ 125, 48, 125, 8, 1376058990 ]
def oscillation_period(lambda_jeans):
    return np.sqrt(np.pi/rho0)*wavelength/np.sqrt(lambda_jeans*lambda_jeans - wavelength*wavelength)
gandalfcode/gandalf
[ 41, 13, 41, 44, 1375101397 ]
def jeans_unstable_solution(x, omega, rhofit):
    return rho0*(1.0 + amp*np.sin(2.0*math.pi*x/wavelength)*np.cosh(omega*tsim))
gandalfcode/gandalf
[ 41, 13, 41, 44, 1375101397 ]
def jeans_stable_solution(x, omega, rhofit):
    return rhofit*(1.0 + amp*np.sin(2.0*math.pi*x/wavelength)*np.cos(omega*tsim))
gandalfcode/gandalf
[ 41, 13, 41, 44, 1375101397 ]
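These three fitting functions are the standard linearized Jeans analysis written out; assuming code units with G = 1 (the snippets never reference G) and the sound speed c_s folded into lambda_jeans:

\[
\omega^2 = c_s^2 k^2 - 4\pi G\rho_0, \qquad k = \frac{2\pi}{\lambda}, \qquad
\lambda_J = c_s\sqrt{\frac{\pi}{G\rho_0}},
\]
so a stable mode ($\lambda < \lambda_J$) oscillates with period
\[
T = \frac{2\pi}{\omega}
  = \sqrt{\frac{\pi}{G\rho_0}}\,\frac{\lambda}{\sqrt{\lambda_J^2 - \lambda^2}},
\]
which is exactly `oscillation_period` with $G = 1$; for $\lambda > \lambda_J$, $\omega^2 < 0$ and the perturbation grows as $\cosh(|\omega| t)$, matching `jeans_unstable_solution`, while `jeans_stable_solution` carries the oscillatory $\cos(\omega t)$ factor.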
def load_config(path):
    # Load and parse required config file
    try:
        config = configparser()
        config.optionxform = str
        if len(config.read(path)) != 1:
            raise IOError
    except StandardError:
        # Either couldn't read/find the file, or couldn't parse it.
        print "Warning! Could not load %s" % (path,)
        raise ImportError
    else:
        return config
ellonweb/merlin
[ 19, 22, 19, 1, 1229460974 ]
def gen_random_label():
    label = ""
    for i in range(gen.randint(1, maxsize)):
        label = label + gen.choice(ldh)
    return label
atmark-techno/atmark-dist
[ 3, 2, 3, 4, 1476164728 ]
def usage():
    sys.stdout.write("Usage: " + sys.argv[0] + " [-n number] " + \
                     "[-p percent-random] [-t TLD]\n")
    sys.stdout.write("       [-m MAXSIZE] [-f zone-file]\n")
atmark-techno/atmark-dist
[ 3, 2, 3, 4, 1476164728 ]
def pam_conv(auth, query_list, userData):
    resp = []
    for i in range(len(query_list)):
        query, type = query_list[i]
        if type == PAM.PAM_PROMPT_ECHO_ON:
            val = raw_input(query)
            resp.append((val, 0))
        elif type == PAM.PAM_PROMPT_ECHO_OFF:
            val = getpass(query)
            resp.append((val, 0))
        elif type == PAM.PAM_PROMPT_ERROR_MSG or type == PAM.PAM_PROMPT_TEXT_INFO:
            print query
            resp.append(('', 0))
        else:
            return None
    return resp
unix4you2/practico
[ 5, 2, 5, 1, 1656680052 ]
def setUp(self): """ Add a user and a course """ super().setUp() # create and log in a staff user. # create and log in a non-staff user self.user = UserFactory() self.factory = RequestFactory() self.client = AjaxEnabledTestClient() self.client.login(username=self.user.username, password='test') self.course_create_rerun_url = reverse('course_handler') self.course_start = datetime.datetime.utcnow() self.course_end = self.course_start + datetime.timedelta(days=30) self.enrollment_start = self.course_start - datetime.timedelta(days=7) self.enrollment_end = self.course_end - datetime.timedelta(days=14) source_course = CourseFactory.create( org='origin', number='the_beginning', run='first', display_name='the one and only', start=self.course_start, end=self.course_end, enrollment_start=self.enrollment_start, enrollment_end=self.enrollment_end ) self.source_course_key = source_course.id for role in [CourseInstructorRole, CourseStaffRole]: role(self.source_course_key).add_users(self.user)
eduNEXT/edunext-platform
[ 28, 7, 28, 10, 1414072000 ]
def test_rerun(self):
    """
    Just testing the functionality the view handler adds over the tasks
    tested in test_clone_course
    """
    add_organization({
        'name': 'Test Organization',
        'short_name': self.source_course_key.org,
        'description': 'Testing Organization Description',
    })
    response = self.client.ajax_post(self.course_create_rerun_url, {
        'source_course_key': str(self.source_course_key),
        'org': self.source_course_key.org,
        'course': self.source_course_key.course,
        'run': 'copy',
        'display_name': 'not the same old name',
    })
    self.assertEqual(response.status_code, 200)
    data = parse_json(response)
    dest_course_key = CourseKey.from_string(data['destination_course_key'])
    self.assertEqual(dest_course_key.run, 'copy')
    source_course = self.store.get_course(self.source_course_key)
    dest_course = self.store.get_course(dest_course_key)
    self.assertEqual(dest_course.start, CourseFields.start.default)
    self.assertEqual(dest_course.end, source_course.end)
    self.assertEqual(dest_course.enrollment_start, None)
    self.assertEqual(dest_course.enrollment_end, None)
    course_orgs = get_course_organizations(dest_course_key)
    self.assertEqual(len(course_orgs), 1)
    self.assertEqual(course_orgs[0]['short_name'], self.source_course_key.org)
eduNEXT/edunext-platform
[ 28, 7, 28, 10, 1414072000 ]
def test_newly_created_course_has_web_certs_enabled(self, store):
    """
    Tests newly created course has web certs enabled by default.
    """
    with modulestore().default_store(store):
        response = self.client.ajax_post(self.course_create_rerun_url, {
            'org': 'orgX',
            'number': 'CS101',
            'display_name': 'Course with web certs enabled',
            'run': '2015_T2'
        })
        self.assertEqual(response.status_code, 200)
        data = parse_json(response)
        new_course_key = CourseKey.from_string(data['course_key'])
        course = self.store.get_course(new_course_key)
        self.assertTrue(course.cert_html_view_enabled)
eduNEXT/edunext-platform
[ 28, 7, 28, 10, 1414072000 ]
def test_course_creation_for_unknown_organization_relaxed(self, store):
    """
    Tests that when ORGANIZATIONS_AUTOCREATE is True,
    creating a course-run with an unknown org slug will create
    an organization and organization-course linkage in the system.
    """
    with self.assertRaises(InvalidOrganizationException):
        get_organization_by_short_name("orgX")
    with modulestore().default_store(store):
        response = self.client.ajax_post(self.course_create_rerun_url, {
            'org': 'orgX',
            'number': 'CS101',
            'display_name': 'Course with web certs enabled',
            'run': '2015_T2'
        })
        self.assertEqual(response.status_code, 200)
        self.assertIsNotNone(get_organization_by_short_name("orgX"))
        data = parse_json(response)
        new_course_key = CourseKey.from_string(data['course_key'])
        course_orgs = get_course_organizations(new_course_key)
        self.assertEqual(len(course_orgs), 1)
        self.assertEqual(course_orgs[0]['short_name'], 'orgX')
eduNEXT/edunext-platform
[ 28, 7, 28, 10, 1414072000 ]
def test_course_creation_for_unknown_organization_strict(self, store):
    """
    Tests that when ORGANIZATIONS_AUTOCREATE is False,
    creating a course-run with an unknown org slug will raise
    a validation error.
    """
    with modulestore().default_store(store):
        response = self.client.ajax_post(self.course_create_rerun_url, {
            'org': 'orgX',
            'number': 'CS101',
            'display_name': 'Course with web certs enabled',
            'run': '2015_T2'
        })
        self.assertEqual(response.status_code, 400)
        with self.assertRaises(InvalidOrganizationException):
            get_organization_by_short_name("orgX")
        data = parse_json(response)
        self.assertIn('Organization you selected does not exist in the system',
                      data['error'])
eduNEXT/edunext-platform
[ 28, 7, 28, 10, 1414072000 ]
def setUp(self):
    self._app = create_test_app()
    with self._app.app_context():
        model.syncdb()
    self.app = self._app.test_client()
spacewiki/spacewiki
[ 10, 1, 10, 16, 1427854923 ]
def test_no_page(self):
    self.assertEqual(self.app.get('/missing-page').status_code, 200)
spacewiki/spacewiki
[ 10, 1, 10, 16, 1427854923 ]
def setup(core, object): return
ProjectSWGCore/NGECore2
[ 23, 70, 23, 56, 1372673790 ]
def main():
    fig = plt.figure(figsize=[10, 5])
    ax1 = fig.add_subplot(1, 2, 1, projection=ccrs.SouthPolarStereo())
    ax2 = fig.add_subplot(1, 2, 2, projection=ccrs.SouthPolarStereo(),
                          sharex=ax1, sharey=ax1)
    fig.subplots_adjust(bottom=0.05, top=0.95,
                        left=0.04, right=0.95, wspace=0.02)

    # Limit the map to -60 degrees latitude and below.
    ax1.set_extent([-180, 180, -90, -60], ccrs.PlateCarree())

    ax1.add_feature(cfeature.LAND)
    ax1.add_feature(cfeature.OCEAN)

    ax1.gridlines()
    ax2.gridlines()

    ax2.add_feature(cfeature.LAND)
    ax2.add_feature(cfeature.OCEAN)

    # Compute a circle in axes coordinates, which we can use as a boundary
    # for the map. We can pan/zoom as much as we like - the boundary will be
    # permanently circular.
    theta = np.linspace(0, 2*np.pi, 100)
    center, radius = [0.5, 0.5], 0.5
    verts = np.vstack([np.sin(theta), np.cos(theta)]).T
    circle = mpath.Path(verts * radius + center)

    ax2.set_boundary(circle, transform=ax2.transAxes)

    plt.show()
SciTools/cartopy
[ 1188, 337, 1188, 330, 1343979839 ]
def itemNames():
ProjectSWGCore/NGECore2
[ 23, 70, 23, 56, 1372673790 ]
def check(self, instance):
    # read config
    tags = instance.get('tags', [])
    host = instance.get('host', '127.0.0.1')
    port = instance.get('port', 23456)
    prefix = instance.get('prefix', 'plog.')
    suffix = instance.get('suffix', '')
    timeout = instance.get('timeout', 3)
    max_size = instance.get('max_size', 65536)

    # create socket, ask for stats
    sock = socket(AF_INET, SOCK_DGRAM)
    try:
        sock.sendto("\0\0statfordatadogplease", (host, port))
        # wait for a reply
        ready = select([sock], [], [], timeout)
        if not ready[0]:
            raise socketError('timeout')
        data, addr = sock.recvfrom(max_size)
        stats = loads(data)
    finally:
        sock.close()

    def rate(name, val):
        self.rate(prefix + name + suffix, val, tags=tags)

    def gauge(name, val):
        self.gauge(prefix + name + suffix, val, tags=tags)

    gauge('uptime', stats['uptime'])
    rate('udp_simple', stats['udp_simple_messages'])
    rate('udp_invalid_version', stats['udp_invalid_version'])
    rate('v0_invalid_type', stats['v0_invalid_type'])
    rate('v0_invalid_multipart_header', stats['v0_invalid_multipart_header'])
    rate('unknown_command', stats['unknown_command'])
    rate('v0_commands', stats['v0_commands'])
    rate('exceptions', stats['exceptions'])
    rate('unhandled_objects', stats['unhandled_objects'])
    rate('holes.from_dead_port', stats['holes_from_dead_port'])
    rate('holes.from_new_message', stats['holes_from_new_message'])
    rate('fragments', sum(stats['v0_fragments']))
    rate('invalid_checksum', sum(stats['v0_invalid_checksum']))
    rate('invalid_fragments', sum(sum(a) for a in stats['v0_invalid_fragments']))
    rate('missing_fragments', sum(sum(a) for a in stats['dropped_fragments']))

    defragmenter = stats['defragmenter']
    rate('defragmenter.evictions', defragmenter['evictions'])
    rate('defragmenter.hits', defragmenter['hits'])
    rate('defragmenter.miss', defragmenter['misses'])

    def flatsum(json):
        if isinstance(json, Number):
            return json
        elif isinstance(json, list):
            return sum(flatsum(o) for o in json)
        else:
            return 0

    def feed_handler_metrics(path, json):
        if isinstance(json, dict):
            for k in json:
                feed_handler_metrics(path + '.' + k, json[k])
        else:
            existing = handler_metrics.get(path, 0)
            handler_metrics[path] = existing + flatsum(json)

    handlers = stats['handlers']
    handler_metrics = {}
    for handler in handlers:
        handler_name = handler['name']
        for metric_name in handler:
            if metric_name != 'name':
                metric_path = handler_name + '.' + metric_name
                feed_handler_metrics(metric_path, handler[metric_name])

    for path in handler_metrics:
        # TODO: rates instead of gauges through YAML config?
        # We can use rate(my.metric{*}) on Datadog's side for graphing,
        # but alerts do not support functions _yet_.
        gauge(path, handler_metrics[path])
pcarrier/plog
[ 70, 25, 70, 13, 1391253070 ]
def _core_plugin(self):
    return manager.NeutronManager.get_plugin()
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _l3_plugin(self):
    return manager.NeutronManager.get_service_plugins().get(
        constants.L3_ROUTER_NAT)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _make_qos_dict(self, qos, fields=None):
    res = {'id': qos.id,
           'tenant_id': qos.tenant_id,
           'name': qos.name,
           'description': qos.description,
           'direction': qos.direction,
           'rate': qos.rate,
           'burst': qos.burst,
           'cburst': qos.cburst,
           'default_queue_id': qos.default_queue_id}
    if qos.port_id is not None:
        res.update({'target_type': 'port', 'target_id': qos.port_id})
    elif qos.router_id is not None:
        res.update({'target_type': 'router', 'target_id': qos.router_id})
    else:
        res.update({'target_type': None, 'target_id': None})
    res['qos_queues'] = [
        self._make_qos_queue_dict(q)
        for q in filter(lambda q: q.parent_queue is None, qos.queues)
    ]
    res['unattached_filters'] = [
        self._make_qos_filter_dict(f)
        for f in filter(lambda f: f.queue is None, qos.filters)
    ]
    return self._fields(res, fields)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _aggregate_rate_of_qos(self, qos):
    return reduce(lambda x, y: x + y,
                  [q.rate for q in qos.queues if q.parent_queue is None])
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _check_qos_target(self, context, target_type, target_id, qos_direction):
    ret = {'router_id': None, 'port_id': None}
    if target_type is not None and target_id is not None:
        # Need to check
        try:
            if target_type == 'port':
                target = self._core_plugin._get_port(context, target_id)
                device_owner = target['device_owner']
                valid_port_target = False
                if device_owner == n_constants.DEVICE_OWNER_FLOATINGIP:
                    if qos_direction == 'egress':
                        valid_port_target = True
                elif device_owner.startswith('compute'):
                    valid_port_target = True
                if not valid_port_target:
                    raise ext_qos.QosInvalidPortType(
                        port_id=target_id,
                        port_type=device_owner)
                ret['port_id'] = target_id
            elif target_type == 'router':
                target = self._l3_plugin._get_router(context, target_id)
                ret['router_id'] = target_id
            else:
                # Should not reach
                target = None
        except exc.NoResultFound:
            raise ext_qos.QosTargetNotFound(target_id=target_id)
        for qos in target.eayun_qoss:
            if qos.direction == qos_direction:
                raise ext_qos.QosConflict()
    return ret
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def create_qos(self, context, qos):
    """ Create a qos and its default queue. """
    qos = qos['qos']
    default_queue = self._extract_default_queue_from_qos_param(qos)
    if qos['rate'] < default_queue['rate']:
        raise ext_qos.QosRateTooSmall(id=None, rate=qos['rate'])
    qos_target = self._check_qos_target(
        context, qos['target_type'], qos['target_id'], qos['direction'])
    tenant_id = self._get_tenant_id_for_create(context, qos)
    qos_id = qos.get('id', uuidutils.generate_uuid())
    default_queue_id = uuidutils.generate_uuid()
    with context.session.begin(subtransactions=True):
        qos_db = Qos(
            id=qos_id, tenant_id=tenant_id,
            name=qos['name'], description=qos['description'],
            direction=qos['direction'],
            port_id=qos_target['port_id'],
            router_id=qos_target['router_id'],
            rate=qos['rate'], burst=qos['burst'], cburst=qos['cburst'],
            default_queue_id=default_queue_id)
        qos_queue_db = QosQueue(
            id=default_queue_id, tenant_id=tenant_id,
            qos_id=qos_id, parent_id=None, prio=7,
            rate=default_queue['rate'], ceil=default_queue['ceil'],
            burst=default_queue['burst'], cburst=default_queue['cburst'])
        context.session.add(qos_db)
        context.session.add(qos_queue_db)
    return self._make_qos_dict(qos_db)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def delete_qos(self, context, id):
    qos = self._get_qos(context, id)
    with context.session.begin(subtransactions=True):
        context.session.delete(qos)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def get_qoss_count(self, context, filters=None):
    return self._get_collection_count(context, Qos, filters=filters)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]