prompt: large_string (lengths 70 to 991k)
completion: large_string (lengths 0 to 1.02k)
<|file_name|>directions.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
class Direction(Enum):
    invalid = (0.0, 0.0)
    up = (0.0, -1.0)
    down = (0.0, 1.0)
    left = (-1.0, 0.0)
    right = (1.0, 0.0)

    def x(self):
        return self.value[0]

    def y(self):
        return self.value[1]

    def __str__(self):
        return str(self.value)<|fim▁end|>
from enum import Enum
<|file_name|>main.go<|end_file_name|><|fim▁begin|>package main

import "time"
import "fmt"
import "os"
import "io/ioutil"
import "math/rand"
import "github.com/antonholmquist/jason"

var LOADED *jason.Object // global var to hold the entire json object from file

// loads from file into the jason.Object pointer.
func LoadRollTables() (err error) {
    fi, err := os.Open("./rolltables.json") // hardcoded input file
    if err != nil {
        panic(err)
    }
    defer fi.Close()

    r, err := ioutil.ReadAll(fi) // read entire file at once, may need to change if files get large
    if err != nil {
        panic(err)
    }

    LOADED, err = jason.NewObjectFromBytes(r) // take the bytes object and turn into the jason object
    if err != nil {
        panic(err)
    }

    return err
}

// rolls all the tables in the jason.Object pointer LOADED.
func RollAllTables() (err error) {
    rtables, err := LOADED.GetObjectArray("rolltables")
    for _, value := range rtables {
        err = RollOneTable(value)
        if err != nil {
            fmt.Println(err) // just want to print, not panic so it keeps going through other rolls
        }
    }
    return
}

func RollOneTable(rt *jason.Object) (err error) {
    // initialize vars
    var i int64       // counter for the loop of rolls
    var rolls []int64 // holds a list of all rolls if needed later for debugging
    var total int64   // result amount
    var dnum int64    // how many dice to roll
    var dmod int64    // this is to add a single fixed modifier amount to the roll if you desire

    dnum, err = rt.GetInt64("Dicenum")
    if err != nil {
        panic(err)
    }
    dmod, err = rt.GetInt64("Dicemod")
    if err != nil {
        panic(err)
    }

    // generates a roll based off of dnum and dsize
    for i = 0; i < dnum; i++ {
        var dsize int64 // number of sides of dice, 1 being the lowest always
        dsize, err = rt.GetInt64("Dicesize")
        if err != nil {
            panic(err)
        }
        roll := rand.Int63n(dsize) + 1 // + 1 makes it 1-100 instead of 0-99
        rolls = append(rolls, roll)<|fim▁hole|>

    // adds dmod to total from the rolls
    total += dmod

    rollsarray, err := rt.GetObjectArray("Rolls")
    if err != nil {
        panic(err)
    }

    // this portion of the function loads the inidividual rolls and then checks if the generated roll matches
    for _, individRolls := range rollsarray {
        var themin int64 // "Min" in json
        var themax int64 // "Max" in json
        themin, err = individRolls.GetInt64("Min")
        if err != nil {
            panic(err)
        }
        themax, err = individRolls.GetInt64("Max")
        if err != nil {
            panic(err)
        }
        if total >= themin && total <= themax {
            var result string
            result, err = individRolls.GetString("Result")
            if err != nil {
                panic(err)
            }
            var name string
            name, err = rt.GetString("Name")
            if err != nil {
                panic(err)
            }
            fmt.Printf("%s: %s\n", name, result)

            var subrolls *jason.Object // place to store any subrolls
            subrolls, err = individRolls.GetObject("rolltable")
            if err != nil {
                // idea is if there's an error, there's no subrolls so just pass
            } else {
                // but, if err is nil, we call ourselves recursively
                err = RollOneTable(subrolls)
                if err != nil {
                    fmt.Println(err) // don't panic as their might be a non-nil error with further sub rolls
                    return
                }
            }
        }
    }

    err = nil
    return
}

func main() {
    rand.Seed(time.Now().UnixNano()) // sets a unique seed for the random number generator
    LoadRollTables()                 // loads the json into a *jason.Object global
    RollAllTables()                  // rolls it all
}<|fim▁end|>
        total += roll
    }
<|file_name|>publish_content.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

import logging
import superdesk
import superdesk.publish

from datetime import timedelta
from eve.utils import config, ParsedRequest
from flask import current_app as app
from superdesk import get_resource_service
from superdesk.celery_task_utils import get_lock_id
from superdesk.errors import PublishHTTPPushClientError
from superdesk.lock import lock, unlock
from superdesk.celery_app import celery
from superdesk.utc import utcnow
from superdesk.profiling import ProfileManager
from .publish_queue import QueueState

logger = logging.getLogger(__name__)

UPDATE_SCHEDULE_DEFAULT = {'seconds': 10}
PUBLISH_QUEUE = 'publish_queue'
STATE_PENDING = 'pending'


class PublishContent(superdesk.Command):
    """Runs deliveries"""

    def run(self, provider_type=None):
        publish.apply_async(expires=10)


@celery.task(soft_time_limit=1800)
def publish():
    """Fetch items from publish queue as per the configuration, call the transmit function."""
    with ProfileManager('publish:transmit'):
        lock_name = get_lock_id("Transmit", "Articles")
        if not lock(lock_name, expire=1810):
            logger.info('Task: {} is already running.'.format(lock_name))
            return

        try:
            # Query any oustanding transmit requests
            items = list(get_queue_items())
            if len(items) > 0:
                transmit_items(items)

            # Query any outstanding retry attempts
            retry_items = list(get_queue_items(True))
            if len(retry_items) > 0:
                transmit_items(retry_items)
        except Exception:
            logger.exception('Task: {} failed.'.format(lock_name))
        finally:
            unlock(lock_name)


def get_queue_items(retries=False):
    if retries:
        lookup = {
            '$and': [
                {'state': QueueState.RETRYING.value},
                {'next_retry_attempt_at': {'$lte': utcnow()}}
            ]
        }
    else:
        lookup = {
            '$and': [
                {'state': QueueState.PENDING.value}
            ]
        }
    request = ParsedRequest()<|fim▁hole|>
    request.sort = '[("_created", 1), ("subscriber_id", 1), ("published_seq_num", 1)]'
    return get_resource_service(PUBLISH_QUEUE).get(req=request, lookup=lookup)


@celery.task(soft_time_limit=600)
def transmit_subscriber_items(queue_items, subscriber):
    lock_name = get_lock_id('Subscriber', 'Transmit', subscriber)
    publish_queue_service = get_resource_service(PUBLISH_QUEUE)

    if not lock(lock_name, expire=610):
        return

    try:
        for queue_item in queue_items:
            log_msg = '_id: {_id} item_id: {item_id} state: {state} ' \
                      'item_version: {item_version} headline: {headline}'.format(**queue_item)
            try:
                # check the status of the queue item
                queue_item = publish_queue_service.find_one(req=None, _id=queue_item[config.ID_FIELD])
                if queue_item.get('state') not in [QueueState.PENDING.value, QueueState.RETRYING.value]:
                    logger.info('Transmit State is not pending/retrying for queue item: {}. It is in {}'.
                                format(queue_item.get(config.ID_FIELD), queue_item.get('state')))
                    continue

                # update the status of the item to in-progress
                queue_update = {'state': 'in-progress', 'transmit_started_at': utcnow()}
                publish_queue_service.patch(queue_item.get(config.ID_FIELD), queue_update)
                logger.info('Transmitting queue item {}'.format(log_msg))

                destination = queue_item['destination']
                transmitter = superdesk.publish.registered_transmitters[destination.get('delivery_type')]
                transmitter.transmit(queue_item)
                logger.info('Transmitted queue item {}'.format(log_msg))
            except Exception as e:
                logger.exception('Failed to transmit queue item {}'.format(log_msg))

                max_retry_attempt = app.config.get('MAX_TRANSMIT_RETRY_ATTEMPT')
                retry_attempt_delay = app.config.get('TRANSMIT_RETRY_ATTEMPT_DELAY_MINUTES')
                try:
                    orig_item = publish_queue_service.find_one(req=None, _id=queue_item['_id'])
                    updates = {config.LAST_UPDATED: utcnow()}
                    if orig_item.get('retry_attempt', 0) < max_retry_attempt and \
                            not isinstance(e, PublishHTTPPushClientError):
                        updates['retry_attempt'] = orig_item.get('retry_attempt', 0) + 1
                        updates['state'] = QueueState.RETRYING.value
                        updates['next_retry_attempt_at'] = utcnow() + timedelta(minutes=retry_attempt_delay)
                    else:
                        # all retry attempts exhausted marking the item as failed.
                        updates['state'] = QueueState.FAILED.value
                    publish_queue_service.system_update(orig_item.get(config.ID_FIELD), updates, orig_item)
                except Exception:
                    logger.error('Failed to set the state for failed publish queue item {}.'.format(queue_item['_id']))
    finally:
        unlock(lock_name)


def transmit_items(queue_items):
    # get a distinct list of the subscribers that have queued items
    subscribers = list(set([q['subscriber_id'] for q in queue_items]))

    # extract the queued items for each subscriber and transmit them
    for subscriber in subscribers:
        sub_queue_items = [item for item in queue_items if item['subscriber_id'] == subscriber]
        transmit_subscriber_items.apply_async(kwargs={'queue_items': sub_queue_items,
                                                      'subscriber': str(subscriber)})


superdesk.command('publish:transmit', PublishContent())<|fim▁end|>
    request.max_results = app.config.get('MAX_TRANSMIT_QUERY_LIMIT', 500)

    # ensure we publish in the correct sequence
<|file_name|>exec_commands.rs<|end_file_name|><|fim▁begin|>use arguments::{VERBOSE_MODE, JOBLOG};
use execute::command::{self, CommandErr};
use input_iterator::InputsLock;
use numtoa::NumToA;
use time::{self, Timespec};
use tokenizer::Token;
use verbose;
use super::pipe::disk::State;
use super::job_log::JobLog;
use super::child::handle_child;

use std::io::{self, Read, Write};
use std::sync::mpsc::Sender;
use std::time::Duration;

/// Contains all the required data needed for executing commands in parallel.
/// Commands will be generated based on a template of argument tokens combined
/// with the current input argument.
pub struct ExecCommands<IO: Read> {
    pub slot: usize,
    pub num_inputs: usize,
    pub flags: u16,
    pub timeout: Duration,
    pub inputs: InputsLock<IO>,
    pub output_tx: Sender<State>,
    pub arguments: &'static [Token],
    pub tempdir: String,
}<|fim▁hole|>
    pub fn run(&mut self) {
        let stdout = io::stdout();
        let stderr = io::stderr();

        let slot = &self.slot.to_string();
        let mut command_buffer = &mut String::with_capacity(64);
        let has_timeout = self.timeout != Duration::from_millis(0);
        let mut input = String::with_capacity(64);
        let mut id_buffer = [0u8; 20];
        let mut job_buffer = [0u8; 20];
        let mut total_buffer = [0u8; 20];
        let mut start_indice = self.num_inputs.numtoa(10, &mut total_buffer);
        let job_total = &total_buffer[start_indice..];

        while let Some(job_id) = self.inputs.try_next(&mut input) {
            if self.flags & VERBOSE_MODE != 0 {
                verbose::processing_task(&stdout, job_id+1, self.num_inputs, &input);
            }

            start_indice = (job_id+1).numtoa(10, &mut id_buffer);
            let command = command::ParallelCommand {
                slot_no: slot,
                job_no: &id_buffer[start_indice..],
                job_total: job_total,
                input: &input,
                command_template: self.arguments,
                flags: self.flags
            };

            command_buffer.clear();
            let (start_time, end_time, exit_value, signal) = match command.exec(command_buffer) {
                Ok(child) => {
                    handle_child(child, &self.output_tx, self.flags, job_id, input.clone(),
                                 has_timeout, self.timeout, &self.tempdir, &mut job_buffer)
                },
                Err(cmd_err) => {
                    let mut stderr = stderr.lock();
                    let _ = stderr.write(b"parallel: command error: ");
                    let message = match cmd_err {
                        CommandErr::IO(error) => format!("I/O error: {}\n", error),
                    };

                    let _ = stderr.write(message.as_bytes());
                    let message = format!("{}: {}: {}", job_id+1, command.input, message);
                    let _ = self.output_tx.send(State::Error(job_id, message));
                    (Timespec::new(0, 0), Timespec::new(0, 0), -1, 0)
                }
            };

            if self.flags & JOBLOG != 0 {
                let runtime: time::Duration = end_time - start_time;
                let _ = self.output_tx.send(State::JobLog(JobLog {
                    job_id: job_id,
                    start_time: start_time,
                    runtime: runtime.num_nanoseconds().unwrap_or(0) as u64,
                    exit_value: exit_value,
                    signal: signal,
                    flags: self.flags,
                    command: command_buffer.clone(),
                }));
            }

            if self.flags & VERBOSE_MODE != 0 {
                verbose::task_complete(&stdout, job_id, self.num_inputs, &input);
            }
        }
    }
}<|fim▁end|>
impl<IO: Read> ExecCommands<IO> {
<|file_name|>test_bot.py<|end_file_name|><|fim▁begin|>import os
import json
import pytest

from .. import bot, PACKAGEDIR

EXAMPLE_TWEET = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'example-tweet.json'), 'r'))
EXAMPLE_RETWEET = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'retweeted-status.json'), 'r'))
EXAMPLE_NARCISSISTIC = json.load(open(os.path.join(PACKAGEDIR, 'tests', 'examples', 'narcissistic-tweet.json'), 'r'))

TESTDB = 'test_goldstar.db'


def test_recipients():
    handler = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True)
    recipients = handler.get_recipients()
    assert len(recipients) == 1
    assert recipients[0]['screen_name'] == 'exoplaneteer'


def test_responses():<|fim▁hole|>
    handler = bot.TweetHandler(EXAMPLE_TWEET, dbfile=TESTDB, dry_run=True)
    responses = handler.handle()
    assert len(responses) == 1  # only 1 star handed out
    assert len(responses[0]) < 140  # max tweet length
    assert responses[0] == '@exoplaneteer Congratulations, you just earned a 🌟 from @GeertHub! Your total is 1. https://twitter.com/GeertHub/status/745616020581265408'


def test_retweet():
    """A retweet should not result in a star!"""
    with pytest.raises(bot.InvalidTweetException):
        handler = bot.TweetHandler(EXAMPLE_RETWEET, dbfile=TESTDB, dry_run=True)


def test_narcisstic():
    """Don't allow people to give stars to themselves!"""
    handler = bot.TweetHandler(EXAMPLE_NARCISSISTIC, dbfile=TESTDB, dry_run=True)
    responses = handler.handle()
    assert len(responses) == 1
    assert responses[0] == "@exoplaneteer I'm sorry, Dan. I'm afraid I can't do that."<|fim▁end|>
<|file_name|>compositor.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use compositor_layer::{CompositorData, CompositorLayer, WantsScrollEventsFlag};
use compositor_task::{CompositorEventListener, CompositorProxy, CompositorReceiver};
use compositor_task::{CompositorTask, LayerProperties, Msg};
use constellation::SendableFrameTree;
use pipeline::CompositionPipeline;
use scrolling::ScrollingTimerProxy;
use windowing;
use windowing::{MouseWindowEvent, WindowEvent, WindowMethods, WindowNavigateMsg};

use geom::point::{Point2D, TypedPoint2D};
use geom::rect::{Rect, TypedRect};
use geom::scale_factor::ScaleFactor;
use geom::size::{Size2D, TypedSize2D};
use gfx::color;
use gfx::paint_task::Msg as PaintMsg;
use gfx::paint_task::PaintRequest;
use gleam::gl::types::{GLint, GLsizei};
use gleam::gl;
use layers::geometry::{DevicePixel, LayerPixel};
use layers::layers::{BufferRequest, Layer, LayerBuffer, LayerBufferSet};
use layers::rendergl::RenderContext;
use layers::rendergl;
use layers::scene::Scene;
use msg::compositor_msg::{Epoch, LayerId};
use msg::compositor_msg::{ReadyState, PaintState, ScrollPolicy};
use msg::constellation_msg::Msg as ConstellationMsg;
use msg::constellation_msg::{ConstellationChan, NavigationDirection};
use msg::constellation_msg::{Key, KeyModifiers, KeyState, LoadData};
use msg::constellation_msg::{PipelineId, WindowSizeData};
use png;
use profile::mem;
use profile::time::{self, ProfilerCategory, profile};
use script_traits::{ConstellationControlMsg, ScriptControlChan};
use std::cmp;
use std::collections::HashMap;
use std::collections::hash_map::Entry::{Occupied, Vacant};
use std::mem as std_mem;
use std::num::Float;
use std::rc::Rc;
use std::slice::bytes::copy_memory;
use std::sync::mpsc::Sender;
use time::{precise_time_ns, precise_time_s};
use url::Url;
use util::geometry::{PagePx, ScreenPx, ViewportPx};
use util::opts;

/// NB: Never block on the constellation, because sometimes the constellation blocks on us.
pub struct IOCompositor<Window: WindowMethods> {
    /// The application window.
    window: Rc<Window>,

    /// The port on which we receive messages.
    port: Box<CompositorReceiver>,

    /// The render context. This will be `None` if the windowing system has not yet sent us a
    /// `PrepareRenderingEvent`.
    context: Option<RenderContext>,

    /// The root pipeline.
    root_pipeline: Option<CompositionPipeline>,

    /// Tracks details about each active pipeline that the compositor knows about.
    pipeline_details: HashMap<PipelineId, PipelineDetails>,

    /// The canvas to paint a page.
    scene: Scene<CompositorData>,

    /// The application window size.
    window_size: TypedSize2D<DevicePixel, u32>,

    /// "Mobile-style" zoom that does not reflow the page.
    viewport_zoom: ScaleFactor<PagePx, ViewportPx, f32>,

    /// "Desktop-style" zoom that resizes the viewport to fit the window.
    /// See `ViewportPx` docs in util/geom.rs for details.
    page_zoom: ScaleFactor<ViewportPx, ScreenPx, f32>,

    /// The device pixel ratio for this window.
    hidpi_factor: ScaleFactor<ScreenPx, DevicePixel, f32>,

    channel_to_self: Box<CompositorProxy + Send>,

    /// A handle to the scrolling timer.
    scrolling_timer: ScrollingTimerProxy,

    /// Tracks whether we should composite this frame.
    composition_request: CompositionRequest,

    /// Tracks whether we are in the process of shutting down, or have shut down and should close
    /// the compositor.
    shutdown_state: ShutdownState,

    /// Tracks outstanding paint_msg's sent to the paint tasks.
    outstanding_paint_msgs: u32,

    /// Tracks the last composite time.
    last_composite_time: u64,

    /// Tracks whether the zoom action has happened recently.
    zoom_action: bool,

    /// The time of the last zoom action has started.
    zoom_time: f64,

    /// Whether the page being rendered has loaded completely.
    /// Differs from ReadyState because we can finish loading (ready)
    /// many times for a single page.
    got_load_complete_message: bool,

    /// Whether we have received a `SetFrameTree` message.
    got_set_frame_tree_message: bool,

    /// The channel on which messages can be sent to the constellation.
    constellation_chan: ConstellationChan,

    /// The channel on which messages can be sent to the time profiler.
    time_profiler_chan: time::ProfilerChan,

    /// The channel on which messages can be sent to the memory profiler.
    mem_profiler_chan: mem::ProfilerChan,

    /// Pending scroll to fragment event, if any
    fragment_point: Option<Point2D<f32>>,

    /// Pending scroll events.
    pending_scroll_events: Vec<ScrollEvent>,

    /// Has a Quit event been seen?
    has_seen_quit_event: bool,
}

pub struct ScrollEvent {
    delta: TypedPoint2D<DevicePixel,f32>,
    cursor: TypedPoint2D<DevicePixel,i32>,
}

#[derive(PartialEq)]
enum CompositionRequest {
    NoCompositingNecessary,
    CompositeOnScrollTimeout(u64),
    CompositeNow(CompositingReason),
}

#[derive(Copy, PartialEq, Debug)]
enum ShutdownState {
    NotShuttingDown,
    ShuttingDown,
    FinishedShuttingDown,
}

struct HitTestResult {
    layer: Rc<Layer<CompositorData>>,
    point: TypedPoint2D<LayerPixel, f32>,
}

struct PipelineDetails {
    /// The pipeline associated with this PipelineDetails object.
    pipeline: Option<CompositionPipeline>,

    /// The status of this pipeline's ScriptTask.
    ready_state: ReadyState,

    /// The status of this pipeline's PaintTask.
    paint_state: PaintState,

    /// Whether animations are running.
    animations_running: bool,
}

impl PipelineDetails {
    fn new() -> PipelineDetails {
        PipelineDetails {
            pipeline: None,
            ready_state: ReadyState::Blank,
            paint_state: PaintState::Painting,
            animations_running: false,
        }
    }
}

impl<Window: WindowMethods> IOCompositor<Window> {
    fn new(window: Rc<Window>,
           sender: Box<CompositorProxy+Send>,
           receiver: Box<CompositorReceiver>,
           constellation_chan: ConstellationChan,
           time_profiler_chan: time::ProfilerChan,
           mem_profiler_chan: mem::ProfilerChan)
           -> IOCompositor<Window> {
        // Create an initial layer tree.
        //
        // TODO: There should be no initial layer tree until the painter creates one from the
        // display list. This is only here because we don't have that logic in the painter yet.
        let window_size = window.framebuffer_size();
        let hidpi_factor = window.hidpi_factor();
        IOCompositor {
            window: window,
            port: receiver,
            context: None,
            root_pipeline: None,
            pipeline_details: HashMap::new(),
            scene: Scene::new(Rect {
                origin: Point2D::zero(),
                size: window_size.as_f32(),
            }),
            window_size: window_size,
            hidpi_factor: hidpi_factor,
            channel_to_self: sender.clone_compositor_proxy(),
            scrolling_timer: ScrollingTimerProxy::new(sender),
            composition_request: CompositionRequest::NoCompositingNecessary,
            pending_scroll_events: Vec::new(),
            shutdown_state: ShutdownState::NotShuttingDown,
            page_zoom: ScaleFactor::new(1.0),
            viewport_zoom: ScaleFactor::new(1.0),
            zoom_action: false,
            zoom_time: 0f64,
            got_load_complete_message: false,
            got_set_frame_tree_message: false,
            constellation_chan: constellation_chan,
            time_profiler_chan: time_profiler_chan,
            mem_profiler_chan: mem_profiler_chan,
            fragment_point: None,
            outstanding_paint_msgs: 0,
            last_composite_time: 0,
            has_seen_quit_event: false,
        }
    }

    pub fn create(window: Rc<Window>,
                  sender: Box<CompositorProxy+Send>,
                  receiver: Box<CompositorReceiver>,
                  constellation_chan: ConstellationChan,
                  time_profiler_chan: time::ProfilerChan,
                  mem_profiler_chan: mem::ProfilerChan)
                  -> IOCompositor<Window> {
        let mut compositor = IOCompositor::new(window,
                                               sender,
                                               receiver,
                                               constellation_chan,
                                               time_profiler_chan,
                                               mem_profiler_chan);

        // Set the size of the root layer.
        compositor.update_zoom_transform();

        // Tell the constellation about the initial window size.
        compositor.send_window_size();

        compositor
    }

    fn handle_browser_message(&mut self, msg: Msg) -> bool {
        match (msg, self.shutdown_state) {
            (_, ShutdownState::FinishedShuttingDown) =>
                panic!("compositor shouldn't be handling messages after shutting down"),

            (Msg::Exit(chan), _) => {
                debug!("shutting down the constellation");
                let ConstellationChan(ref con_chan) = self.constellation_chan;
                con_chan.send(ConstellationMsg::Exit).unwrap();
                chan.send(()).unwrap();
                self.shutdown_state = ShutdownState::ShuttingDown;
            }

            (Msg::ShutdownComplete, _) => {
                debug!("constellation completed shutdown");
                self.shutdown_state = ShutdownState::FinishedShuttingDown;
                return false;
            }

            (Msg::ChangeReadyState(pipeline_id, ready_state), ShutdownState::NotShuttingDown) => {
                self.change_ready_state(pipeline_id, ready_state);
            }

            (Msg::ChangePaintState(pipeline_id, paint_state), ShutdownState::NotShuttingDown) => {
                self.change_paint_state(pipeline_id, paint_state);
            }

            (Msg::ChangeRunningAnimationsState(pipeline_id, running_animations),
             ShutdownState::NotShuttingDown) => {
                self.change_running_animations_state(pipeline_id, running_animations);
            }

            (Msg::ChangePageTitle(pipeline_id, title), ShutdownState::NotShuttingDown) => {
                self.change_page_title(pipeline_id, title);
            }

            (Msg::ChangePageUrl(pipeline_id, url), ShutdownState::NotShuttingDown) => {
                self.change_page_url(pipeline_id, url);
            }

            (Msg::PaintMsgDiscarded, ShutdownState::NotShuttingDown) => {
                self.remove_outstanding_paint_msg();
            }

            (Msg::SetFrameTree(frame_tree, response_chan, new_constellation_chan),
             ShutdownState::NotShuttingDown) => {
                self.set_frame_tree(&frame_tree, response_chan, new_constellation_chan);
                self.send_viewport_rects_for_all_layers();
                self.get_title_for_main_frame();
            }

            (Msg::CreateOrUpdateBaseLayer(layer_properties), ShutdownState::NotShuttingDown) => {
                self.create_or_update_base_layer(layer_properties);
            }

            (Msg::CreateOrUpdateDescendantLayer(layer_properties),
             ShutdownState::NotShuttingDown) => {
                self.create_or_update_descendant_layer(layer_properties);
            }

            (Msg::GetGraphicsMetadata(chan), ShutdownState::NotShuttingDown) => {
                chan.send(Some(self.window.native_metadata())).unwrap();
            }

            (Msg::SetLayerRect(pipeline_id, layer_id, rect), ShutdownState::NotShuttingDown) => {
                self.set_layer_rect(pipeline_id, layer_id, &rect);
            }

            (Msg::AssignPaintedBuffers(pipeline_id, epoch, replies),
             ShutdownState::NotShuttingDown) => {
                for (layer_id, new_layer_buffer_set) in replies.into_iter() {
                    self.assign_painted_buffers(pipeline_id, layer_id, new_layer_buffer_set, epoch);
                }
                self.remove_outstanding_paint_msg();
            }

            (Msg::ScrollFragmentPoint(pipeline_id, layer_id, point),
             ShutdownState::NotShuttingDown) => {
                self.scroll_fragment_to_point(pipeline_id, layer_id, point);
            }

            (Msg::LoadComplete, ShutdownState::NotShuttingDown) => {
                self.got_load_complete_message = true;

                // If we're painting in headless mode, schedule a recomposite.
                if opts::get().output_file.is_some() {
                    self.composite_if_necessary(CompositingReason::Headless);
                }

                // Inform the embedder that the load has finished.
                //
                // TODO(pcwalton): Specify which frame's load completed.
                self.window.load_end();
            }

            (Msg::ScrollTimeout(timestamp), ShutdownState::NotShuttingDown) => {
                debug!("scroll timeout, drawing unpainted content!");
                match self.composition_request {
                    CompositionRequest::CompositeOnScrollTimeout(this_timestamp) => {
                        if timestamp == this_timestamp {
                            self.composition_request = CompositionRequest::CompositeNow(
                                CompositingReason::HitScrollTimeout)
                        }
                    }
                    _ => {}
                }
            }

            (Msg::RecompositeAfterScroll, ShutdownState::NotShuttingDown) => {
                self.composition_request =
                    CompositionRequest::CompositeNow(CompositingReason::ContinueScroll)
            }

            (Msg::KeyEvent(key, state, modified), ShutdownState::NotShuttingDown) => {
                if state == KeyState::Pressed {
                    self.window.handle_key(key, modified);
                }
            }

            (Msg::SetCursor(cursor), ShutdownState::NotShuttingDown) => {
                self.window.set_cursor(cursor)
            }

            (Msg::PaintTaskExited(pipeline_id), ShutdownState::NotShuttingDown) => {
                if self.pipeline_details.remove(&pipeline_id).is_none() {
                    panic!("Saw PaintTaskExited message from an unknown pipeline!");
                }
            }

            // When we are shutting_down, we need to avoid performing operations
            // such as Paint that may crash because we have begun tearing down
            // the rest of our resources.
            (_, ShutdownState::ShuttingDown) => { }
        }

        true
    }

    fn change_ready_state(&mut self, pipeline_id: PipelineId, ready_state: ReadyState) {
        self.get_or_create_pipeline_details(pipeline_id).ready_state = ready_state;
        self.window.set_ready_state(self.get_earliest_pipeline_ready_state());

        // If we're painting in headless mode, schedule a recomposite.
        if opts::get().output_file.is_some() {
            self.composite_if_necessary(CompositingReason::Headless)
        }
    }

    fn get_earliest_pipeline_ready_state(&self) -> ReadyState {
        if self.pipeline_details.len() == 0 {
            return ReadyState::Blank;
        }
        return self.pipeline_details.values().fold(ReadyState::FinishedLoading,
                                                   |v, ref details| {
            cmp::min(v, details.ready_state)
        });
    }

    fn change_paint_state(&mut self, pipeline_id: PipelineId, paint_state: PaintState) {
        self.get_or_create_pipeline_details(pipeline_id).paint_state = paint_state;
        self.window.set_paint_state(paint_state);
    }

    /// Sets or unsets the animations-running flag for the given pipeline, and schedules a
    /// recomposite if necessary.
    fn change_running_animations_state(&mut self,
                                       pipeline_id: PipelineId,
                                       animations_running: bool) {
        self.get_or_create_pipeline_details(pipeline_id).animations_running = animations_running;

        if animations_running {
            self.composite_if_necessary(CompositingReason::Animation);
        }
    }

    pub fn get_or_create_pipeline_details<'a>(&'a mut self,
                                              pipeline_id: PipelineId)
                                              -> &'a mut PipelineDetails {
        if !self.pipeline_details.contains_key(&pipeline_id) {
            self.pipeline_details.insert(pipeline_id, PipelineDetails::new());
        }
        return self.pipeline_details.get_mut(&pipeline_id).unwrap();
    }

    pub fn get_pipeline<'a>(&'a self, pipeline_id: PipelineId) -> &'a CompositionPipeline {
        match self.pipeline_details.get(&pipeline_id) {
            Some(ref details) => {
                match details.pipeline {
                    Some(ref pipeline) => pipeline,
                    None => panic!("Compositor layer has an unitialized pipeline ({:?}).",
                                   pipeline_id),
                }
            }
            None => panic!("Compositor layer has an unknown pipeline ({:?}).", pipeline_id),
        }
    }

    fn change_page_title(&mut self, pipeline_id: PipelineId, title: Option<String>) {
        let set_title = self.root_pipeline.as_ref().map_or(false, |root_pipeline| {
            root_pipeline.id == pipeline_id
        });
        if set_title {
            self.window.set_page_title(title);
        }
    }

    fn change_page_url(&mut self, _: PipelineId, url: Url) {
        self.window.set_page_url(url);
    }

    fn all_pipelines_in_idle_paint_state(&self) -> bool {
        if self.pipeline_details.len() == 0 {
            return false;
        }
        return self.pipeline_details.values().all(|ref details| {
            // If a pipeline exists and has a root layer that has
            // zero size, it will never be painted. In this case,
            // consider it as idle to avoid hangs in reftests.
            if let Some(ref pipeline) = details.pipeline {
                if let Some(root_layer) = self.find_pipeline_root_layer(pipeline.id) {
                    if root_layer.bounds.borrow().size == Size2D::zero() {
                        return true;
                    }
                }
            }
            details.paint_state == PaintState::Idle
        });
    }

    fn has_paint_msg_tracking(&self) -> bool {
        // only track PaintMsg's if the compositor outputs to a file.
        opts::get().output_file.is_some()
    }

    fn has_outstanding_paint_msgs(&self) -> bool {
        self.has_paint_msg_tracking() && self.outstanding_paint_msgs > 0
    }

    fn add_outstanding_paint_msg(&mut self, count: u32) {
        // return early if not tracking paint_msg's
        if !self.has_paint_msg_tracking() {
            return;
        }
        debug!("add_outstanding_paint_msg {:?}", self.outstanding_paint_msgs);
        self.outstanding_paint_msgs += count;
    }

    fn remove_outstanding_paint_msg(&mut self) {
        if !self.has_paint_msg_tracking() {
            return;
        }
        if self.outstanding_paint_msgs > 0 {
            self.outstanding_paint_msgs -= 1;
        } else {
            debug!("too many repaint msgs completed");
        }
    }

    fn set_frame_tree(&mut self,
                      frame_tree: &SendableFrameTree,
                      response_chan: Sender<()>,
                      new_constellation_chan: ConstellationChan) {
        response_chan.send(()).unwrap();

        self.root_pipeline = Some(frame_tree.pipeline.clone());

        // If we have an old root layer, release all old tiles before replacing it.
        match self.scene.root {
            Some(ref layer) => layer.clear_all_tiles(self),
            None => { }
        }

        self.scene.root = Some(self.create_frame_tree_root_layers(frame_tree, None));
        self.scene.set_root_layer_size(self.window_size.as_f32());

        // Initialize the new constellation channel by sending it the root window size.
        self.constellation_chan = new_constellation_chan;
        self.send_window_size();

        self.got_set_frame_tree_message = true;
        self.composite_if_necessary(CompositingReason::NewFrameTree);
    }

    fn create_root_layer_for_pipeline_and_rect(&mut self,
                                               pipeline: &CompositionPipeline,
                                               frame_rect: Option<TypedRect<PagePx, f32>>)
                                               -> Rc<Layer<CompositorData>> {
        let layer_properties = LayerProperties {
            pipeline_id: pipeline.id,
            epoch: Epoch(0),
            id: LayerId::null(),
            rect: Rect::zero(),
            background_color: color::transparent(),
            scroll_policy: ScrollPolicy::Scrollable,
        };

        let root_layer = CompositorData::new_layer(layer_properties,
                                                   WantsScrollEventsFlag::WantsScrollEvents,
                                                   opts::get().tile_size);

        self.get_or_create_pipeline_details(pipeline.id).pipeline = Some(pipeline.clone());

        // All root layers mask to bounds.
        *root_layer.masks_to_bounds.borrow_mut() = true;

        if let Some(ref frame_rect) = frame_rect {
            let frame_rect = frame_rect.to_untyped();
            *root_layer.bounds.borrow_mut() = Rect::from_untyped(&frame_rect);
        }

        return root_layer;
    }

    fn create_frame_tree_root_layers(&mut self,
                                     frame_tree: &SendableFrameTree,
                                     frame_rect: Option<TypedRect<PagePx, f32>>)
                                     -> Rc<Layer<CompositorData>> {
        let root_layer = self.create_root_layer_for_pipeline_and_rect(&frame_tree.pipeline,
                                                                      frame_rect);
        for kid in frame_tree.children.iter() {
            root_layer.add_child(self.create_frame_tree_root_layers(kid, kid.rect));
        }
        return root_layer;
    }

    fn find_pipeline_root_layer(&self, pipeline_id: PipelineId)
                                -> Option<Rc<Layer<CompositorData>>> {
        if !self.pipeline_details.contains_key(&pipeline_id) {
            panic!("Tried to create or update layer for unknown pipeline")
        }
        self.find_layer_with_pipeline_and_layer_id(pipeline_id, LayerId::null())
    }

    fn update_layer_if_exists(&mut self, properties: LayerProperties) -> bool {
        match self.find_layer_with_pipeline_and_layer_id(properties.pipeline_id, properties.id) {
            Some(existing_layer) => {
                existing_layer.update_layer(properties);
                true
            }
            None => false,
        }
    }

    fn create_or_update_base_layer(&mut self, layer_properties: LayerProperties) {
        let pipeline_id = layer_properties.pipeline_id;
        let root_layer = match self.find_pipeline_root_layer(pipeline_id) {
            Some(root_layer) => root_layer,
            None => {
                debug!("Ignoring CreateOrUpdateBaseLayer message for pipeline \
                        ({:?}) shutting down.",
                       pipeline_id);
                return;
            }
        };

        let need_new_base_layer = !self.update_layer_if_exists(layer_properties);
        if need_new_base_layer {
            root_layer.update_layer_except_bounds(layer_properties);

            let base_layer = CompositorData::new_layer(
                layer_properties,
                WantsScrollEventsFlag::DoesntWantScrollEvents,
                opts::get().tile_size);

            // Add the base layer to the front of the child list, so that child
            // iframe layers are painted on top of the base layer. These iframe
            // layers were added previously when creating the layer tree
            // skeleton in create_frame_tree_root_layers.
            root_layer.children().insert(0, base_layer);
        }

        self.scroll_layer_to_fragment_point_if_necessary(layer_properties.pipeline_id,
                                                         layer_properties.id);
        self.send_buffer_requests_for_all_layers();
    }

    fn create_or_update_descendant_layer(&mut self, layer_properties: LayerProperties) {
        if !self.update_layer_if_exists(layer_properties) {
            self.create_descendant_layer(layer_properties);
        }
        self.scroll_layer_to_fragment_point_if_necessary(layer_properties.pipeline_id,
                                                         layer_properties.id);
        self.send_buffer_requests_for_all_layers();
    }

    fn create_descendant_layer(&self, layer_properties: LayerProperties) {
        let root_layer = match self.find_pipeline_root_layer(layer_properties.pipeline_id) {
            Some(root_layer) => root_layer,
            None => return, // This pipeline is in the process of shutting down.
        };

        let new_layer = CompositorData::new_layer(layer_properties,
                                                  WantsScrollEventsFlag::DoesntWantScrollEvents,
                                                  root_layer.tile_size);
        root_layer.add_child(new_layer);
    }

    fn send_window_size(&self) {
        let dppx = self.page_zoom * self.device_pixels_per_screen_px();
        let initial_viewport = self.window_size.as_f32() / dppx;
        let visible_viewport = initial_viewport / self.viewport_zoom;

        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(ConstellationMsg::ResizedWindow(WindowSizeData {
            device_pixel_ratio: dppx,
            initial_viewport: initial_viewport,
            visible_viewport: visible_viewport,
        })).unwrap()
    }

    pub fn move_layer(&self,
                      pipeline_id: PipelineId,
                      layer_id: LayerId,
                      origin: TypedPoint2D<LayerPixel, f32>)
                      -> bool {
        match self.find_layer_with_pipeline_and_layer_id(pipeline_id, layer_id) {
            Some(ref layer) => {
                if layer.wants_scroll_events() == WantsScrollEventsFlag::WantsScrollEvents {
                    layer.clamp_scroll_offset_and_scroll_layer(TypedPoint2D(0f32, 0f32) - origin);
                }
                true
            }
            None => false,
        }
    }

    fn scroll_layer_to_fragment_point_if_necessary(&mut self,
                                                   pipeline_id: PipelineId,
                                                   layer_id: LayerId) {
        if let Some(point) = self.fragment_point.take() {
            if !self.move_layer(pipeline_id, layer_id, Point2D::from_untyped(&point)) {
                panic!("Compositor: Tried to scroll to fragment with unknown layer.");
            }

            self.start_scrolling_timer_if_necessary();
        }
    }

    fn start_scrolling_timer_if_necessary(&mut self) {
        match self.composition_request {
            CompositionRequest::CompositeNow(_) |
            CompositionRequest::CompositeOnScrollTimeout(_) => return,
            CompositionRequest::NoCompositingNecessary => {}
        }

        let timestamp = precise_time_ns();
        self.scrolling_timer.scroll_event_processed(timestamp);
        self.composition_request = CompositionRequest::CompositeOnScrollTimeout(timestamp);
    }

    fn set_layer_rect(&mut self,
                      pipeline_id: PipelineId,
                      layer_id: LayerId,
                      new_rect: &Rect<f32>) {
        match self.find_layer_with_pipeline_and_layer_id(pipeline_id, layer_id) {
            Some(ref layer) => {
                *layer.bounds.borrow_mut() = Rect::from_untyped(new_rect)
            }
            None => panic!("Compositor received SetLayerRect for nonexistent \
                            layer: {:?}", pipeline_id),
        };

        self.send_buffer_requests_for_all_layers();
    }

    fn assign_painted_buffers(&mut self,
                              pipeline_id: PipelineId,
                              layer_id: LayerId,
                              new_layer_buffer_set: Box<LayerBufferSet>,
                              epoch: Epoch) {
        if let Some(layer) = self.find_layer_with_pipeline_and_layer_id(pipeline_id, layer_id) {
            self.assign_painted_buffers_to_layer(layer, new_layer_buffer_set, epoch);
            return
        }

        let pipeline = self.get_pipeline(pipeline_id);
        let message = PaintMsg::UnusedBuffer(new_layer_buffer_set.buffers);
        let _ = pipeline.paint_chan.send(message);
    }

    fn assign_painted_buffers_to_layer(&mut self,
                                       layer: Rc<Layer<CompositorData>>,
                                       new_layer_buffer_set: Box<LayerBufferSet>,
                                       epoch: Epoch) {
        debug!("compositor received new frame at size {:?}x{:?}",
               self.window_size.width.get(),
               self.window_size.height.get());

        // From now on, if we destroy the buffers, they will leak.
        let mut new_layer_buffer_set = new_layer_buffer_set;
        new_layer_buffer_set.mark_will_leak();

        // FIXME(pcwalton): This is going to cause problems with inconsistent frames since
        // we only composite one layer at a time.
        assert!(layer.add_buffers(self, new_layer_buffer_set, epoch));
        self.composite_if_necessary(CompositingReason::NewPaintedBuffers);
    }

    fn scroll_fragment_to_point(&mut self,
                                pipeline_id: PipelineId,
                                layer_id: LayerId,
                                point: Point2D<f32>) {
        if self.move_layer(pipeline_id, layer_id, Point2D::from_untyped(&point)) {
            self.perform_updates_after_scroll()
        } else {
            self.fragment_point = Some(point)
        }
    }

    fn handle_window_message(&mut self, event: WindowEvent) {
        match event {
            WindowEvent::Idle => {}

            WindowEvent::Refresh => {
                self.composite();
            }

            WindowEvent::InitializeCompositing => {
                self.initialize_compositing();
            }

            WindowEvent::Resize(size) => {
                self.on_resize_window_event(size);
            }

            WindowEvent::LoadUrl(url_string) => {
                self.on_load_url_window_event(url_string);
            }

            WindowEvent::MouseWindowEventClass(mouse_window_event) => {
                self.on_mouse_window_event_class(mouse_window_event);
            }

            WindowEvent::MouseWindowMoveEventClass(cursor) => {
                self.on_mouse_window_move_event_class(cursor);
            }

            WindowEvent::Scroll(delta, cursor) => {
                self.on_scroll_window_event(delta, cursor);
            }

            WindowEvent::Zoom(magnification) => {
                self.on_zoom_window_event(magnification);
            }

            WindowEvent::PinchZoom(magnification) => {
                self.on_pinch_zoom_window_event(magnification);
            }

            WindowEvent::Navigation(direction) => {
                self.on_navigation_window_event(direction);
            }

            WindowEvent::KeyEvent(key, state, modifiers) => {
                self.on_key_event(key, state, modifiers);
            }

            WindowEvent::Quit => {
                if !self.has_seen_quit_event {
                    self.has_seen_quit_event = true;
                    debug!("shutting down the constellation for WindowEvent::Quit");
                    let ConstellationChan(ref chan) = self.constellation_chan;
                    chan.send(ConstellationMsg::Exit).unwrap();
                    self.shutdown_state = ShutdownState::ShuttingDown;
                }
            }
        }
    }

    fn on_resize_window_event(&mut self, new_size: TypedSize2D<DevicePixel, u32>) {
        debug!("compositor resizing to {:?}", new_size.to_untyped());

        // A size change could also mean a resolution change.
        let new_hidpi_factor = self.window.hidpi_factor();
        if self.hidpi_factor != new_hidpi_factor {
            self.hidpi_factor = new_hidpi_factor;
            self.update_zoom_transform();
        }

        if self.window_size == new_size {
            return;
        }

        self.window_size = new_size;

        self.scene.set_root_layer_size(new_size.as_f32());
        self.send_window_size();
    }

    fn on_load_url_window_event(&mut self, url_string: String) {
        debug!("osmain: loading URL `{}`", url_string);
        self.got_load_complete_message = false;
        let root_pipeline_id = match self.scene.root {
            Some(ref layer) => layer.get_pipeline_id(),
            None => panic!("Compositor: Received WindowEvent::LoadUrl without initialized compositor \
                            layers"),
        };

        let msg = ConstellationMsg::LoadUrl(root_pipeline_id,
                                            LoadData::new(Url::parse(&url_string).unwrap()));
        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(msg).unwrap()
    }

    fn on_mouse_window_event_class(&self, mouse_window_event: MouseWindowEvent) {
        let point = match mouse_window_event {
            MouseWindowEvent::Click(_, p) => p,
            MouseWindowEvent::MouseDown(_, p) => p,
            MouseWindowEvent::MouseUp(_, p) => p,
        };
        match self.find_topmost_layer_at_point(point / self.scene.scale) {
            Some(result) => result.layer.send_mouse_event(self, mouse_window_event, result.point),
            None => {},
        }
    }

    fn on_mouse_window_move_event_class(&self, cursor: TypedPoint2D<DevicePixel, f32>) {
        match self.find_topmost_layer_at_point(cursor / self.scene.scale) {
            Some(result) => result.layer.send_mouse_move_event(self, result.point),
            None => {},
        }
    }

    fn on_scroll_window_event(&mut self,
                              delta: TypedPoint2D<DevicePixel, f32>,
                              cursor: TypedPoint2D<DevicePixel, i32>) {
        self.pending_scroll_events.push(ScrollEvent {
            delta: delta,
            cursor: cursor,
        });

        self.composite_if_necessary(CompositingReason::Scroll);
    }

    fn process_pending_scroll_events(&mut self) {
        let had_scroll_events = self.pending_scroll_events.len() > 0;
        for scroll_event in std_mem::replace(&mut self.pending_scroll_events,
                                             Vec::new()).into_iter() {
            let delta = scroll_event.delta / self.scene.scale;
            let cursor = scroll_event.cursor.as_f32() / self.scene.scale;

            if let Some(ref mut layer) = self.scene.root {
                layer.handle_scroll_event(delta, cursor);
            }

            self.perform_updates_after_scroll();
        }

        if had_scroll_events {
            self.send_viewport_rects_for_all_layers();
        }
    }

    /// Performs buffer requests and starts the scrolling timer or schedules a recomposite as
    /// necessary.
    fn perform_updates_after_scroll(&mut self) {
        if self.send_buffer_requests_for_all_layers() {
            self.start_scrolling_timer_if_necessary();
        } else {
            self.channel_to_self.send(Msg::RecompositeAfterScroll);
        }
    }

    /// If there are any animations running, dispatches appropriate messages to the constellation.
    fn process_animations(&mut self) {
        for (pipeline_id, pipeline_details) in self.pipeline_details.iter() {
            if !pipeline_details.animations_running {
                continue
            }
            self.constellation_chan.0.send(ConstellationMsg::TickAnimation(*pipeline_id)).unwrap();
        }
    }

    fn device_pixels_per_screen_px(&self) -> ScaleFactor<ScreenPx, DevicePixel, f32> {
        match opts::get().device_pixels_per_px {
            Some(device_pixels_per_px) => device_pixels_per_px,
            None => match opts::get().output_file {
                Some(_) => ScaleFactor::new(1.0),
                None => self.hidpi_factor
            }
        }
    }

    fn device_pixels_per_page_px(&self) -> ScaleFactor<PagePx, DevicePixel, f32> {
        self.viewport_zoom * self.page_zoom * self.device_pixels_per_screen_px()
    }

    fn update_zoom_transform(&mut self) {
        let scale = self.device_pixels_per_page_px();
        self.scene.scale = ScaleFactor::new(scale.get());

        // We need to set the size of the root layer again, since the window size
        // has changed in unscaled layer pixels.
        self.scene.set_root_layer_size(self.window_size.as_f32());
    }

    fn on_zoom_window_event(&mut self, magnification: f32) {
        self.page_zoom = ScaleFactor::new((self.page_zoom.get() * magnification).max(1.0));
        self.update_zoom_transform();
        self.send_window_size();
    }

    // TODO(pcwalton): I think this should go through the same queuing as scroll events do.
    fn on_pinch_zoom_window_event(&mut self, magnification: f32) {
        self.zoom_action = true;
        self.zoom_time = precise_time_s();
        let old_viewport_zoom = self.viewport_zoom;

        self.viewport_zoom = ScaleFactor::new((self.viewport_zoom.get() * magnification).max(1.0));
        let viewport_zoom = self.viewport_zoom;

        self.update_zoom_transform();

        // Scroll as needed
        let window_size = self.window_size.as_f32();
        let page_delta: TypedPoint2D<LayerPixel, f32> = TypedPoint2D(
            window_size.width.get() * (viewport_zoom.inv() - old_viewport_zoom.inv()).get() * 0.5,
            window_size.height.get() * (viewport_zoom.inv() - old_viewport_zoom.inv()).get() * 0.5);

        let cursor = TypedPoint2D(-1f32, -1f32); // Make sure this hits the base layer.
        match self.scene.root {
            Some(ref mut layer) => {
                layer.handle_scroll_event(page_delta, cursor);
            }
            None => { }
        }

        self.send_viewport_rects_for_all_layers();
        self.composite_if_necessary(CompositingReason::Zoom);
    }

    fn on_navigation_window_event(&self, direction: WindowNavigateMsg) {
        let direction = match direction {
            windowing::WindowNavigateMsg::Forward => NavigationDirection::Forward,
            windowing::WindowNavigateMsg::Back => NavigationDirection::Back,
        };
        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(ConstellationMsg::Navigate(None, direction)).unwrap()
    }

    fn on_key_event(&self, key: Key, state: KeyState, modifiers: KeyModifiers) {
        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(ConstellationMsg::KeyEvent(key, state, modifiers)).unwrap()
    }

    fn convert_buffer_requests_to_pipeline_requests_map(&self,
                                                        requests: Vec<(Rc<Layer<CompositorData>>,
                                                                       Vec<BufferRequest>)>)
                                                        -> HashMap<PipelineId, Vec<PaintRequest>> {
        let scale = self.device_pixels_per_page_px();
        let mut results: HashMap<PipelineId, Vec<PaintRequest>> = HashMap::new();

        for (layer, mut layer_requests) in requests.into_iter() {
            let vec = match results.entry(layer.get_pipeline_id()) {
                Occupied(mut entry) => {
                    *entry.get_mut() = Vec::new();
                    entry.into_mut()
                }
                Vacant(entry) => {
                    entry.insert(Vec::new())
                }
            };

            // All the BufferRequests are in layer/device coordinates, but the paint task
            // wants to know the page coordinates. We scale them before sending them.
            for request in layer_requests.iter_mut() {
                request.page_rect = request.page_rect / scale.get();
            }

            vec.push(PaintRequest {
                buffer_requests: layer_requests,
                scale: scale.get(),
                layer_id: layer.extra_data.borrow().id,
                epoch: layer.extra_data.borrow().epoch,
            });
        }

        results
    }

    fn send_back_unused_buffers(&mut self,
                                unused_buffers: Vec<(Rc<Layer<CompositorData>>,
                                                     Vec<Box<LayerBuffer>>)>) {
        for (layer, buffers) in unused_buffers.into_iter() {
            if !buffers.is_empty() {
                let pipeline = self.get_pipeline(layer.get_pipeline_id());
                let _ = pipeline.paint_chan.send_opt(PaintMsg::UnusedBuffer(buffers));
            }
        }
    }

    fn send_viewport_rect_for_layer(&self, layer: Rc<Layer<CompositorData>>) {
        if layer.extra_data.borrow().id == LayerId::null() {
            let layer_rect = Rect(-layer.extra_data.borrow().scroll_offset.to_untyped(),
                                  layer.bounds.borrow().size.to_untyped());
            let pipeline = self.get_pipeline(layer.get_pipeline_id());
            let ScriptControlChan(ref chan) = pipeline.script_chan;
            chan.send(ConstellationControlMsg::Viewport(pipeline.id.clone(), layer_rect)).unwrap();
        }

        for kid in layer.children().iter() {
            self.send_viewport_rect_for_layer(kid.clone());
        }
    }

    fn send_viewport_rects_for_all_layers(&self) {
        match self.scene.root {
            Some(ref root) => self.send_viewport_rect_for_layer(root.clone()),
            None => {},
        }
    }

    /// Returns true if any buffer requests were sent or false otherwise.
    fn send_buffer_requests_for_all_layers(&mut self) -> bool {
        let mut layers_and_requests = Vec::new();
        let mut unused_buffers = Vec::new();
        self.scene.get_buffer_requests(&mut layers_and_requests, &mut unused_buffers);

        // Return unused tiles first, so that they can be reused by any new BufferRequests.
        self.send_back_unused_buffers(unused_buffers);

        if layers_and_requests.len() == 0 {
            return false;
        }

        // We want to batch requests for each pipeline to avoid race conditions
        // when handling the resulting BufferRequest responses.
        let pipeline_requests =
            self.convert_buffer_requests_to_pipeline_requests_map(layers_and_requests);

        let mut num_paint_msgs_sent = 0;
        for (pipeline_id, requests) in pipeline_requests.into_iter() {
            num_paint_msgs_sent += 1;
            let _ = self.get_pipeline(pipeline_id).paint_chan.send(PaintMsg::Paint(requests));
        }

        self.add_outstanding_paint_msg(num_paint_msgs_sent);
        true
    }

    fn is_ready_to_paint_image_output(&self) -> bool {
        if !self.got_load_complete_message {
            return false;
        }

        if self.get_earliest_pipeline_ready_state() != ReadyState::FinishedLoading {
            return false;
        }

        if self.has_outstanding_paint_msgs() {
            return false;
        }

        if !self.all_pipelines_in_idle_paint_state() {
            return false;
        }

        if !self.got_set_frame_tree_message {
            return false;
        }

        return true;
    }

    fn composite(&mut self) {
        if !self.window.prepare_for_composite() {
            return
        }

        let output_image = opts::get().output_file.is_some() &&
                           self.is_ready_to_paint_image_output();

        let mut framebuffer_ids = vec!();
        let mut texture_ids = vec!();
        let (width, height) = (self.window_size.width.get() as usize,
                               self.window_size.height.get() as usize);

        if output_image {
            framebuffer_ids = gl::gen_framebuffers(1);
            gl::bind_framebuffer(gl::FRAMEBUFFER, framebuffer_ids[0]);

            texture_ids = gl::gen_textures(1);
            gl::bind_texture(gl::TEXTURE_2D, texture_ids[0]);

            gl::tex_image_2d(gl::TEXTURE_2D, 0, gl::RGB as GLint, width as GLsizei,
                             height as GLsizei, 0, gl::RGB, gl::UNSIGNED_BYTE, None);
            gl::tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::NEAREST as GLint);
            gl::tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::NEAREST as GLint);

            gl::framebuffer_texture_2d(gl::FRAMEBUFFER, gl::COLOR_ATTACHMENT0, gl::TEXTURE_2D,
                                       texture_ids[0], 0);

            gl::bind_texture(gl::TEXTURE_2D, 0);
        }

        profile(ProfilerCategory::Compositing, None, self.time_profiler_chan.clone(), || {
            debug!("compositor: compositing");
            // Adjust the layer dimensions as necessary to correspond to the size of the window.
            self.scene.viewport = Rect {
                origin: Point2D::zero(),
                size: self.window_size.as_f32(),
            };

            // Paint the scene.
            if let Some(ref layer) = self.scene.root {
                match self.context {
                    Some(context) => rendergl::render_scene(layer.clone(), context, &self.scene),
                    None => {
                        debug!("compositor: not compositing because context not yet set up")
                    }
                }
            }
        });

        if output_image {
            let path = opts::get().output_file.as_ref().unwrap();
            let mut pixels = gl::read_pixels(0, 0,
                                             width as gl::GLsizei,
                                             height as gl::GLsizei,
                                             gl::RGB, gl::UNSIGNED_BYTE);

            gl::bind_framebuffer(gl::FRAMEBUFFER, 0);

            gl::delete_buffers(&texture_ids);
            gl::delete_frame_buffers(&framebuffer_ids);

            // flip image vertically (texture is upside down)
            let orig_pixels = pixels.clone();
            let stride = width * 3;
            for y in 0..height {
                let dst_start = y * stride;
                let src_start = (height - y - 1) * stride;
                let src_slice = &orig_pixels[src_start .. src_start + stride];
                copy_memory(&mut pixels[dst_start .. dst_start + stride],
                            &src_slice[..stride]);
            }
            let mut img = png::Image {
                width: width as u32,
                height: height as u32,
                pixels: png::PixelsByColorType::RGB8(pixels),
            };
            let res = png::store_png(&mut img, &path);
            assert!(res.is_ok());

            debug!("shutting down the constellation after generating an output file");
            let ConstellationChan(ref chan) = self.constellation_chan;
            chan.send(ConstellationMsg::Exit).unwrap();
            self.shutdown_state = ShutdownState::ShuttingDown;
        }

        // Perform the page flip. This will likely block for a while.
        self.window.present();

        self.last_composite_time = precise_time_ns();

        self.composition_request = CompositionRequest::NoCompositingNecessary;
        self.process_pending_scroll_events();
        self.process_animations();
    }

    fn composite_if_necessary(&mut self, reason: CompositingReason) {
        if self.composition_request == CompositionRequest::NoCompositingNecessary {
            self.composition_request = CompositionRequest::CompositeNow(reason)
        }
    }

    fn initialize_compositing(&mut self) {
        let context = CompositorTask::create_graphics_context(&self.window.native_metadata());
        let show_debug_borders = opts::get().show_debug_borders;
        self.context = Some(rendergl::RenderContext::new(context, show_debug_borders))
    }

    fn find_topmost_layer_at_point_for_layer(&self,
                                             layer: Rc<Layer<CompositorData>>,
                                             point: TypedPoint2D<LayerPixel, f32>,
                                             clip_rect: &TypedRect<LayerPixel, f32>)
                                             -> Option<HitTestResult> {
        let layer_bounds = *layer.bounds.borrow();
        let masks_to_bounds = *layer.masks_to_bounds.borrow();
        if layer_bounds.is_empty() && masks_to_bounds {
            return None;
        }

        let clipped_layer_bounds = match clip_rect.intersection(&layer_bounds) {
            Some(rect) => rect,
            None => return None,
        };

        let clip_rect_for_children = if masks_to_bounds {
            Rect(Point2D::zero(), clipped_layer_bounds.size)
        } else {
            clipped_layer_bounds.translate(&clip_rect.origin)
        };

        let child_point = point - layer_bounds.origin;
        for child in layer.children().iter().rev() {
            // Translate the clip rect into the child's coordinate system.
            let clip_rect_for_child =
                clip_rect_for_children.translate(&-*child.content_offset.borrow());
            let result = self.find_topmost_layer_at_point_for_layer(child.clone(),
                                                                    child_point,
                                                                    &clip_rect_for_child);
            if result.is_some() {
                return result;
            }
        }

        let point = point - *layer.content_offset.borrow();
        if !clipped_layer_bounds.contains(&point) {
            return None;
        }

        return Some(HitTestResult { layer: layer, point: point });
    }

    fn find_topmost_layer_at_point(&self,
                                   point: TypedPoint2D<LayerPixel, f32>)
                                   -> Option<HitTestResult> {
        match self.scene.root {
            Some(ref layer) => {
                self.find_topmost_layer_at_point_for_layer(layer.clone(),
                                                           point,
                                                           &*layer.bounds.borrow())
            }
            None => None,
        }
    }

    fn find_layer_with_pipeline_and_layer_id(&self,
                                             pipeline_id: PipelineId,
                                             layer_id: LayerId)
                                             -> Option<Rc<Layer<CompositorData>>> {
        match self.scene.root {
            Some(ref layer) =>
                find_layer_with_pipeline_and_layer_id_for_layer(layer.clone(),
                                                                pipeline_id,
                                                                layer_id),
            None => None,
        }
    }
}

fn find_layer_with_pipeline_and_layer_id_for_layer(layer: Rc<Layer<CompositorData>>,
                                                   pipeline_id: PipelineId,
                                                   layer_id: LayerId)
                                                   -> Option<Rc<Layer<CompositorData>>> {
    if layer.extra_data.borrow().pipeline_id == pipeline_id &&
       layer.extra_data.borrow().id == layer_id {
        return Some(layer);
    }

    for kid in layer.children().iter() {
        let result = find_layer_with_pipeline_and_layer_id_for_layer(kid.clone(),
                                                                     pipeline_id,
                                                                     layer_id);
        if result.is_some() {
            return result;
        }
    }

    return None;
}

impl<Window> CompositorEventListener for IOCompositor<Window> where Window: WindowMethods {
    fn handle_event(&mut self, msg: WindowEvent) -> bool {
        // Check for new messages coming from the other tasks in the system.
        loop {
            match self.port.try_recv_compositor_msg() {
                None => break,
                Some(msg) => {
                    if !self.handle_browser_message(msg) {
                        break
                    }
                }
            }
        }

        if self.shutdown_state == ShutdownState::FinishedShuttingDown {
            // We have exited the compositor and passing window
            // messages to script may crash.
            debug!("Exiting the compositor due to a request from script.");
            return false;
        }

        // Handle the message coming from the windowing system.
        self.handle_window_message(msg);

        // If a pinch-zoom happened recently, ask for tiles at the new resolution
        if self.zoom_action && precise_time_s() - self.zoom_time > 0.3 {
            self.zoom_action = false;
            self.scene.mark_layer_contents_as_changed_recursively();
            self.send_buffer_requests_for_all_layers();
        }

        match self.composition_request {
            CompositionRequest::NoCompositingNecessary |
            CompositionRequest::CompositeOnScrollTimeout(_) => {}
            CompositionRequest::CompositeNow(_) => {
                self.composite()
            }
        }

        self.shutdown_state != ShutdownState::FinishedShuttingDown
    }

    /// Repaints and recomposites synchronously. You must be careful when calling this, as if a
    /// paint is not scheduled the compositor will hang forever.
    ///
    /// This is used when resizing the window.
    fn repaint_synchronously(&mut self) {
        while self.shutdown_state != ShutdownState::ShuttingDown {
            let msg = self.port.recv_compositor_msg();
            let received_new_buffers = match msg {
                Msg::AssignPaintedBuffers(..) => true,
                _ => false,
            };
            let keep_going = self.handle_browser_message(msg);
            if received_new_buffers {
                self.composite();
                break
            }
            if !keep_going {
                break
            }
        }
    }

    fn shutdown(&mut self) {
        // Clear out the compositor layers so that painting tasks can destroy the buffers.
        match self.scene.root {
            None => {}
            Some(ref layer) => layer.forget_all_tiles(),
        }

        // Drain compositor port, sometimes messages contain channels that are blocking
        // another task from finishing (i.e. SetFrameTree).
        while self.port.try_recv_compositor_msg().is_some() {}

        // Tell the profiler, memory profiler, and scrolling timer to shut down.
        self.time_profiler_chan.send(time::ProfilerMsg::Exit);
        self.mem_profiler_chan.send(mem::ProfilerMsg::Exit);
        self.scrolling_timer.shutdown();
    }<|fim▁hole|>
    }

    fn get_title_for_main_frame(&self) {
        let root_pipeline_id = match self.root_pipeline {
            None => return,
            Some(ref root_pipeline) => root_pipeline.id,
        };
        let ConstellationChan(ref chan) = self.constellation_chan;
        chan.send(ConstellationMsg::GetPipelineTitle(root_pipeline_id)).unwrap();
    }
}

/// Why we performed a composite. This is used for debugging.
#[derive(Copy, Clone, PartialEq)]
pub enum CompositingReason {
    /// We hit the scroll timeout and are therefore drawing unrendered content.
    HitScrollTimeout,
    /// The window has been scrolled and we're starting the first recomposite.
    Scroll,
    /// A scroll has continued and we need to recomposite again.
    ContinueScroll,
    /// We're performing the single composite in headless mode.
    Headless,
    /// We're performing a composite to run an animation.
    Animation,
    /// A new frame tree has been loaded.
    NewFrameTree,
    /// New painted buffers have been received.
    NewPaintedBuffers,
    /// The window has been zoomed.
    Zoom,
}<|fim▁end|>
    fn pinch_zoom_level(&self) -> f32 {
        self.viewport_zoom.get() as f32
<|file_name|>editor_subscribe_label_deleted.py<|end_file_name|><|fim▁begin|>"""
.. module:: editor_subscribe_label_deleted

The **Editor Subscribe Label Deleted** Model.

PostgreSQL Definition
---------------------

The :code:`editor_subscribe_label_deleted` table is defined in the MusicBrainz
Server as:

.. code-block:: sql

    CREATE TABLE editor_subscribe_label_deleted (
        editor INTEGER NOT NULL, -- PK, references editor.id<|fim▁hole|>
    );

"""

from django.db import models
from django.utils.encoding import python_2_unicode_compatible


@python_2_unicode_compatible
class editor_subscribe_label_deleted(models.Model):
    """
    Not all parameters are listed here, only those that present some interest
    in their Django implementation.

    :param editor: references :class:`.editor`
    :param gid: references :class:`.deleted_entity`
    :param deleted_by: references :class:`.edit`
    """

    editor = models.OneToOneField('editor', primary_key=True)
    gid = models.OneToOneField('deleted_entity')
    deleted_by = models.ForeignKey('edit')

    def __str__(self):
        return 'Editor Subscribe Label Deleted'

    class Meta:
        db_table = 'editor_subscribe_label_deleted'<|fim▁end|>
gid UUID NOT NULL, -- PK, references deleted_entity.gid deleted_by INTEGER NOT NULL -- references edit.id
<|file_name|>0001_initial.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone<|fim▁hole|>class Migration(migrations.Migration): dependencies = [ ] operations = [ migrations.CreateModel( name='Node', fields=[ ('id', models.CharField(max_length=40, serialize=False, primary_key=True)), ('data', sentry.db.models.fields.gzippeddict.GzippedDictField()), ('timestamp', models.DateTimeField(default=django.utils.timezone.now, db_index=True)), ], ), ]<|fim▁end|>
import sentry.db.models.fields.gzippeddict
<|file_name|>update.py<|end_file_name|><|fim▁begin|>from __future__ import print_function """ Deprecated. Use ``update-tld-names`` command instead. """ __title__ = 'tld.update' __author__ = 'Artur Barseghyan' __copyright__ = '2013-2015 Artur Barseghyan' __license__ = 'GPL 2.0/LGPL 2.1' from tld.utils import update_tld_names _ = lambda x: x <|fim▁hole|><|fim▁end|>
if __name__ == '__main__': update_tld_names() print(_("Local TLD names file has been successfully updated!"))
<|file_name|>SubScreenContext.java<|end_file_name|><|fim▁begin|>package de.verygame.surface.screen.base; import com.badlogic.gdx.InputMultiplexer; import com.badlogic.gdx.graphics.g2d.PolygonSpriteBatch; import com.badlogic.gdx.utils.viewport.Viewport; import java.util.Map; /** * @author Rico Schrage * * Context which can contain several subscreens. */ public class SubScreenContext implements ScreenContext { /** List of subScreen's plus visibility flag */ protected final ScreenSwitch screenSwitch; /** viewport of the screen (manages the glViewport) */ protected final Viewport viewport; /** True if a subScreen is visible */ protected boolean showSubScreen = false; /** * Constructs a context with the given viewport. * * @param viewport viewport viewport of the screen */ public SubScreenContext(Viewport viewport) { super(); this.viewport = viewport; this.screenSwitch = new ScreenSwitch(); } /** * Sets the dependency map of the screen switch. * * @param dependencies map of dependencies */ public void setDependencies(Map<String, Object> dependencies) { screenSwitch.setDependencyMap(dependencies); } /** * Sets the batch of the context.<|fim▁hole|> * * @param polygonSpriteBatch batch */ public void setBatch(PolygonSpriteBatch polygonSpriteBatch) { screenSwitch.setBatch(polygonSpriteBatch); } /** * Sets the inputHandler of the context. * * @param inputHandler inputHandler */ public void setInputHandler(InputMultiplexer inputHandler) { screenSwitch.setInputHandler(inputHandler); } public InputMultiplexer getInputHandler() { return screenSwitch.getInputHandler(); } public void onActivate(ScreenId screenKey) { if (screenSwitch.getActiveScreen() != null) { screenSwitch.getActiveScreen().onActivate(screenKey); } } public float onDeactivate(ScreenId screenKey) { if (screenSwitch.getActiveScreen() != null) { return screenSwitch.getActiveScreen().onDeactivate(screenKey); } return 0; } /** * Applies the viewport of the context. Calls {@link Viewport#apply(boolean)}. */ public void applyViewport() { viewport.apply(true); } /** * Updates the viewport of the context. Calls {@link Viewport#update(int, int, boolean)}. * * @param width width of the frame * @param height height of the frame */ public void updateViewport(int width, int height) { viewport.update(width, height, true); } public void update() { screenSwitch.updateSwitch(); screenSwitch.updateScreen(); } public void renderScreen() { if (showSubScreen) { screenSwitch.renderScreen(); } } public void resizeSubScreen(int width, int height) { screenSwitch.resize(width, height); } public void pauseSubScreen() { screenSwitch.pause(); } public void resumeSubScreen() { screenSwitch.resume(); } public void dispose() { screenSwitch.dispose(); } @Override public Viewport getViewport() { return viewport; } @Override public PolygonSpriteBatch getBatch() { return screenSwitch.getBatch(); } @Override public void addSubScreen(SubScreenId id, SubScreen subScreen) { if (screenSwitch.getBatch() == null) { throw new IllegalStateException("Parent screen have to be attached to a screen switch!"); } this.screenSwitch.addScreen(id, subScreen); } @Override public SubScreen getActiveSubScreen() { return (SubScreen) screenSwitch.getActiveScreen(); } @Override public void showScreen(SubScreenId id) { showSubScreen = true; screenSwitch.setActive(id); } @Override public void initialize(SubScreenId id) { showSubScreen = true; screenSwitch.setScreenSimple(id); } @Override public void hideScreen() { showSubScreen = false; } }<|fim▁end|>
<|file_name|>test_parsing.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- """ Test functionality of coursera module. """ import json import os.path import pytest from six import iteritems from mock import patch, Mock, mock_open from coursera import coursera_dl # JSON Handling @pytest.fixture def get_page(monkeypatch): monkeypatch.setattr(coursera_dl, 'get_page', Mock()) @pytest.fixture def json_path(): return os.path.join(os.path.dirname(__file__), "fixtures", "json") def test_that_should_not_dl_if_file_exist(get_page, json_path): coursera_dl.get_page = Mock() coursera_dl.download_about(object(), "matrix-002", json_path) assert coursera_dl.get_page.called is False def test_that_we_parse_and_write_json_correctly(get_page, json_path): unprocessed_json = os.path.join(os.path.dirname(__file__), "fixtures", "json", "unprocessed.json") raw_data = open(unprocessed_json).read() coursera_dl.get_page = lambda x, y: raw_data open_mock = mock_open() with patch('coursera.coursera_dl.open', open_mock, create=True): coursera_dl.download_about(object(), "networksonline-002", json_path) about_json = os.path.join(json_path, 'networksonline-002-about.json') open_mock.assert_called_once_with(about_json, 'w') data = json.loads(open_mock().write.call_args[0][0]) assert data['id'] == 394 assert data['shortName'] == 'networksonline' # Test Syllabus Parsing @pytest.fixture def get_video(monkeypatch): """ Mock some methods that would otherwise repeatedly create many web requests. More specifically, we mock: * the search for hidden videos * the actual download of videos """ # Mock coursera_dl.grab_hidden_video_url monkeypatch.setattr(coursera_dl, 'grab_hidden_video_url', lambda session, href: None) # Mock coursera_dl.get_video monkeypatch.setattr(coursera_dl, 'get_video', lambda session, href: None) <|fim▁hole|>
@pytest.mark.parametrize( "filename,num_sections,num_lectures,num_resources,num_videos", [
<|file_name|>io.go<|end_file_name|><|fim▁begin|>package esl import ( "io" "errors" "unicode/utf8" ) // Buffer ... type buffer []byte // MemoryReader ... type memReader [ ]byte // MemoryWriter ... type memWriter [ ]byte // ErrBufferSize indicates that memory cannot be allocated to store data in a buffer. var ErrBufferSize = errors.New(`could not allocate memory`) func newBuffer( size int ) *buffer { buf := make([ ]byte, 0, size ) return (*buffer)(&buf) } func ( buf *buffer ) reader( ) *memReader { n := len( *buf ) rbuf := ( *buf )[:n:n] return ( *memReader )( &rbuf ) } func ( buf *buffer ) writer( ) *memWriter { return ( *memWriter )( buf ) } func ( buf *buffer ) grow( n int ) error { if ( len( *buf )+ n ) > cap( *buf ) { // Not enough space to store [:+(n)]byte(s) mbuf, err := makebuf( cap( *buf )+ n ) if ( err != nil ) { return ( err ) } copy( mbuf, *buf ) *( buf ) = mbuf } return nil }<|fim▁hole|> // allocates a byte slice of size. // If the allocation fails, returns error // indicating that memory cannot be allocated to store data in a buffer. func makebuf( size int ) ( buf [ ]byte, memerr error ) { defer func( ) { // If the make fails, give a known error. if ( recover( ) != nil ) { ( memerr ) = ErrBufferSize } }( ) return make( [ ]byte, 0, size ), nil } func ( buf *memReader ) Read( b [ ]byte ) ( n int, err error ) { if len( *buf ) == 0 { return ( 0 ), io.EOF } n, *buf = copy( b, *buf ), ( *buf )[ n: ] return // n, nil } func ( buf *memReader ) ReadByte( ) ( c byte, err error ) { if len(*buf) == 0 { return ( 0 ), io.EOF } c, *buf = (*buf)[0], (*buf)[1:] return // c, nil } func ( buf *memReader ) ReadRune( ) ( r rune, size int, err error ) { if len(*buf) == 0 { return 0, 0, io.EOF } r, size = utf8.DecodeRune(*buf) *buf = (*buf)[size:] return // r, size, nil } func ( buf *memReader ) WriteTo( w io.Writer ) ( n int64, err error ) { for len( *buf ) > 0 { rw, err := w.Write( *buf ) if ( rw > 0 ) { n, *buf = n + int64( rw ), (*buf)[rw:] } if ( err != nil ) { return n, err } } return ( 0 ), io.EOF } func ( buf *memWriter ) Write( b []byte ) ( n int, err error ) { *buf = append( *buf, b...) return len( b ), nil } func ( buf *memWriter ) WriteByte( c byte ) error { *buf = append( *buf, c ) return ( nil ) } func ( buf *memWriter ) WriteRune( r rune ) error { if ( r < utf8.RuneSelf ) { return buf.WriteByte( byte( r )) } b := *buf n := len( b ) if ( n + utf8.UTFMax ) > cap( b ) { b = make( []byte, ( n + utf8.UTFMax )) copy( b, *buf ) } w := utf8.EncodeRune( b[ n:( n + utf8.UTFMax )], r ) *buf = b[ :( n + w )] return nil } func ( buf *memWriter ) WriteString( s string ) ( n int, err error ) { *buf = append( *buf, s...) return len( s ), nil } // func (buf *memWriter) ReadFrom(r io.Reader) (n int64, err error) { // // NOTE: indefinite allocation! Try to use io.WriterTo interface! // }<|fim▁end|>
<|file_name|>dockerplugin.pb.go<|end_file_name|><|fim▁begin|>// Code generated by protoc-gen-go. // source: dockerplugin.proto // DO NOT EDIT! package dockerplugin import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import _ "github.com/gengo/grpc-gateway/third_party/googleapis/google/api" import google_protobuf1 "go.pedge.io/pb/go/google/protobuf" import ( context "golang.org/x/net/context" grpc "google.golang.org/grpc" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // ActivateResponse is a response for the docker plugin API activate function call. type ActivateResponse struct { Implements []string `protobuf:"bytes,1,rep,name=implements" json:"implements,omitempty"` } func (m *ActivateResponse) Reset() { *m = ActivateResponse{} }<|fim▁hole|>func (m *ActivateResponse) String() string { return proto.CompactTextString(m) } func (*ActivateResponse) ProtoMessage() {} func (*ActivateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func init() { proto.RegisterType((*ActivateResponse)(nil), "dockerplugin.ActivateResponse") } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion3 // Client API for API service type APIClient interface { // Activate is the activate function call for the docker plugin API. Activate(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ActivateResponse, error) } type aPIClient struct { cc *grpc.ClientConn } func NewAPIClient(cc *grpc.ClientConn) APIClient { return &aPIClient{cc} } func (c *aPIClient) Activate(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ActivateResponse, error) { out := new(ActivateResponse) err := grpc.Invoke(ctx, "/dockerplugin.API/Activate", in, out, c.cc, opts...) if err != nil { return nil, err } return out, nil } // Server API for API service type APIServer interface { // Activate is the activate function call for the docker plugin API. 
Activate(context.Context, *google_protobuf1.Empty) (*ActivateResponse, error) } func RegisterAPIServer(s *grpc.Server, srv APIServer) { s.RegisterService(&_API_serviceDesc, srv) } func _API_Activate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(google_protobuf1.Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(APIServer).Activate(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/dockerplugin.API/Activate", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(APIServer).Activate(ctx, req.(*google_protobuf1.Empty)) } return interceptor(ctx, in, info, handler) } var _API_serviceDesc = grpc.ServiceDesc{ ServiceName: "dockerplugin.API", HandlerType: (*APIServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "Activate", Handler: _API_Activate_Handler, }, }, Streams: []grpc.StreamDesc{}, Metadata: fileDescriptor0, } func init() { proto.RegisterFile("dockerplugin.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 188 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0x2a, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x41, 0x16, 0x93, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, 0xa8, 0x95, 0x92, 0x86, 0xca, 0x82, 0x79, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x10, 0x49, 0x25, 0x23, 0x2e, 0x01, 0xc7, 0xe4, 0x92, 0xcc, 0xb2, 0xc4, 0x92, 0xd4, 0xa0, 0xd4, 0xe2, 0x02, 0xa0, 0xae, 0x54, 0x21, 0x39, 0x2e, 0xae, 0xcc, 0xdc, 0x82, 0x9c, 0xd4, 0xdc, 0xd4, 0xbc, 0x92, 0x62, 0x09, 0x46, 0x05, 0x66, 0x0d, 0xce, 0x20, 0x24, 0x11, 0xa3, 0x34, 0x2e, 0x66, 0xc7, 0x00, 0x4f, 0xa1, 0x78, 0x2e, 0x0e, 0x98, 0x56, 0x21, 0x31, 0x3d, 0x88, 0x25, 0x7a, 0x30, 0x4b, 0xf4, 0x5c, 0x41, 0x96, 0x48, 0xc9, 0xe9, 0xa1, 0x38, 0x1e, 0xdd, 0x2a, 0x25, 0xe9, 0xa6, 0xcb, 0x4f, 0x26, 0x33, 0x89, 0x2a, 0x09, 0xe8, 0x07, 0xa0, 0xaa, 0xb0, 0x62, 0xd4, 0x4a, 0x62, 0x03, 0x1b, 0x66, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x13, 0x35, 0x82, 0x01, 0x01, 0x00, 0x00, }<|fim▁end|>
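One genuine bug worth flagging in the io.go row above: in memReader.Read, the tuple assignment n, *buf = copy( b, *buf ), ( *buf )[ n: ] evaluates its right-hand side before assigning, so the reslice uses the named return n while it is still zero and the reader never advances past the bytes it hands out. The fix is to compute the count first and then advance; the same consume pattern in Python (a hypothetical stand-in class, not part of the esl package):

class MemReader:
    # Byte-buffer reader that advances past what it returns.
    def __init__(self, data: bytes):
        self._buf = data

    def read(self, out: bytearray) -> int:
        if not self._buf:
            raise EOFError
        n = min(len(out), len(self._buf))   # compute the count first...
        out[:n] = self._buf[:n]
        self._buf = self._buf[n:]           # ...then advance the buffer
        return n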
<|file_name|>company-structure.java<|end_file_name|><|fim▁begin|>package test; /** scala> map res6: java.util.HashMap[String,java.util.List[String]] = {AAA=[BBB, CCC, EEE], CCC=[DDD]} scala> test.Test.printCompany(map) -AAA -BBB -CCC -DDD -EEE */ import java.util.HashMap; import java.util.HashSet; import java.util.List; public class Test { public static void printCompany(HashMap<String, List<String>> graph) { if (graph.size() == 0) return;<|fim▁hole|> } for (String v : graph.keySet()) { for (String l : graph.get(v)) { roots.remove(l); } } for (String v : roots) { printCompany(graph, v, 0); } } private static void printCompany(HashMap<String, List<String>> graph, String vertex, int depth) { printVertex(vertex, depth); if (graph.containsKey(vertex)) { for (String v : graph.get(vertex)) { printCompany(graph, v, depth + 1); } } } private static void printVertex(String vertex, int depth) { StringBuilder sb = new StringBuilder(); for (int i = 0; i < depth; i++) { sb.append(" "); } sb.append("-"); sb.append(vertex); System.out.println(sb.toString()); } }<|fim▁end|>
HashSet<String> roots = new HashSet<String>(); for (String v : graph.keySet()) { roots.add(v);
<|file_name|>eap.py<|end_file_name|><|fim▁begin|># Copyright (c) 2003-2013 CORE Security Technologies # # This software is provided under under a slightly modified version # of the Apache Software License. See the accompanying LICENSE file # for more information. # # $Id$ # # Description: # EAP packets # # Author: # Aureliano Calvo from impacket.helper import ProtocolPacket, Byte, Word, Long, ThreeBytesBigEndian DOT1X_AUTHENTICATION = 0x888E class EAPExpanded(ProtocolPacket): """EAP expanded data according to RFC 3748, section 5.7""" WFA_SMI = 0x00372a SIMPLE_CONFIG = 0x00000001 header_size = 7 tail_size = 0 vendor_id = ThreeBytesBigEndian(0) vendor_type = Long(3, ">") class EAPR(ProtocolPacket): """It represents a request or a response in EAP (codes 1 and 2)""" IDENTITY = 0x01 EXPANDED = 0xfe header_size = 1 tail_size = 0 type = Byte(0) class EAP(ProtocolPacket): REQUEST = 0x01 RESPONSE = 0x02 SUCCESS = 0x03 FAILURE = 0x04 header_size = 4 tail_size = 0 code = Byte(0) identifier = Byte(1) length = Word(2, ">") class EAPOL(ProtocolPacket): EAP_PACKET = 0x00<|fim▁hole|> EAPOL_LOGOFF = 0x02 EAPOL_KEY = 0x03 EAPOL_ENCAPSULATED_ASF_ALERT = 0x04 DOT1X_VERSION = 0x01 header_size = 4 tail_size = 0 version = Byte(0) packet_type = Byte(1) body_length = Word(2, ">")<|fim▁end|>
EAPOL_START = 0x01
<|file_name|>model.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- class Item(object): def __init__(self, uid, name, time): self._uid = uid self._name = name self._time = time @property def name(self): return self._name @property def uid(self): return self._uid def display(self, size): delta = size - len(self._name) - len(self._time) -1 if delta < 0: return self._name[:size-len(self._time)-1] + ' ' + self._time else: return self._name + ' '*(delta+1) + self._time class ItemList(object): def __init__(self): self.clear() def clear(self): self._items = [] self._offset = 0 self._position = 0 self._selected = [] def add(self, item): self._items.append(item) def go_up(self): self._position -= 1 self._position %= len(self._items) def go_down(self): self._position += 1 self._position %= len(self._items) def go_top(self): self._position = 0 def go_bottom(self): self._position = len(self._items)-1 def is_empty(self): return len(self._items) == 0 def get_current_uid(self): return self._items[self._position].uid def _compute_offset(self, max_len): if self._position < self._offset: self._offset = self._position elif self._position-self._offset > max_len-1: self._offset = self._position-max_len+1 def visible_items(self, max_len): self._compute_offset(max_len) return self._items[self._offset:self._offset+max_len] def select(self): if self._position in self._selected: self._selected.remove(self._position) else: self._selected.append(self._position) def unselect_all(self): self._selected = [] def has_selection(self): return len(self._selected) > 0 def selected_items(self): for i in self._selected: yield self._items[i] def position_item(self): return self._items[self._position] def is_selected(self, i, offset=True): if offset: i += self._offset return i in self._selected def is_position(self, i, offset=True): if offset: i += self._offset return i == self._position class Playlist(object): def __init__(self, space=1): self._space = space self.clear() def clear(self): self._list = [] self._iplay = 0 self._offset = 0 def add(self, item):<|fim▁hole|> def current_uid(self): return self._list[self._iplay].uid def next(self, step=1, secure=True): self._iplay += step if secure and self._iplay > len(self._list): self._iplay = len(self._list) def previous(self, step=1, secure=True): self._iplay -= step if secure and self._iplay < 0: self._iplay = 0 def _compute_offset(self, max_len): if self._iplay-self._space < self._offset: self._offset = max(0, self._iplay-self._space) elif self._iplay - self._offset > max_len-self._space-1: self._offset = min(len(self._list)-max_len, self._iplay-max_len+self._space+1) def visible_items(self, max_len): self._compute_offset(max_len) return self._list[self._offset:self._offset+max_len] def is_current(self, i, offset=True): if offset: i += self._offset return i == self._iplay<|fim▁end|>
self._list.append(item) def is_over(self): return self._iplay >= len(self._list)
<|file_name|>build.ts<|end_file_name|><|fim▁begin|>/// <reference path="../custom_typings/ambient.d.ts" /> import * as gulp from "gulp";<|fim▁hole|> /* Build task for deployment */ gulp.task("build:prod", done => build(["typescript:prod", "sass", "html"], done)); /* Build task for dev environment */ gulp.task("build:dev", done => build(["typescript:dev:bundle", "typescript:dev", "sass", "html"], done)); function build(tasks: string[], done: any) { runSequence("clean", tasks, done) }<|fim▁end|>
import * as runSequence from "run-sequence";
<|file_name|>d3d12.rs<|end_file_name|><|fim▁begin|>// Copyright © 2015, Dmitry Roschin // Licensed under the MIT License <LICENSE.md> pub const D3D12_16BIT_INDEX_STRIP_CUT_VALUE: ::UINT = 0xffff; pub const D3D12_32BIT_INDEX_STRIP_CUT_VALUE: ::UINT = 0xffffffff; pub const D3D12_8BIT_INDEX_STRIP_CUT_VALUE: ::UINT = 0xff; pub const D3D12_ANISOTROPIC_FILTERING_BIT: ::UINT = 0x40; pub const D3D12_APPEND_ALIGNED_ELEMENT: ::UINT = 0xffffffff; pub const D3D12_ARRAY_AXIS_ADDRESS_RANGE_BIT_COUNT: ::UINT = 9; pub const D3D12_CLIP_OR_CULL_DISTANCE_COUNT: ::UINT = 8; pub const D3D12_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT: ::UINT = 2; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_API_SLOT_COUNT: ::UINT = 14; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_COMPONENTS: ::UINT = 4; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_HW_SLOT_COUNT: ::UINT = 15; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_PARTIAL_UPDATE_EXTENTS_BYTE_ALIGNMENT: ::UINT = 16; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_REGISTER_COUNT: ::UINT = 15; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_REGISTER_READS_PER_INST: ::UINT = 1; pub const D3D12_COMMONSHADER_CONSTANT_BUFFER_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_COMMONSHADER_FLOWCONTROL_NESTING_LIMIT: ::UINT = 64; pub const D3D12_COMMONSHADER_IMMEDIATE_CONSTANT_BUFFER_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_COMMONSHADER_IMMEDIATE_CONSTANT_BUFFER_REGISTER_COUNT: ::UINT = 1; pub const D3D12_COMMONSHADER_IMMEDIATE_CONSTANT_BUFFER_REGISTER_READS_PER_INST: ::UINT = 1; pub const D3D12_COMMONSHADER_IMMEDIATE_CONSTANT_BUFFER_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_COMMONSHADER_IMMEDIATE_VALUE_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_COMMONSHADER_INPUT_RESOURCE_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_COMMONSHADER_INPUT_RESOURCE_REGISTER_COUNT: ::UINT = 128; pub const D3D12_COMMONSHADER_INPUT_RESOURCE_REGISTER_READS_PER_INST: ::UINT = 1; pub const D3D12_COMMONSHADER_INPUT_RESOURCE_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_COMMONSHADER_INPUT_RESOURCE_SLOT_COUNT: ::UINT = 128; pub const D3D12_COMMONSHADER_SAMPLER_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_COMMONSHADER_SAMPLER_REGISTER_COUNT: ::UINT = 16; pub const D3D12_COMMONSHADER_SAMPLER_REGISTER_READS_PER_INST: ::UINT = 1; pub const D3D12_COMMONSHADER_SAMPLER_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_COMMONSHADER_SAMPLER_SLOT_COUNT: ::UINT = 16; pub const D3D12_COMMONSHADER_SUBROUTINE_NESTING_LIMIT: ::UINT = 32; pub const D3D12_COMMONSHADER_TEMP_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_COMMONSHADER_TEMP_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_COMMONSHADER_TEMP_REGISTER_COUNT: ::UINT = 4096; pub const D3D12_COMMONSHADER_TEMP_REGISTER_READS_PER_INST: ::UINT = 3; pub const D3D12_COMMONSHADER_TEMP_REGISTER_READ_PORTS: ::UINT = 3; pub const D3D12_COMMONSHADER_TEXCOORD_RANGE_REDUCTION_MAX: ::UINT = 10; pub const D3D12_COMMONSHADER_TEXCOORD_RANGE_REDUCTION_MIN: ::INT = -10; pub const D3D12_COMMONSHADER_TEXEL_OFFSET_MAX_NEGATIVE: ::INT = -8; pub const D3D12_COMMONSHADER_TEXEL_OFFSET_MAX_POSITIVE: ::UINT = 7; pub const D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT: ::UINT = 256; pub const D3D12_CS_4_X_BUCKET00_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 256; pub const D3D12_CS_4_X_BUCKET00_MAX_NUM_THREADS_PER_GROUP: ::UINT = 64; pub const D3D12_CS_4_X_BUCKET01_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 240; 
pub const D3D12_CS_4_X_BUCKET01_MAX_NUM_THREADS_PER_GROUP: ::UINT = 68; pub const D3D12_CS_4_X_BUCKET02_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 224; pub const D3D12_CS_4_X_BUCKET02_MAX_NUM_THREADS_PER_GROUP: ::UINT = 72; pub const D3D12_CS_4_X_BUCKET03_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 208; pub const D3D12_CS_4_X_BUCKET03_MAX_NUM_THREADS_PER_GROUP: ::UINT = 76; pub const D3D12_CS_4_X_BUCKET04_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 192; pub const D3D12_CS_4_X_BUCKET04_MAX_NUM_THREADS_PER_GROUP: ::UINT = 84; pub const D3D12_CS_4_X_BUCKET05_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 176; pub const D3D12_CS_4_X_BUCKET05_MAX_NUM_THREADS_PER_GROUP: ::UINT = 92; pub const D3D12_CS_4_X_BUCKET06_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 160; pub const D3D12_CS_4_X_BUCKET06_MAX_NUM_THREADS_PER_GROUP: ::UINT = 100; pub const D3D12_CS_4_X_BUCKET07_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 144; pub const D3D12_CS_4_X_BUCKET07_MAX_NUM_THREADS_PER_GROUP: ::UINT = 112; pub const D3D12_CS_4_X_BUCKET08_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 128; pub const D3D12_CS_4_X_BUCKET08_MAX_NUM_THREADS_PER_GROUP: ::UINT = 128; pub const D3D12_CS_4_X_BUCKET09_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 112; pub const D3D12_CS_4_X_BUCKET09_MAX_NUM_THREADS_PER_GROUP: ::UINT = 144; pub const D3D12_CS_4_X_BUCKET10_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 96; pub const D3D12_CS_4_X_BUCKET10_MAX_NUM_THREADS_PER_GROUP: ::UINT = 168; pub const D3D12_CS_4_X_BUCKET11_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 80; pub const D3D12_CS_4_X_BUCKET11_MAX_NUM_THREADS_PER_GROUP: ::UINT = 204; pub const D3D12_CS_4_X_BUCKET12_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 64; pub const D3D12_CS_4_X_BUCKET12_MAX_NUM_THREADS_PER_GROUP: ::UINT = 256; pub const D3D12_CS_4_X_BUCKET13_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 48; pub const D3D12_CS_4_X_BUCKET13_MAX_NUM_THREADS_PER_GROUP: ::UINT = 340; pub const D3D12_CS_4_X_BUCKET14_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 32; pub const D3D12_CS_4_X_BUCKET14_MAX_NUM_THREADS_PER_GROUP: ::UINT = 512; pub const D3D12_CS_4_X_BUCKET15_MAX_BYTES_TGSM_WRITABLE_PER_THREAD: ::UINT = 16; pub const D3D12_CS_4_X_BUCKET15_MAX_NUM_THREADS_PER_GROUP: ::UINT = 768; pub const D3D12_CS_4_X_DISPATCH_MAX_THREAD_GROUPS_IN_Z_DIMENSION: ::UINT = 1; pub const D3D12_CS_4_X_RAW_UAV_BYTE_ALIGNMENT: ::UINT = 256; pub const D3D12_CS_4_X_THREAD_GROUP_MAX_THREADS_PER_GROUP: ::UINT = 768; pub const D3D12_CS_4_X_THREAD_GROUP_MAX_X: ::UINT = 768; pub const D3D12_CS_4_X_THREAD_GROUP_MAX_Y: ::UINT = 768; pub const D3D12_CS_4_X_UAV_REGISTER_COUNT: ::UINT = 1; pub const D3D12_CS_DISPATCH_MAX_THREAD_GROUPS_PER_DIMENSION: ::UINT = 65535; pub const D3D12_CS_TGSM_REGISTER_COUNT: ::UINT = 8192; pub const D3D12_CS_TGSM_REGISTER_READS_PER_INST: ::UINT = 1; pub const D3D12_CS_TGSM_RESOURCE_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_CS_TGSM_RESOURCE_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_CS_THREADGROUPID_REGISTER_COMPONENTS: ::UINT = 3; pub const D3D12_CS_THREADGROUPID_REGISTER_COUNT: ::UINT = 1; pub const D3D12_CS_THREADIDINGROUPFLATTENED_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_CS_THREADIDINGROUPFLATTENED_REGISTER_COUNT: ::UINT = 1; pub const D3D12_CS_THREADIDINGROUP_REGISTER_COMPONENTS: ::UINT = 3; pub const D3D12_CS_THREADIDINGROUP_REGISTER_COUNT: ::UINT = 1; pub const D3D12_CS_THREADID_REGISTER_COMPONENTS: ::UINT = 3; pub const D3D12_CS_THREADID_REGISTER_COUNT: ::UINT = 1; pub const D3D12_CS_THREAD_GROUP_MAX_THREADS_PER_GROUP: ::UINT = 1024; pub const 
D3D12_CS_THREAD_GROUP_MAX_X: ::UINT = 1024; pub const D3D12_CS_THREAD_GROUP_MAX_Y: ::UINT = 1024; pub const D3D12_CS_THREAD_GROUP_MAX_Z: ::UINT = 64; pub const D3D12_CS_THREAD_GROUP_MIN_X: ::UINT = 1; pub const D3D12_CS_THREAD_GROUP_MIN_Y: ::UINT = 1; pub const D3D12_CS_THREAD_GROUP_MIN_Z: ::UINT = 1; pub const D3D12_CS_THREAD_LOCAL_TEMP_REGISTER_POOL: ::UINT = 16384; pub const D3D12_DEFAULT_BLEND_FACTOR_ALPHA: ::FLOAT = 1.0; pub const D3D12_DEFAULT_BLEND_FACTOR_BLUE: ::FLOAT = 1.0; pub const D3D12_DEFAULT_BLEND_FACTOR_GREEN: ::FLOAT = 1.0; pub const D3D12_DEFAULT_BLEND_FACTOR_RED: ::FLOAT = 1.0; pub const D3D12_DEFAULT_BORDER_COLOR_COMPONENT: ::FLOAT = 0.0; pub const D3D12_DEFAULT_DEPTH_BIAS: ::UINT = 0; pub const D3D12_DEFAULT_DEPTH_BIAS_CLAMP: ::FLOAT = 0.0; pub const D3D12_DEFAULT_MAX_ANISOTROPY: ::UINT = 16; pub const D3D12_DEFAULT_MIP_LOD_BIAS: ::FLOAT = 0.0; pub const D3D12_DEFAULT_MSAA_RESOURCE_PLACEMENT_ALIGNMENT: ::UINT = 4194304; pub const D3D12_DEFAULT_RENDER_TARGET_ARRAY_INDEX: ::UINT = 0; pub const D3D12_DEFAULT_RESOURCE_PLACEMENT_ALIGNMENT: ::UINT = 65536; pub const D3D12_DEFAULT_SAMPLE_MASK: ::UINT = 0xffffffff; pub const D3D12_DEFAULT_SCISSOR_ENDX: ::UINT = 0; pub const D3D12_DEFAULT_SCISSOR_ENDY: ::UINT = 0; pub const D3D12_DEFAULT_SCISSOR_STARTX: ::UINT = 0; pub const D3D12_DEFAULT_SCISSOR_STARTY: ::UINT = 0; pub const D3D12_DEFAULT_SLOPE_SCALED_DEPTH_BIAS: ::FLOAT = 0.0; pub const D3D12_DEFAULT_STENCIL_READ_MASK: ::UINT = 0xff; pub const D3D12_DEFAULT_STENCIL_REFERENCE: ::UINT = 0; pub const D3D12_DEFAULT_STENCIL_WRITE_MASK: ::UINT = 0xff; pub const D3D12_DEFAULT_VIEWPORT_AND_SCISSORRECT_INDEX: ::UINT = 0; pub const D3D12_DEFAULT_VIEWPORT_HEIGHT: ::UINT = 0; pub const D3D12_DEFAULT_VIEWPORT_MAX_DEPTH: ::FLOAT = 0.0; pub const D3D12_DEFAULT_VIEWPORT_MIN_DEPTH: ::FLOAT = 0.0; pub const D3D12_DEFAULT_VIEWPORT_TOPLEFTX: ::UINT = 0; pub const D3D12_DEFAULT_VIEWPORT_TOPLEFTY: ::UINT = 0; pub const D3D12_DEFAULT_VIEWPORT_WIDTH: ::UINT = 0; pub const D3D12_DESCRIPTOR_RANGE_OFFSET_APPEND: ::UINT = 0xffffffff; pub const D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_END: ::UINT = 0xfffffff7; pub const D3D12_DRIVER_RESERVED_REGISTER_SPACE_VALUES_START: ::UINT = 0xfffffff0; pub const D3D12_DS_INPUT_CONTROL_POINTS_MAX_TOTAL_SCALARS: ::UINT = 3968; pub const D3D12_DS_INPUT_CONTROL_POINT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_DS_INPUT_CONTROL_POINT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_DS_INPUT_CONTROL_POINT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_DS_INPUT_CONTROL_POINT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_DS_INPUT_CONTROL_POINT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_DS_INPUT_DOMAIN_POINT_REGISTER_COMPONENTS: ::UINT = 3; pub const D3D12_DS_INPUT_DOMAIN_POINT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_DS_INPUT_DOMAIN_POINT_REGISTER_COUNT: ::UINT = 1; pub const D3D12_DS_INPUT_DOMAIN_POINT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_DS_INPUT_DOMAIN_POINT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_DS_INPUT_PATCH_CONSTANT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_DS_INPUT_PATCH_CONSTANT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_DS_INPUT_PATCH_CONSTANT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_DS_INPUT_PATCH_CONSTANT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_DS_INPUT_PATCH_CONSTANT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_DS_INPUT_PRIMITIVE_ID_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_DS_INPUT_PRIMITIVE_ID_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; 
pub const D3D12_DS_INPUT_PRIMITIVE_ID_REGISTER_COUNT: ::UINT = 1; pub const D3D12_DS_INPUT_PRIMITIVE_ID_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_DS_INPUT_PRIMITIVE_ID_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_DS_OUTPUT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_DS_OUTPUT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_DS_OUTPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_FILTER_REDUCTION_TYPE_MASK: ::UINT = 0x3; pub const D3D12_FILTER_REDUCTION_TYPE_SHIFT: ::UINT = 7; pub const D3D12_FILTER_TYPE_MASK: ::UINT = 0x3; pub const D3D12_FLOAT16_FUSED_TOLERANCE_IN_ULP: ::DOUBLE = 0.6; pub const D3D12_FLOAT32_MAX: ::FLOAT = 3.402823466e+38; pub const D3D12_FLOAT32_TO_INTEGER_TOLERANCE_IN_ULP: ::FLOAT = 0.6; pub const D3D12_FLOAT_TO_SRGB_EXPONENT_DENOMINATOR: ::FLOAT = 2.4; pub const D3D12_FLOAT_TO_SRGB_EXPONENT_NUMERATOR: ::FLOAT = 1.0; pub const D3D12_FLOAT_TO_SRGB_OFFSET: ::FLOAT = 0.055; pub const D3D12_FLOAT_TO_SRGB_SCALE_1: ::FLOAT = 12.92; pub const D3D12_FLOAT_TO_SRGB_SCALE_2: ::FLOAT = 1.055; pub const D3D12_FLOAT_TO_SRGB_THRESHOLD: ::FLOAT = 0.0031308; pub const D3D12_FTOI_INSTRUCTION_MAX_INPUT: ::FLOAT = 2147483647.999; pub const D3D12_FTOI_INSTRUCTION_MIN_INPUT: ::FLOAT = -2147483648.999; pub const D3D12_FTOU_INSTRUCTION_MAX_INPUT: ::FLOAT = 4294967295.999; pub const D3D12_FTOU_INSTRUCTION_MIN_INPUT: ::FLOAT = 0.0; pub const D3D12_GS_INPUT_INSTANCE_ID_READS_PER_INST: ::UINT = 2; pub const D3D12_GS_INPUT_INSTANCE_ID_READ_PORTS: ::UINT = 1; pub const D3D12_GS_INPUT_INSTANCE_ID_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_GS_INPUT_INSTANCE_ID_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_GS_INPUT_INSTANCE_ID_REGISTER_COUNT: ::UINT = 1; pub const D3D12_GS_INPUT_PRIM_CONST_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_GS_INPUT_PRIM_CONST_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_GS_INPUT_PRIM_CONST_REGISTER_COUNT: ::UINT = 1; pub const D3D12_GS_INPUT_PRIM_CONST_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_GS_INPUT_PRIM_CONST_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_GS_INPUT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_GS_INPUT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_GS_INPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_GS_INPUT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_GS_INPUT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_GS_INPUT_REGISTER_VERTICES: ::UINT = 32; pub const D3D12_GS_MAX_INSTANCE_COUNT: ::UINT = 32; pub const D3D12_GS_MAX_OUTPUT_VERTEX_COUNT_ACROSS_INSTANCES: ::UINT = 1024; pub const D3D12_GS_OUTPUT_ELEMENTS: ::UINT = 32; pub const D3D12_GS_OUTPUT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_GS_OUTPUT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_GS_OUTPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_HS_CONTROL_POINT_PHASE_INPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_HS_CONTROL_POINT_PHASE_OUTPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_HS_CONTROL_POINT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_HS_CONTROL_POINT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_HS_CONTROL_POINT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_HS_CONTROL_POINT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_HS_FORK_PHASE_INSTANCE_COUNT_UPPER_BOUND: ::UINT = 0xffffffff; pub const D3D12_HS_INPUT_FORK_INSTANCE_ID_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_HS_INPUT_FORK_INSTANCE_ID_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_HS_INPUT_FORK_INSTANCE_ID_REGISTER_COUNT: ::UINT = 1; pub const 
D3D12_HS_INPUT_FORK_INSTANCE_ID_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_HS_INPUT_FORK_INSTANCE_ID_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_HS_INPUT_JOIN_INSTANCE_ID_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_HS_INPUT_JOIN_INSTANCE_ID_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_HS_INPUT_JOIN_INSTANCE_ID_REGISTER_COUNT: ::UINT = 1; pub const D3D12_HS_INPUT_JOIN_INSTANCE_ID_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_HS_INPUT_JOIN_INSTANCE_ID_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_HS_INPUT_PRIMITIVE_ID_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_HS_INPUT_PRIMITIVE_ID_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_HS_INPUT_PRIMITIVE_ID_REGISTER_COUNT: ::UINT = 1; pub const D3D12_HS_INPUT_PRIMITIVE_ID_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_HS_INPUT_PRIMITIVE_ID_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_HS_JOIN_PHASE_INSTANCE_COUNT_UPPER_BOUND: ::UINT = 0xffffffff; pub const D3D12_HS_MAXTESSFACTOR_LOWER_BOUND: ::FLOAT = 1.0; pub const D3D12_HS_MAXTESSFACTOR_UPPER_BOUND: ::FLOAT = 64.0; pub const D3D12_HS_OUTPUT_CONTROL_POINTS_MAX_TOTAL_SCALARS: ::UINT = 3968; pub const D3D12_HS_OUTPUT_CONTROL_POINT_ID_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_HS_OUTPUT_CONTROL_POINT_ID_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_HS_OUTPUT_CONTROL_POINT_ID_REGISTER_COUNT: ::UINT = 1; pub const D3D12_HS_OUTPUT_CONTROL_POINT_ID_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_HS_OUTPUT_CONTROL_POINT_ID_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_HS_OUTPUT_PATCH_CONSTANT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_HS_OUTPUT_PATCH_CONSTANT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_HS_OUTPUT_PATCH_CONSTANT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_HS_OUTPUT_PATCH_CONSTANT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_HS_OUTPUT_PATCH_CONSTANT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_HS_OUTPUT_PATCH_CONSTANT_REGISTER_SCALAR_COMPONENTS: ::UINT = 128; pub const D3D12_IA_DEFAULT_INDEX_BUFFER_OFFSET_IN_BYTES: ::UINT = 0; pub const D3D12_IA_DEFAULT_PRIMITIVE_TOPOLOGY: ::UINT = 0; pub const D3D12_IA_DEFAULT_VERTEX_BUFFER_OFFSET_IN_BYTES: ::UINT = 0; pub const D3D12_IA_INDEX_INPUT_RESOURCE_SLOT_COUNT: ::UINT = 1; pub const D3D12_IA_INSTANCE_ID_BIT_COUNT: ::UINT = 32; pub const D3D12_IA_INTEGER_ARITHMETIC_BIT_COUNT: ::UINT = 32; pub const D3D12_IA_PATCH_MAX_CONTROL_POINT_COUNT: ::UINT = 32; pub const D3D12_IA_PRIMITIVE_ID_BIT_COUNT: ::UINT = 32; pub const D3D12_IA_VERTEX_ID_BIT_COUNT: ::UINT = 32; pub const D3D12_IA_VERTEX_INPUT_RESOURCE_SLOT_COUNT: ::UINT = 32; pub const D3D12_IA_VERTEX_INPUT_STRUCTURE_ELEMENTS_COMPONENTS: ::UINT = 128; pub const D3D12_IA_VERTEX_INPUT_STRUCTURE_ELEMENT_COUNT: ::UINT = 32; pub const D3D12_INTEGER_DIVIDE_BY_ZERO_QUOTIENT: ::UINT = 0xffffffff; pub const D3D12_INTEGER_DIVIDE_BY_ZERO_REMAINDER: ::UINT = 0xffffffff; pub const D3D12_KEEP_RENDER_TARGETS_AND_DEPTH_STENCIL: ::UINT = 0xffffffff; pub const D3D12_KEEP_UNORDERED_ACCESS_VIEWS: ::UINT = 0xffffffff; pub const D3D12_LINEAR_GAMMA: ::FLOAT = 1.0; pub const D3D12_MAG_FILTER_SHIFT: ::UINT = 2; pub const D3D12_MAJOR_VERSION: ::UINT = 12; pub const D3D12_MAX_BORDER_COLOR_COMPONENT: ::FLOAT = 1.0; pub const D3D12_MAX_DEPTH: ::FLOAT = 1.0; pub const D3D12_MAX_LIVE_STATIC_SAMPLERS: ::UINT = 2032; pub const D3D12_MAX_MAXANISOTROPY: ::UINT = 16; pub const D3D12_MAX_MULTISAMPLE_SAMPLE_COUNT: ::UINT = 32; pub const D3D12_MAX_POSITION_VALUE: ::FLOAT = 3.402823466e+34; pub const 
D3D12_MAX_ROOT_COST: ::UINT = 64; pub const D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_1: ::UINT = 1000000; pub const D3D12_MAX_SHADER_VISIBLE_DESCRIPTOR_HEAP_SIZE_TIER_2: ::UINT = 1000000; pub const D3D12_MAX_SHADER_VISIBLE_SAMPLER_HEAP_SIZE: ::UINT = 2048; pub const D3D12_MAX_TEXTURE_DIMENSION_2_TO_EXP: ::UINT = 17; pub const D3D12_MINOR_VERSION: ::UINT = 0; pub const D3D12_MIN_BORDER_COLOR_COMPONENT: ::FLOAT = 0.0; pub const D3D12_MIN_DEPTH: ::FLOAT = 0.0; pub const D3D12_MIN_FILTER_SHIFT: ::UINT = 4; pub const D3D12_MIN_MAXANISOTROPY: ::UINT = 0; pub const D3D12_MIP_FILTER_SHIFT: ::UINT = 0; pub const D3D12_MIP_LOD_BIAS_MAX: ::FLOAT = 15.99; pub const D3D12_MIP_LOD_BIAS_MIN: ::FLOAT = -16.0; pub const D3D12_MIP_LOD_FRACTIONAL_BIT_COUNT: ::UINT = 8; pub const D3D12_MIP_LOD_RANGE_BIT_COUNT: ::UINT = 8; pub const D3D12_MULTISAMPLE_ANTIALIAS_LINE_WIDTH: ::FLOAT = 1.4; pub const D3D12_NONSAMPLE_FETCH_OUT_OF_RANGE_ACCESS_RESULT: ::UINT = 0; pub const D3D12_OS_RESERVED_REGISTER_SPACE_VALUES_END: ::UINT = 0xffffffff; pub const D3D12_OS_RESERVED_REGISTER_SPACE_VALUES_START: ::UINT = 0xfffffff8; pub const D3D12_PACKED_TILE: ::UINT = 0xffffffff; pub const D3D12_PIXEL_ADDRESS_RANGE_BIT_COUNT: ::UINT = 15; pub const D3D12_PRE_SCISSOR_PIXEL_ADDRESS_RANGE_BIT_COUNT: ::UINT = 16; pub const D3D12_PS_CS_UAV_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_PS_CS_UAV_REGISTER_COUNT: ::UINT = 8; pub const D3D12_PS_CS_UAV_REGISTER_READS_PER_INST: ::UINT = 1; pub const D3D12_PS_CS_UAV_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_PS_FRONTFACING_DEFAULT_VALUE: ::UINT = 0xffffffff; pub const D3D12_PS_FRONTFACING_FALSE_VALUE: ::UINT = 0; pub const D3D12_PS_FRONTFACING_TRUE_VALUE: ::UINT = 0xffffffff; pub const D3D12_PS_INPUT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_PS_INPUT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_PS_INPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_PS_INPUT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_PS_INPUT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_PS_LEGACY_PIXEL_CENTER_FRACTIONAL_COMPONENT: ::FLOAT = 0.0; pub const D3D12_PS_OUTPUT_DEPTH_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_PS_OUTPUT_DEPTH_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_PS_OUTPUT_DEPTH_REGISTER_COUNT: ::UINT = 1; pub const D3D12_PS_OUTPUT_MASK_REGISTER_COMPONENTS: ::UINT = 1; pub const D3D12_PS_OUTPUT_MASK_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_PS_OUTPUT_MASK_REGISTER_COUNT: ::UINT = 1; pub const D3D12_PS_OUTPUT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_PS_OUTPUT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_PS_OUTPUT_REGISTER_COUNT: ::UINT = 8; pub const D3D12_PS_PIXEL_CENTER_FRACTIONAL_COMPONENT: ::FLOAT = 0.5; pub const D3D12_RAW_UAV_SRV_BYTE_ALIGNMENT: ::UINT = 16; pub const D3D12_REQ_BLEND_OBJECT_COUNT_PER_DEVICE: ::UINT = 4096; pub const D3D12_REQ_BUFFER_RESOURCE_TEXEL_COUNT_2_TO_EXP: ::UINT = 27; pub const D3D12_REQ_CONSTANT_BUFFER_ELEMENT_COUNT: ::UINT = 4096; pub const D3D12_REQ_DEPTH_STENCIL_OBJECT_COUNT_PER_DEVICE: ::UINT = 4096; pub const D3D12_REQ_DRAWINDEXED_INDEX_COUNT_2_TO_EXP: ::UINT = 32; pub const D3D12_REQ_DRAW_VERTEX_COUNT_2_TO_EXP: ::UINT = 32; pub const D3D12_REQ_FILTERING_HW_ADDRESSABLE_RESOURCE_DIMENSION: ::UINT = 16384; pub const D3D12_REQ_GS_INVOCATION_32BIT_OUTPUT_COMPONENT_LIMIT: ::UINT = 1024; pub const D3D12_REQ_IMMEDIATE_CONSTANT_BUFFER_ELEMENT_COUNT: ::UINT = 4096; pub const D3D12_REQ_MAXANISOTROPY: ::UINT = 16; pub const D3D12_REQ_MIP_LEVELS: ::UINT = 15; pub const 
D3D12_REQ_MULTI_ELEMENT_STRUCTURE_SIZE_IN_BYTES: ::UINT = 2048; pub const D3D12_REQ_RASTERIZER_OBJECT_COUNT_PER_DEVICE: ::UINT = 4096; pub const D3D12_REQ_RENDER_TO_BUFFER_WINDOW_WIDTH: ::UINT = 16384; pub const D3D12_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_A_TERM: ::UINT = 128; pub const D3D12_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_B_TERM: ::FLOAT = 0.25; pub const D3D12_REQ_RESOURCE_SIZE_IN_MEGABYTES_EXPRESSION_C_TERM: ::UINT = 2048; pub const D3D12_REQ_RESOURCE_VIEW_COUNT_PER_DEVICE_2_TO_EXP: ::UINT = 20; pub const D3D12_REQ_SAMPLER_OBJECT_COUNT_PER_DEVICE: ::UINT = 4096; pub const D3D12_REQ_SUBRESOURCES: ::UINT = 30720; pub const D3D12_REQ_TEXTURE1D_ARRAY_AXIS_DIMENSION: ::UINT = 2048; pub const D3D12_REQ_TEXTURE1D_U_DIMENSION: ::UINT = 16384; pub const D3D12_REQ_TEXTURE2D_ARRAY_AXIS_DIMENSION: ::UINT = 2048; pub const D3D12_REQ_TEXTURE2D_U_OR_V_DIMENSION: ::UINT = 16384; pub const D3D12_REQ_TEXTURE3D_U_V_OR_W_DIMENSION: ::UINT = 2048; pub const D3D12_REQ_TEXTURECUBE_DIMENSION: ::UINT = 16384; pub const D3D12_RESINFO_INSTRUCTION_MISSING_COMPONENT_RETVAL: ::UINT = 0; pub const D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES: ::UINT = 0xffffffff; pub const D3D12_SHADER_COMPONENT_MAPPING_MASK: ::UINT = 0x7; pub const D3D12_SHADER_COMPONENT_MAPPING_SHIFT: ::UINT = 3; pub const D3D12_SHADER_MAJOR_VERSION: ::UINT = 5; pub const D3D12_SHADER_MAX_INSTANCES: ::UINT = 65535; pub const D3D12_SHADER_MAX_INTERFACES: ::UINT = 253; pub const D3D12_SHADER_MAX_INTERFACE_CALL_SITES: ::UINT = 4096; pub const D3D12_SHADER_MAX_TYPES: ::UINT = 65535; pub const D3D12_SHADER_MINOR_VERSION: ::UINT = 1; pub const D3D12_SHIFT_INSTRUCTION_PAD_VALUE: ::UINT = 0; pub const D3D12_SHIFT_INSTRUCTION_SHIFT_VALUE_BIT_COUNT: ::UINT = 5; pub const D3D12_SIMULTANEOUS_RENDER_TARGET_COUNT: ::UINT = 8; pub const D3D12_SMALL_MSAA_RESOURCE_PLACEMENT_ALIGNMENT: ::UINT = 65536; pub const D3D12_SMALL_RESOURCE_PLACEMENT_ALIGNMENT: ::UINT = 4096; pub const D3D12_SO_BUFFER_MAX_STRIDE_IN_BYTES: ::UINT = 2048; pub const D3D12_SO_BUFFER_MAX_WRITE_WINDOW_IN_BYTES: ::UINT = 512; pub const D3D12_SO_BUFFER_SLOT_COUNT: ::UINT = 4; pub const D3D12_SO_DDI_REGISTER_INDEX_DENOTING_GAP: ::UINT = 0xffffffff; pub const D3D12_SO_NO_RASTERIZED_STREAM: ::UINT = 0xffffffff; pub const D3D12_SO_OUTPUT_COMPONENT_COUNT: ::UINT = 128; pub const D3D12_SO_STREAM_COUNT: ::UINT = 4; pub const D3D12_SPEC_DATE_DAY: ::UINT = 14; pub const D3D12_SPEC_DATE_MONTH: ::UINT = 11; pub const D3D12_SPEC_DATE_YEAR: ::UINT = 2014; pub const D3D12_SPEC_VERSION: ::DOUBLE = 1.16; pub const D3D12_SRGB_GAMMA: ::FLOAT = 2.2; pub const D3D12_SRGB_TO_FLOAT_DENOMINATOR_1: ::FLOAT = 12.92; pub const D3D12_SRGB_TO_FLOAT_DENOMINATOR_2: ::FLOAT = 1.055; pub const D3D12_SRGB_TO_FLOAT_EXPONENT: ::FLOAT = 2.4; pub const D3D12_SRGB_TO_FLOAT_OFFSET: ::FLOAT = 0.055; pub const D3D12_SRGB_TO_FLOAT_THRESHOLD: ::FLOAT = 0.04045; pub const D3D12_SRGB_TO_FLOAT_TOLERANCE_IN_ULP: ::FLOAT = 0.5; pub const D3D12_STANDARD_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_STANDARD_COMPONENT_BIT_COUNT_DOUBLED: ::UINT = 64; pub const D3D12_STANDARD_MAXIMUM_ELEMENT_ALIGNMENT_BYTE_MULTIPLE: ::UINT = 4; pub const D3D12_STANDARD_PIXEL_COMPONENT_COUNT: ::UINT = 128; pub const D3D12_STANDARD_PIXEL_ELEMENT_COUNT: ::UINT = 32; pub const D3D12_STANDARD_VECTOR_SIZE: ::UINT = 4; pub const D3D12_STANDARD_VERTEX_ELEMENT_COUNT: ::UINT = 32; pub const D3D12_STANDARD_VERTEX_TOTAL_COMPONENT_COUNT: ::UINT = 64; pub const D3D12_SUBPIXEL_FRACTIONAL_BIT_COUNT: ::UINT = 8; pub const D3D12_SUBTEXEL_FRACTIONAL_BIT_COUNT: ::UINT = 8; 
pub const D3D12_SYSTEM_RESERVED_REGISTER_SPACE_VALUES_END: ::UINT = 0xffffffff; pub const D3D12_SYSTEM_RESERVED_REGISTER_SPACE_VALUES_START: ::UINT = 0xfffffff0; pub const D3D12_TESSELLATOR_MAX_EVEN_TESSELLATION_FACTOR: ::UINT = 64; pub const D3D12_TESSELLATOR_MAX_ISOLINE_DENSITY_TESSELLATION_FACTOR: ::UINT = 64; pub const D3D12_TESSELLATOR_MAX_ODD_TESSELLATION_FACTOR: ::UINT = 63; pub const D3D12_TESSELLATOR_MAX_TESSELLATION_FACTOR: ::UINT = 64; pub const D3D12_TESSELLATOR_MIN_EVEN_TESSELLATION_FACTOR: ::UINT = 2; pub const D3D12_TESSELLATOR_MIN_ISOLINE_DENSITY_TESSELLATION_FACTOR: ::UINT = 1; pub const D3D12_TESSELLATOR_MIN_ODD_TESSELLATION_FACTOR: ::UINT = 1; pub const D3D12_TEXEL_ADDRESS_RANGE_BIT_COUNT: ::UINT = 16; pub const D3D12_TEXTURE_DATA_PITCH_ALIGNMENT: ::UINT = 256; pub const D3D12_TEXTURE_DATA_PLACEMENT_ALIGNMENT: ::UINT = 512; pub const D3D12_TILED_RESOURCE_TILE_SIZE_IN_BYTES: ::UINT = 65536; pub const D3D12_UAV_COUNTER_PLACEMENT_ALIGNMENT: ::UINT = 4096; pub const D3D12_UAV_SLOT_COUNT: ::UINT = 64; pub const D3D12_UNBOUND_MEMORY_ACCESS_RESULT: ::UINT = 0; pub const D3D12_VIEWPORT_AND_SCISSORRECT_MAX_INDEX: ::UINT = 15; pub const D3D12_VIEWPORT_AND_SCISSORRECT_OBJECT_COUNT_PER_PIPELINE: ::UINT = 16; pub const D3D12_VIEWPORT_BOUNDS_MAX: ::UINT = 32767; pub const D3D12_VIEWPORT_BOUNDS_MIN: ::INT = -32768; pub const D3D12_VS_INPUT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_VS_INPUT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_VS_INPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_VS_INPUT_REGISTER_READS_PER_INST: ::UINT = 2; pub const D3D12_VS_INPUT_REGISTER_READ_PORTS: ::UINT = 1; pub const D3D12_VS_OUTPUT_REGISTER_COMPONENTS: ::UINT = 4; pub const D3D12_VS_OUTPUT_REGISTER_COMPONENT_BIT_COUNT: ::UINT = 32; pub const D3D12_VS_OUTPUT_REGISTER_COUNT: ::UINT = 32; pub const D3D12_WHQL_CONTEXT_COUNT_FOR_RESOURCE_LIMIT: ::UINT = 10; pub const D3D12_WHQL_DRAWINDEXED_INDEX_COUNT_2_TO_EXP: ::UINT = 25; pub const D3D12_WHQL_DRAW_VERTEX_COUNT_2_TO_EXP: ::UINT = 25; pub type D3D12_GPU_VIRTUAL_ADDRESS = ::UINT64; ENUM!{enum D3D12_COMMAND_LIST_TYPE { D3D12_COMMAND_LIST_TYPE_DIRECT = 0, D3D12_COMMAND_LIST_TYPE_BUNDLE = 1, D3D12_COMMAND_LIST_TYPE_COMPUTE = 2, D3D12_COMMAND_LIST_TYPE_COPY = 3, }} FLAGS!{enum D3D12_COMMAND_QUEUE_FLAGS { D3D12_COMMAND_QUEUE_FLAG_NONE = 0x0, D3D12_COMMAND_QUEUE_FLAG_DISABLE_GPU_TIMEOUT = 0x1, }} ENUM!{enum D3D12_COMMAND_QUEUE_PRIORITY { D3D12_COMMAND_QUEUE_PRIORITY_NORMAL = 0, D3D12_COMMAND_QUEUE_PRIORITY_HIGH = 100, }} STRUCT!{struct D3D12_COMMAND_QUEUE_DESC { Type: D3D12_COMMAND_LIST_TYPE, Priority: ::INT, Flags: D3D12_COMMAND_QUEUE_FLAGS, NodeMask: ::UINT, }} ENUM!{enum D3D12_PRIMITIVE_TOPOLOGY_TYPE { D3D12_PRIMITIVE_TOPOLOGY_TYPE_UNDEFINED = 0, D3D12_PRIMITIVE_TOPOLOGY_TYPE_POINT = 1, D3D12_PRIMITIVE_TOPOLOGY_TYPE_LINE = 2, D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE = 3, D3D12_PRIMITIVE_TOPOLOGY_TYPE_PATCH = 4, }} ENUM!{enum D3D12_INPUT_CLASSIFICATION { D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA = 0, D3D12_INPUT_CLASSIFICATION_PER_INSTANCE_DATA = 1, }} STRUCT!{struct D3D12_INPUT_ELEMENT_DESC { SemanticName: ::LPCSTR, SemanticIndex: ::UINT, Format: ::DXGI_FORMAT, InputSlot: ::UINT, AlignedByteOffset: ::UINT, InputSlotClass: D3D12_INPUT_CLASSIFICATION, InstanceDataStepRate: ::UINT, }} ENUM!{enum D3D12_FILL_MODE { D3D12_FILL_MODE_WIREFRAME = 2, D3D12_FILL_MODE_SOLID = 3, }} pub type D3D12_PRIMITIVE_TOPOLOGY = ::D3D_PRIMITIVE_TOPOLOGY; pub type D3D12_PRIMITIVE = ::D3D_PRIMITIVE; ENUM!{enum D3D12_CULL_MODE { D3D12_CULL_MODE_NONE = 1, 
D3D12_CULL_MODE_FRONT = 2, D3D12_CULL_MODE_BACK = 3, }} STRUCT!{struct D3D12_SO_DECLARATION_ENTRY { Stream: ::UINT, SemanticName: ::LPCSTR, SemanticIndex: ::UINT, StartComponent: ::BYTE, ComponentCount: ::BYTE, OutputSlot: ::BYTE, }} STRUCT!{struct D3D12_VIEWPORT { TopLeftX: ::FLOAT, TopLeftY: ::FLOAT, Width: ::FLOAT, Height: ::FLOAT, MinDepth: ::FLOAT, MaxDepth: ::FLOAT, }} pub type D3D12_RECT = ::RECT; STRUCT!{struct D3D12_BOX { left: ::UINT, top: ::UINT, front: ::UINT, right: ::UINT, bottom: ::UINT, back: ::UINT, }} ENUM!{enum D3D12_COMPARISON_FUNC { D3D12_COMPARISON_FUNC_NEVER = 1, D3D12_COMPARISON_FUNC_LESS = 2, D3D12_COMPARISON_FUNC_EQUAL = 3, D3D12_COMPARISON_FUNC_LESS_EQUAL = 4, D3D12_COMPARISON_FUNC_GREATER = 5, D3D12_COMPARISON_FUNC_NOT_EQUAL = 6, D3D12_COMPARISON_FUNC_GREATER_EQUAL = 7, D3D12_COMPARISON_FUNC_ALWAYS = 8, }} ENUM!{enum D3D12_DEPTH_WRITE_MASK { D3D12_DEPTH_WRITE_MASK_ZERO = 0, D3D12_DEPTH_WRITE_MASK_ALL = 1, }} ENUM!{enum D3D12_STENCIL_OP { D3D12_STENCIL_OP_KEEP = 1, D3D12_STENCIL_OP_ZERO = 2, D3D12_STENCIL_OP_REPLACE = 3, D3D12_STENCIL_OP_INCR_SAT = 4, D3D12_STENCIL_OP_DECR_SAT = 5, D3D12_STENCIL_OP_INVERT = 6, D3D12_STENCIL_OP_INCR = 7, D3D12_STENCIL_OP_DECR = 8, }} STRUCT!{struct D3D12_DEPTH_STENCILOP_DESC { StencilFailOp: D3D12_STENCIL_OP, StencilDepthFailOp: D3D12_STENCIL_OP, StencilPassOp: D3D12_STENCIL_OP, StencilFunc: D3D12_COMPARISON_FUNC, }} STRUCT!{struct D3D12_DEPTH_STENCIL_DESC { DepthEnable: ::BOOL, DepthWriteMask: D3D12_DEPTH_WRITE_MASK, DepthFunc: D3D12_COMPARISON_FUNC, StencilEnable: ::BOOL, StencilReadMask: ::UINT8, StencilWriteMask: ::UINT8, FrontFace: D3D12_DEPTH_STENCILOP_DESC, BackFace: D3D12_DEPTH_STENCILOP_DESC, }} ENUM!{enum D3D12_BLEND { D3D12_BLEND_ZERO = 1, D3D12_BLEND_ONE = 2, D3D12_BLEND_SRC_COLOR = 3, D3D12_BLEND_INV_SRC_COLOR = 4, D3D12_BLEND_SRC_ALPHA = 5, D3D12_BLEND_INV_SRC_ALPHA = 6, D3D12_BLEND_DEST_ALPHA = 7, D3D12_BLEND_INV_DEST_ALPHA = 8, D3D12_BLEND_DEST_COLOR = 9, D3D12_BLEND_INV_DEST_COLOR = 10, D3D12_BLEND_SRC_ALPHA_SAT = 11, D3D12_BLEND_BLEND_FACTOR = 14, D3D12_BLEND_INV_BLEND_FACTOR = 15, D3D12_BLEND_SRC1_COLOR = 16, D3D12_BLEND_INV_SRC1_COLOR = 17, D3D12_BLEND_SRC1_ALPHA = 18, D3D12_BLEND_INV_SRC1_ALPHA = 19, }} ENUM!{enum D3D12_BLEND_OP { D3D12_BLEND_OP_ADD = 1, D3D12_BLEND_OP_SUBTRACT = 2, D3D12_BLEND_OP_REV_SUBTRACT = 3, D3D12_BLEND_OP_MIN = 4, D3D12_BLEND_OP_MAX = 5, }} FLAGS!{enum D3D12_COLOR_WRITE_ENABLE { D3D12_COLOR_WRITE_ENABLE_RED = 0x1, D3D12_COLOR_WRITE_ENABLE_GREEN = 0x2, D3D12_COLOR_WRITE_ENABLE_BLUE = 0x4, D3D12_COLOR_WRITE_ENABLE_ALPHA = 0x8, D3D12_COLOR_WRITE_ENABLE_ALL = 0xF, }} ENUM!{enum D3D12_LOGIC_OP { D3D12_LOGIC_OP_CLEAR = 0, D3D12_LOGIC_OP_SET = 1, D3D12_LOGIC_OP_COPY = 2, D3D12_LOGIC_OP_COPY_INVERTED = 3, D3D12_LOGIC_OP_NOOP = 4, D3D12_LOGIC_OP_INVERT = 5, D3D12_LOGIC_OP_AND = 6, D3D12_LOGIC_OP_NAND = 7, D3D12_LOGIC_OP_OR = 8, D3D12_LOGIC_OP_NOR = 9, D3D12_LOGIC_OP_XOR = 10, D3D12_LOGIC_OP_EQUIV = 11, D3D12_LOGIC_OP_AND_REVERSE = 12, D3D12_LOGIC_OP_AND_INVERTED = 13, D3D12_LOGIC_OP_OR_REVERSE = 14, D3D12_LOGIC_OP_OR_INVERTED = 15, }} STRUCT!{struct D3D12_RENDER_TARGET_BLEND_DESC { BlendEnable: ::BOOL, LogicOpEnable: ::BOOL, SrcBlend: D3D12_BLEND, DestBlend: D3D12_BLEND, BlendOp: D3D12_BLEND_OP, SrcBlendAlpha: D3D12_BLEND, DestBlendAlpha: D3D12_BLEND, BlendOpAlpha: D3D12_BLEND_OP, LogicOp: D3D12_LOGIC_OP, RenderTargetWriteMask: ::UINT8, }} STRUCT!{struct D3D12_BLEND_DESC { AlphaToCoverageEnable: ::BOOL, IndependentBlendEnable: ::BOOL, RenderTarget: [D3D12_RENDER_TARGET_BLEND_DESC; 8], }} 
ENUM!{enum D3D12_CONSERVATIVE_RASTERIZATION_MODE { D3D12_CONSERVATIVE_RASTERIZATION_MODE_OFF = 0, D3D12_CONSERVATIVE_RASTERIZATION_MODE_ON = 1, }} STRUCT!{struct D3D12_RASTERIZER_DESC { FillMode: D3D12_FILL_MODE, CullMode: D3D12_CULL_MODE, FrontCounterClockwise: ::BOOL, DepthBias: ::INT, DepthBiasClamp: ::FLOAT, SlopeScaledDepthBias: ::FLOAT, DepthClipEnable: ::BOOL, MultisampleEnable: ::BOOL, AntialiasedLineEnable: ::BOOL, ForcedSampleCount: ::UINT, ConservativeRaster: D3D12_CONSERVATIVE_RASTERIZATION_MODE, }} RIDL!{interface ID3D12Object(ID3D12ObjectVtbl): IUnknown(IUnknownVtbl) { fn GetPrivateData( &mut self, guid: ::REFGUID, pDataSize: *mut ::UINT, pData: *mut ::c_void ) -> ::HRESULT, fn SetPrivateData( &mut self, guid: ::REFGUID, DataSize: ::UINT, pData: *const ::c_void ) -> ::HRESULT, fn SetPrivateDataInterface( &mut self, guid: ::REFGUID, pData: *const ::IUnknown ) -> ::HRESULT, fn SetName(&mut self, Name: ::LPCWSTR) -> ::HRESULT }} RIDL!{interface ID3D12DeviceChild(ID3D12DeviceChildVtbl): ID3D12Object(ID3D12ObjectVtbl) { fn GetDevice( &mut self, riid: ::REFGUID, ppvDevice: *mut *mut ::c_void ) -> ::HRESULT }} RIDL!{interface ID3D12RootSignature(ID3D12RootSignatureVtbl): ID3D12DeviceChild(ID3D12DeviceChildVtbl) { }} STRUCT!{struct D3D12_SHADER_BYTECODE { pShaderBytecode: *const ::c_void, BytecodeLength: ::SIZE_T, }} STRUCT!{struct D3D12_STREAM_OUTPUT_DESC { pSODeclaration: *const D3D12_SO_DECLARATION_ENTRY, NumEntries: ::UINT, pBufferStrides: *const ::UINT, NumStrides: ::UINT, RasterizedStream: ::UINT, }} STRUCT!{struct D3D12_INPUT_LAYOUT_DESC { pInputElementDescs: *const D3D12_INPUT_ELEMENT_DESC, NumElements: ::UINT, }} ENUM!{enum D3D12_INDEX_BUFFER_STRIP_CUT_VALUE { D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_DISABLED = 0, D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFF = 1, D3D12_INDEX_BUFFER_STRIP_CUT_VALUE_0xFFFFFFFF = 2, }} STRUCT!{struct D3D12_CACHED_PIPELINE_STATE { pCachedBlob: *const ::c_void, CachedBlobSizeInBytes: ::SIZE_T, }} FLAGS!{enum D3D12_PIPELINE_STATE_FLAGS { D3D12_PIPELINE_STATE_FLAG_NONE = 0x0, D3D12_PIPELINE_STATE_FLAG_TOOL_DEBUG = 0x1, }} STRUCT!{struct D3D12_GRAPHICS_PIPELINE_STATE_DESC { pRootSignature: *mut ID3D12RootSignature, VS: D3D12_SHADER_BYTECODE, PS: D3D12_SHADER_BYTECODE, DS: D3D12_SHADER_BYTECODE, HS: D3D12_SHADER_BYTECODE, GS: D3D12_SHADER_BYTECODE, StreamOutput: D3D12_STREAM_OUTPUT_DESC, BlendState: D3D12_BLEND_DESC, SampleMask: ::UINT, RasterizerState: D3D12_RASTERIZER_DESC, DepthStencilState: D3D12_DEPTH_STENCIL_DESC, InputLayout: D3D12_INPUT_LAYOUT_DESC, IBStripCutValue: D3D12_INDEX_BUFFER_STRIP_CUT_VALUE, PrimitiveTopologyType: D3D12_PRIMITIVE_TOPOLOGY_TYPE, NumRenderTargets: ::UINT, RTVFormats: [::DXGI_FORMAT; 8], DSVFormat: ::DXGI_FORMAT, SampleDesc: ::DXGI_SAMPLE_DESC, NodeMask: ::UINT, CachedPSO: D3D12_CACHED_PIPELINE_STATE, Flags: D3D12_PIPELINE_STATE_FLAGS, }} STRUCT!{struct D3D12_COMPUTE_PIPELINE_STATE_DESC { pRootSignature: *mut ID3D12RootSignature, CS: D3D12_SHADER_BYTECODE, NodeMask: ::UINT, CachedPSO: D3D12_CACHED_PIPELINE_STATE, Flags: D3D12_PIPELINE_STATE_FLAGS, }} ENUM!{enum D3D12_FEATURE { D3D12_FEATURE_D3D12_OPTIONS = 0, D3D12_FEATURE_ARCHITECTURE = 1, D3D12_FEATURE_FEATURE_LEVELS = 2, D3D12_FEATURE_FORMAT_SUPPORT = 3, D3D12_FEATURE_MULTISAMPLE_QUALITY_LEVELS = 4, D3D12_FEATURE_FORMAT_INFO = 5, D3D12_FEATURE_GPU_VIRTUAL_ADDRESS_SUPPORT = 6, }} FLAGS!{enum D3D12_SHADER_MIN_PRECISION_SUPPORT { D3D12_SHADER_MIN_PRECISION_SUPPORT_NONE = 0, D3D12_SHADER_MIN_PRECISION_SUPPORT_10_BIT = 0x1, D3D12_SHADER_MIN_PRECISION_SUPPORT_16_BIT = 0x2, }} 
ENUM!{enum D3D12_TILED_RESOURCES_TIER { D3D12_TILED_RESOURCES_TIER_NOT_SUPPORTED = 0, D3D12_TILED_RESOURCES_TIER_1 = 1, D3D12_TILED_RESOURCES_TIER_2 = 2, D3D12_TILED_RESOURCES_TIER_3 = 3, }} ENUM!{enum D3D12_RESOURCE_BINDING_TIER { D3D12_RESOURCE_BINDING_TIER_1 = 1, D3D12_RESOURCE_BINDING_TIER_2 = 2, D3D12_RESOURCE_BINDING_TIER_3 = 3, }} ENUM!{enum D3D12_CONSERVATIVE_RASTERIZATION_TIER { D3D12_CONSERVATIVE_RASTERIZATION_TIER_NOT_SUPPORTED = 0, D3D12_CONSERVATIVE_RASTERIZATION_TIER_1 = 1, D3D12_CONSERVATIVE_RASTERIZATION_TIER_2 = 2, D3D12_CONSERVATIVE_RASTERIZATION_TIER_3 = 3, }} FLAGS!{enum D3D12_FORMAT_SUPPORT1 { D3D12_FORMAT_SUPPORT1_NONE = 0x0, D3D12_FORMAT_SUPPORT1_BUFFER = 0x1, D3D12_FORMAT_SUPPORT1_IA_VERTEX_BUFFER = 0x2, D3D12_FORMAT_SUPPORT1_IA_INDEX_BUFFER = 0x4, D3D12_FORMAT_SUPPORT1_SO_BUFFER = 0x8, D3D12_FORMAT_SUPPORT1_TEXTURE1D = 0x10, D3D12_FORMAT_SUPPORT1_TEXTURE2D = 0x20, D3D12_FORMAT_SUPPORT1_TEXTURE3D = 0x40, D3D12_FORMAT_SUPPORT1_TEXTURECUBE = 0x80, D3D12_FORMAT_SUPPORT1_SHADER_LOAD = 0x100, D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE = 0x200, D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE_COMPARISON = 0x400, D3D12_FORMAT_SUPPORT1_SHADER_SAMPLE_MONO_TEXT = 0x800, D3D12_FORMAT_SUPPORT1_MIP = 0x1000, D3D12_FORMAT_SUPPORT1_RENDER_TARGET = 0x4000, D3D12_FORMAT_SUPPORT1_BLENDABLE = 0x8000, D3D12_FORMAT_SUPPORT1_DEPTH_STENCIL = 0x10000, D3D12_FORMAT_SUPPORT1_MULTISAMPLE_RESOLVE = 0x40000, D3D12_FORMAT_SUPPORT1_DISPLAY = 0x80000, D3D12_FORMAT_SUPPORT1_CAST_WITHIN_BIT_LAYOUT = 0x100000, D3D12_FORMAT_SUPPORT1_MULTISAMPLE_RENDERTARGET = 0x200000, D3D12_FORMAT_SUPPORT1_MULTISAMPLE_LOAD = 0x400000, D3D12_FORMAT_SUPPORT1_SHADER_GATHER = 0x800000, D3D12_FORMAT_SUPPORT1_BACK_BUFFER_CAST = 0x1000000, D3D12_FORMAT_SUPPORT1_TYPED_UNORDERED_ACCESS_VIEW = 0x2000000, D3D12_FORMAT_SUPPORT1_SHADER_GATHER_COMPARISON = 0x4000000, D3D12_FORMAT_SUPPORT1_DECODER_OUTPUT = 0x8000000, D3D12_FORMAT_SUPPORT1_VIDEO_PROCESSOR_OUTPUT = 0x10000000, D3D12_FORMAT_SUPPORT1_VIDEO_PROCESSOR_INPUT = 0x20000000, D3D12_FORMAT_SUPPORT1_VIDEO_ENCODER = 0x40000000, }} FLAGS!{enum D3D12_FORMAT_SUPPORT2 { D3D12_FORMAT_SUPPORT2_NONE = 0x0, D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_ADD = 0x1, D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_BITWISE_OPS = 0x2, D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_COMPARE_STORE_OR_COMPARE_EXCHANGE = 0x4, D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_EXCHANGE = 0x8, D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_SIGNED_MIN_OR_MAX = 0x10, D3D12_FORMAT_SUPPORT2_UAV_ATOMIC_UNSIGNED_MIN_OR_MAX = 0x20, D3D12_FORMAT_SUPPORT2_UAV_TYPED_LOAD = 0x40, D3D12_FORMAT_SUPPORT2_UAV_TYPED_STORE = 0x80, D3D12_FORMAT_SUPPORT2_OUTPUT_MERGER_LOGIC_OP = 0x100, D3D12_FORMAT_SUPPORT2_TILED = 0x200, D3D12_FORMAT_SUPPORT2_MULTIPLANE_OVERLAY = 0x4000, }} FLAGS!{enum D3D12_MULTISAMPLE_QUALITY_LEVEL_FLAGS { D3D12_MULTISAMPLE_QUALITY_LEVELS_FLAG_NONE = 0x0, D3D12_MULTISAMPLE_QUALITY_LEVELS_FLAG_TILED_RESOURCE = 0x1, }} ENUM!{enum D3D12_CROSS_NODE_SHARING_TIER { D3D12_CROSS_NODE_SHARING_TIER_NOT_SUPPORTED = 0, D3D12_CROSS_NODE_SHARING_TIER_1_EMULATED = 1, D3D12_CROSS_NODE_SHARING_TIER_1 = 2, D3D12_CROSS_NODE_SHARING_TIER_2 = 3, }} ENUM!{enum D3D12_RESOURCE_HEAP_TIER { D3D12_RESOURCE_HEAP_TIER_1 = 1, D3D12_RESOURCE_HEAP_TIER_2 = 2, }} STRUCT!{struct D3D12_FEATURE_DATA_D3D12_OPTIONS { DoublePrecisionFloatShaderOps: ::BOOL, OutputMergerLogicOp: ::BOOL, MinPrecisionSupport: D3D12_SHADER_MIN_PRECISION_SUPPORT, TiledResourcesTier: D3D12_TILED_RESOURCES_TIER, ResourceBindingTier: D3D12_RESOURCE_BINDING_TIER, PSSpecifiedStencilRefSupported: ::BOOL, TypedUAVLoadAdditionalFormats: ::BOOL, ROVsSupported: 
::BOOL, ConservativeRasterizationTier: D3D12_CONSERVATIVE_RASTERIZATION_TIER, MaxGPUVirtualAddressBitsPerResource: ::UINT, StandardSwizzle64KBSupported: ::BOOL, CrossNodeSharingTier: D3D12_CROSS_NODE_SHARING_TIER, CrossAdapterRowMajorTextureSupported: ::BOOL, VPAndRTArrayIndexFromAnyShaderFeedingRasterizerSupportedWithoutGSEmulation: ::BOOL, ResourceHeapTier: D3D12_RESOURCE_HEAP_TIER, }} FLAGS!{ enum D3D12_BUFFER_SRV_FLAGS { D3D12_BUFFER_SRV_FLAG_NONE = 0x0, D3D12_BUFFER_SRV_FLAG_RAW = 0x1, }} FLAGS!{ enum D3D12_BUFFER_UAV_FLAGS { D3D12_BUFFER_UAV_FLAG_NONE = 0x0, D3D12_BUFFER_UAV_FLAG_RAW = 0x1, }} FLAGS!{ enum D3D12_CLEAR_FLAGS { D3D12_CLEAR_FLAG_DEPTH = 0x1, D3D12_CLEAR_FLAG_STENCIL = 0x2, }} ENUM!{ enum D3D12_CPU_PAGE_PROPERTY { D3D12_CPU_PAGE_PROPERTY_UNKNOWN = 0, D3D12_CPU_PAGE_PROPERTY_NOT_AVAILABLE = 1, D3D12_CPU_PAGE_PROPERTY_WRITE_COMBINE = 2, D3D12_CPU_PAGE_PROPERTY_WRITE_BACK = 3, }} FLAGS!{ enum D3D12_DESCRIPTOR_HEAP_FLAGS { D3D12_DESCRIPTOR_HEAP_FLAG_NONE = 0x0, D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE = 0x1, }} ENUM!{ enum D3D12_DESCRIPTOR_HEAP_TYPE { D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV = 0, D3D12_DESCRIPTOR_HEAP_TYPE_SAMPLER = 1, D3D12_DESCRIPTOR_HEAP_TYPE_RTV = 2, D3D12_DESCRIPTOR_HEAP_TYPE_DSV = 3, D3D12_DESCRIPTOR_HEAP_TYPE_NUM_TYPES = 4, }} ENUM!{ enum D3D12_DESCRIPTOR_RANGE_TYPE { D3D12_DESCRIPTOR_RANGE_TYPE_SRV = 0, D3D12_DESCRIPTOR_RANGE_TYPE_UAV = 1, D3D12_DESCRIPTOR_RANGE_TYPE_CBV = 2, D3D12_DESCRIPTOR_RANGE_TYPE_SAMPLER = 3, }} ENUM!{ enum D3D12_DSV_DIMENSION { D3D12_DSV_DIMENSION_UNKNOWN = 0, D3D12_DSV_DIMENSION_TEXTURE1D = 1, D3D12_DSV_DIMENSION_TEXTURE1DARRAY = 2, D3D12_DSV_DIMENSION_TEXTURE2D = 3, D3D12_DSV_DIMENSION_TEXTURE2DARRAY = 4, D3D12_DSV_DIMENSION_TEXTURE2DMS = 5, D3D12_DSV_DIMENSION_TEXTURE2DMSARRAY = 6, }} FLAGS!{ enum D3D12_DSV_FLAGS { D3D12_DSV_FLAG_NONE = 0x0, D3D12_DSV_FLAG_READ_ONLY_DEPTH = 0x1, D3D12_DSV_FLAG_READ_ONLY_STENCIL = 0x2, }} FLAGS!{ enum D3D12_FENCE_FLAGS { D3D12_FENCE_FLAG_NONE = 0x0, D3D12_FENCE_FLAG_SHARED = 0x1, D3D12_FENCE_FLAG_SHARED_CROSS_ADAPTER = 0x2, }} ENUM!{ enum D3D12_FILTER { D3D12_FILTER_MIN_MAG_MIP_POINT = 0, D3D12_FILTER_MIN_MAG_POINT_MIP_LINEAR = 1, D3D12_FILTER_MIN_POINT_MAG_LINEAR_MIP_POINT = 4, D3D12_FILTER_MIN_POINT_MAG_MIP_LINEAR = 5, D3D12_FILTER_MIN_LINEAR_MAG_MIP_POINT = 16, D3D12_FILTER_MIN_LINEAR_MAG_POINT_MIP_LINEAR = 17, D3D12_FILTER_MIN_MAG_LINEAR_MIP_POINT = 20, D3D12_FILTER_MIN_MAG_MIP_LINEAR = 21, D3D12_FILTER_ANISOTROPIC = 85, D3D12_FILTER_COMPARISON_MIN_MAG_MIP_POINT = 128, D3D12_FILTER_COMPARISON_MIN_MAG_POINT_MIP_LINEAR = 129, D3D12_FILTER_COMPARISON_MIN_POINT_MAG_LINEAR_MIP_POINT = 132, D3D12_FILTER_COMPARISON_MIN_POINT_MAG_MIP_LINEAR = 133, D3D12_FILTER_COMPARISON_MIN_LINEAR_MAG_MIP_POINT = 144, D3D12_FILTER_COMPARISON_MIN_LINEAR_MAG_POINT_MIP_LINEAR = 145, D3D12_FILTER_COMPARISON_MIN_MAG_LINEAR_MIP_POINT = 148, D3D12_FILTER_COMPARISON_MIN_MAG_MIP_LINEAR = 149, D3D12_FILTER_COMPARISON_ANISOTROPIC = 213, D3D12_FILTER_MINIMUM_MIN_MAG_MIP_POINT = 256, D3D12_FILTER_MINIMUM_MIN_MAG_POINT_MIP_LINEAR = 257, D3D12_FILTER_MINIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT = 260, D3D12_FILTER_MINIMUM_MIN_POINT_MAG_MIP_LINEAR = 261, D3D12_FILTER_MINIMUM_MIN_LINEAR_MAG_MIP_POINT = 272, D3D12_FILTER_MINIMUM_MIN_LINEAR_MAG_POINT_MIP_LINEAR = 273, D3D12_FILTER_MINIMUM_MIN_MAG_LINEAR_MIP_POINT = 276, D3D12_FILTER_MINIMUM_MIN_MAG_MIP_LINEAR = 277, D3D12_FILTER_MINIMUM_ANISOTROPIC = 341, D3D12_FILTER_MAXIMUM_MIN_MAG_MIP_POINT = 384, D3D12_FILTER_MAXIMUM_MIN_MAG_POINT_MIP_LINEAR = 385, 
D3D12_FILTER_MAXIMUM_MIN_POINT_MAG_LINEAR_MIP_POINT = 388, D3D12_FILTER_MAXIMUM_MIN_POINT_MAG_MIP_LINEAR = 389, D3D12_FILTER_MAXIMUM_MIN_LINEAR_MAG_MIP_POINT = 400, D3D12_FILTER_MAXIMUM_MIN_LINEAR_MAG_POINT_MIP_LINEAR = 401, D3D12_FILTER_MAXIMUM_MIN_MAG_LINEAR_MIP_POINT = 404, D3D12_FILTER_MAXIMUM_MIN_MAG_MIP_LINEAR = 405, D3D12_FILTER_MAXIMUM_ANISOTROPIC = 469, }} ENUM!{ enum D3D12_FILTER_REDUCTION_TYPE { D3D12_FILTER_REDUCTION_TYPE_STANDARD = 0, D3D12_FILTER_REDUCTION_TYPE_COMPARISON = 1, D3D12_FILTER_REDUCTION_TYPE_MINIMUM = 2, D3D12_FILTER_REDUCTION_TYPE_MAXIMUM = 3, }} ENUM!{ enum D3D12_FILTER_TYPE { D3D12_FILTER_TYPE_POINT = 0, D3D12_FILTER_TYPE_LINEAR = 1, }} FLAGS!{ enum D3D12_HEAP_FLAGS { D3D12_HEAP_FLAG_NONE = 0x0, D3D12_HEAP_FLAG_SHARED = 0x1, D3D12_HEAP_FLAG_DENY_BUFFERS = 0x4, D3D12_HEAP_FLAG_ALLOW_DISPLAY = 0x8, D3D12_HEAP_FLAG_SHARED_CROSS_ADAPTER = 0x20, D3D12_HEAP_FLAG_DENY_RT_DS_TEXTURES = 0x40, D3D12_HEAP_FLAG_DENY_NON_RT_DS_TEXTURES = 0x80, D3D12_HEAP_FLAG_ALLOW_ALL_BUFFERS_AND_TEXTURES = 0x0, D3D12_HEAP_FLAG_ALLOW_ONLY_BUFFERS = 0xC0, D3D12_HEAP_FLAG_ALLOW_ONLY_NON_RT_DS_TEXTURES = 0x44, D3D12_HEAP_FLAG_ALLOW_ONLY_RT_DS_TEXTURES = 0x84, }} ENUM!{ enum D3D12_HEAP_TYPE { D3D12_HEAP_TYPE_DEFAULT = 1, D3D12_HEAP_TYPE_UPLOAD = 2, D3D12_HEAP_TYPE_READBACK = 3, D3D12_HEAP_TYPE_CUSTOM = 4, }} ENUM!{ enum D3D12_INDIRECT_ARGUMENT_TYPE { D3D12_INDIRECT_ARGUMENT_TYPE_DRAW = 0, D3D12_INDIRECT_ARGUMENT_TYPE_DRAW_INDEXED = 1, D3D12_INDIRECT_ARGUMENT_TYPE_DISPATCH = 2, D3D12_INDIRECT_ARGUMENT_TYPE_VERTEX_BUFFER_VIEW = 3, D3D12_INDIRECT_ARGUMENT_TYPE_INDEX_BUFFER_VIEW = 4, D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT = 5, D3D12_INDIRECT_ARGUMENT_TYPE_CONSTANT_BUFFER_VIEW = 6, D3D12_INDIRECT_ARGUMENT_TYPE_SHADER_RESOURCE_VIEW = 7, D3D12_INDIRECT_ARGUMENT_TYPE_UNORDERED_ACCESS_VIEW = 8, }} ENUM!{ enum D3D12_MEMORY_POOL { D3D12_MEMORY_POOL_UNKNOWN = 0, D3D12_MEMORY_POOL_L0 = 1, D3D12_MEMORY_POOL_L1 = 2, }} ENUM!{ enum D3D12_PREDICATION_OP { D3D12_PREDICATION_OP_EQUAL_ZERO = 0, D3D12_PREDICATION_OP_NOT_EQUAL_ZERO = 1, }} ENUM!{ enum D3D12_QUERY_HEAP_TYPE { D3D12_QUERY_HEAP_TYPE_OCCLUSION = 0, D3D12_QUERY_HEAP_TYPE_TIMESTAMP = 1, D3D12_QUERY_HEAP_TYPE_PIPELINE_STATISTICS = 2, D3D12_QUERY_HEAP_TYPE_SO_STATISTICS = 3, }} ENUM!{ enum D3D12_QUERY_TYPE { D3D12_QUERY_TYPE_OCCLUSION = 0, D3D12_QUERY_TYPE_BINARY_OCCLUSION = 1, D3D12_QUERY_TYPE_TIMESTAMP = 2, D3D12_QUERY_TYPE_PIPELINE_STATISTICS = 3, D3D12_QUERY_TYPE_SO_STATISTICS_STREAM0 = 4, D3D12_QUERY_TYPE_SO_STATISTICS_STREAM1 = 5, D3D12_QUERY_TYPE_SO_STATISTICS_STREAM2 = 6, D3D12_QUERY_TYPE_SO_STATISTICS_STREAM3 = 7, }} FLAGS!{ enum D3D12_RESOURCE_BARRIER_FLAGS { D3D12_RESOURCE_BARRIER_FLAG_NONE = 0x0, D3D12_RESOURCE_BARRIER_FLAG_BEGIN_ONLY = 0x1, D3D12_RESOURCE_BARRIER_FLAG_END_ONLY = 0x2, }} ENUM!{ enum D3D12_RESOURCE_BARRIER_TYPE { D3D12_RESOURCE_BARRIER_TYPE_TRANSITION = 0, D3D12_RESOURCE_BARRIER_TYPE_ALIASING = 1, D3D12_RESOURCE_BARRIER_TYPE_UAV = 2, }} ENUM!{ enum D3D12_RESOURCE_DIMENSION { D3D12_RESOURCE_DIMENSION_UNKNOWN = 0, D3D12_RESOURCE_DIMENSION_BUFFER = 1, D3D12_RESOURCE_DIMENSION_TEXTURE1D = 2, D3D12_RESOURCE_DIMENSION_TEXTURE2D = 3, D3D12_RESOURCE_DIMENSION_TEXTURE3D = 4, }} FLAGS!{ enum D3D12_RESOURCE_FLAGS { D3D12_RESOURCE_FLAG_NONE = 0x0, D3D12_RESOURCE_FLAG_ALLOW_RENDER_TARGET = 0x1, D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL = 0x2, D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS = 0x4, D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE = 0x8, D3D12_RESOURCE_FLAG_ALLOW_CROSS_ADAPTER = 0x10, D3D12_RESOURCE_FLAG_ALLOW_SIMULTANEOUS_ACCESS = 0x20, }} 
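// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original bindings: the D3D12_FILTER values
// above are not arbitrary - they follow the bit layout of the C macro
// D3D12_ENCODE_BASIC_FILTER. The min, mag and mip filter types sit at bits 4,
// 2 and 0, the reduction type at bits 7-8, and D3D12_FILTER_ANISOTROPIC
// additionally sets 0x40 on top of all-linear. The function below reproduces
// that layout for the non-anisotropic cases (pass the D3D12_FILTER_TYPE and
// D3D12_FILTER_REDUCTION_TYPE variants cast with `as u32`); the name is
// illustrative only.
#[allow(dead_code)]
fn example_encode_basic_filter(min: u32, mag: u32, mip: u32, reduction: u32) -> u32 {
    ((min & 3) << 4) | ((mag & 3) << 2) | (mip & 3) | ((reduction & 3) << 7)
}
// e.g. min = mag = mip = D3D12_FILTER_TYPE_LINEAR (1) with the standard
// reduction yields 21, i.e. D3D12_FILTER_MIN_MAG_MIP_LINEAR above.
// ---------------------------------------------------------------------------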
FLAGS!{ enum D3D12_RESOURCE_STATES { D3D12_RESOURCE_STATE_COMMON = 0x0, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER = 0x1, D3D12_RESOURCE_STATE_INDEX_BUFFER = 0x2, D3D12_RESOURCE_STATE_RENDER_TARGET = 0x4, D3D12_RESOURCE_STATE_UNORDERED_ACCESS = 0x8, D3D12_RESOURCE_STATE_DEPTH_WRITE = 0x10, D3D12_RESOURCE_STATE_DEPTH_READ = 0x20, D3D12_RESOURCE_STATE_NON_PIXEL_SHADER_RESOURCE = 0x40, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE = 0x80, D3D12_RESOURCE_STATE_STREAM_OUT = 0x100, D3D12_RESOURCE_STATE_INDIRECT_ARGUMENT = 0x200, D3D12_RESOURCE_STATE_COPY_DEST = 0x400, D3D12_RESOURCE_STATE_COPY_SOURCE = 0x800, D3D12_RESOURCE_STATE_RESOLVE_DEST = 0x1000, D3D12_RESOURCE_STATE_RESOLVE_SOURCE = 0x2000, D3D12_RESOURCE_STATE_GENERIC_READ = 0xAC3, D3D12_RESOURCE_STATE_PRESENT = 0x0, D3D12_RESOURCE_STATE_PREDICATION = 0x200, }} ENUM!{ enum D3D12_ROOT_PARAMETER_TYPE { D3D12_ROOT_PARAMETER_TYPE_DESCRIPTOR_TABLE = 0, D3D12_ROOT_PARAMETER_TYPE_32BIT_CONSTANTS = 1, D3D12_ROOT_PARAMETER_TYPE_CBV = 2, D3D12_ROOT_PARAMETER_TYPE_SRV = 3, D3D12_ROOT_PARAMETER_TYPE_UAV = 4, }} FLAGS!{ enum D3D12_ROOT_SIGNATURE_FLAGS { D3D12_ROOT_SIGNATURE_FLAG_NONE = 0x0, D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT = 0x1, D3D12_ROOT_SIGNATURE_FLAG_DENY_VERTEX_SHADER_ROOT_ACCESS = 0x2, D3D12_ROOT_SIGNATURE_FLAG_DENY_HULL_SHADER_ROOT_ACCESS = 0x4, D3D12_ROOT_SIGNATURE_FLAG_DENY_DOMAIN_SHADER_ROOT_ACCESS = 0x8, D3D12_ROOT_SIGNATURE_FLAG_DENY_GEOMETRY_SHADER_ROOT_ACCESS = 0x10, D3D12_ROOT_SIGNATURE_FLAG_DENY_PIXEL_SHADER_ROOT_ACCESS = 0x20, D3D12_ROOT_SIGNATURE_FLAG_ALLOW_STREAM_OUTPUT = 0x40, }} ENUM!{ enum D3D12_RTV_DIMENSION { D3D12_RTV_DIMENSION_UNKNOWN = 0, D3D12_RTV_DIMENSION_BUFFER = 1, D3D12_RTV_DIMENSION_TEXTURE1D = 2, D3D12_RTV_DIMENSION_TEXTURE1DARRAY = 3, D3D12_RTV_DIMENSION_TEXTURE2D = 4, D3D12_RTV_DIMENSION_TEXTURE2DARRAY = 5, D3D12_RTV_DIMENSION_TEXTURE2DMS = 6, D3D12_RTV_DIMENSION_TEXTURE2DMSARRAY = 7, D3D12_RTV_DIMENSION_TEXTURE3D = 8, }} ENUM!{ enum D3D12_SHADER_COMPONENT_MAPPING { D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_0 = 0, D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_1 = 1, D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_2 = 2, D3D12_SHADER_COMPONENT_MAPPING_FROM_MEMORY_COMPONENT_3 = 3, D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_0 = 4, D3D12_SHADER_COMPONENT_MAPPING_FORCE_VALUE_1 = 5, }} ENUM!{ enum D3D12_SHADER_VISIBILITY { D3D12_SHADER_VISIBILITY_ALL = 0, D3D12_SHADER_VISIBILITY_VERTEX = 1, D3D12_SHADER_VISIBILITY_HULL = 2, D3D12_SHADER_VISIBILITY_DOMAIN = 3, D3D12_SHADER_VISIBILITY_GEOMETRY = 4, D3D12_SHADER_VISIBILITY_PIXEL = 5, }} ENUM!{ enum D3D12_SRV_DIMENSION { D3D12_SRV_DIMENSION_UNKNOWN = 0, D3D12_SRV_DIMENSION_BUFFER = 1, D3D12_SRV_DIMENSION_TEXTURE1D = 2, D3D12_SRV_DIMENSION_TEXTURE1DARRAY = 3, D3D12_SRV_DIMENSION_TEXTURE2D = 4, D3D12_SRV_DIMENSION_TEXTURE2DARRAY = 5, D3D12_SRV_DIMENSION_TEXTURE2DMS = 6, D3D12_SRV_DIMENSION_TEXTURE2DMSARRAY = 7, D3D12_SRV_DIMENSION_TEXTURE3D = 8, D3D12_SRV_DIMENSION_TEXTURECUBE = 9, D3D12_SRV_DIMENSION_TEXTURECUBEARRAY = 10, }} ENUM!{ enum D3D12_STATIC_BORDER_COLOR { D3D12_STATIC_BORDER_COLOR_TRANSPARENT_BLACK = 0, D3D12_STATIC_BORDER_COLOR_OPAQUE_BLACK = 1, D3D12_STATIC_BORDER_COLOR_OPAQUE_WHITE = 2, }} ENUM!{ enum D3D12_TEXTURE_ADDRESS_MODE { D3D12_TEXTURE_ADDRESS_MODE_WRAP = 1, D3D12_TEXTURE_ADDRESS_MODE_MIRROR = 2, D3D12_TEXTURE_ADDRESS_MODE_CLAMP = 3, D3D12_TEXTURE_ADDRESS_MODE_BORDER = 4, D3D12_TEXTURE_ADDRESS_MODE_MIRROR_ONCE = 5, }} ENUM!{ enum D3D12_TEXTURE_COPY_TYPE { D3D12_TEXTURE_COPY_TYPE_SUBRESOURCE_INDEX = 0, 
D3D12_TEXTURE_COPY_TYPE_PLACED_FOOTPRINT = 1, }} ENUM!{ enum D3D12_TEXTURE_LAYOUT { D3D12_TEXTURE_LAYOUT_UNKNOWN = 0, D3D12_TEXTURE_LAYOUT_ROW_MAJOR = 1, D3D12_TEXTURE_LAYOUT_64KB_UNDEFINED_SWIZZLE = 2, D3D12_TEXTURE_LAYOUT_64KB_STANDARD_SWIZZLE = 3, }} FLAGS!{ enum D3D12_TILE_COPY_FLAGS { D3D12_TILE_COPY_FLAG_NONE = 0x0, D3D12_TILE_COPY_FLAG_NO_HAZARD = 0x1, D3D12_TILE_COPY_FLAG_LINEAR_BUFFER_TO_SWIZZLED_TILED_RESOURCE = 0x2, D3D12_TILE_COPY_FLAG_SWIZZLED_TILED_RESOURCE_TO_LINEAR_BUFFER = 0x4, }} FLAGS!{ enum D3D12_TILE_MAPPING_FLAGS { D3D12_TILE_MAPPING_FLAG_NONE = 0x0, D3D12_TILE_MAPPING_FLAG_NO_HAZARD = 0x1, }} FLAGS!{ enum D3D12_TILE_RANGE_FLAGS { D3D12_TILE_RANGE_FLAG_NONE = 0x0, D3D12_TILE_RANGE_FLAG_NULL = 0x1, D3D12_TILE_RANGE_FLAG_SKIP = 0x2, D3D12_TILE_RANGE_FLAG_REUSE_SINGLE_TILE = 0x4, }} ENUM!{ enum D3D12_UAV_DIMENSION { D3D12_UAV_DIMENSION_UNKNOWN = 0, D3D12_UAV_DIMENSION_BUFFER = 1, D3D12_UAV_DIMENSION_TEXTURE1D = 2, D3D12_UAV_DIMENSION_TEXTURE1DARRAY = 3, D3D12_UAV_DIMENSION_TEXTURE2D = 4, D3D12_UAV_DIMENSION_TEXTURE2DARRAY = 5, D3D12_UAV_DIMENSION_TEXTURE3D = 8, }} ENUM!{ enum D3D_ROOT_SIGNATURE_VERSION { D3D_ROOT_SIGNATURE_VERSION_1 = 1, }} STRUCT!{struct D3D12_BUFFER_RTV { FirstElement: ::UINT64, NumElements: ::UINT, }} STRUCT!{struct D3D12_BUFFER_SRV { FirstElement: ::UINT64, NumElements: ::UINT, StructureByteStride: ::UINT, Flags: ::D3D12_BUFFER_SRV_FLAGS, }} STRUCT!{struct D3D12_BUFFER_UAV { FirstElement: ::UINT64, NumElements: ::UINT, StructureByteStride: ::UINT, CounterOffsetInBytes: ::UINT64, Flags: ::D3D12_BUFFER_UAV_FLAGS, }} STRUCT!{struct D3D12_CLEAR_VALUE { Format: ::DXGI_FORMAT, u: [::FLOAT; 4], }} UNION!(D3D12_CLEAR_VALUE, u, DepthStencil, DepthStencil_mut, ::D3D12_DEPTH_STENCIL_VALUE); UNION!(D3D12_CLEAR_VALUE, u, Color, Color_mut, [::FLOAT; 4]); STRUCT!{struct D3D12_COMMAND_SIGNATURE_DESC { ByteStride: ::UINT, NumArgumentDescs: ::UINT, pArgumentDescs: *const ::D3D12_INDIRECT_ARGUMENT_DESC, NodeMask: ::UINT, }} STRUCT!{struct D3D12_CONSTANT_BUFFER_VIEW_DESC { BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS, SizeInBytes: ::UINT, }} STRUCT!{struct D3D12_CPU_DESCRIPTOR_HANDLE { ptr: ::SIZE_T, }} STRUCT!{struct D3D12_DEPTH_STENCIL_VALUE { Depth: ::FLOAT, Stencil: ::UINT8, }} STRUCT!{struct D3D12_DEPTH_STENCIL_VIEW_DESC { Format: ::DXGI_FORMAT, ViewDimension: ::D3D12_DSV_DIMENSION, Flags: ::D3D12_DSV_FLAGS, u: ::D3D12_TEX1D_ARRAY_DSV, }} UNION!(D3D12_DEPTH_STENCIL_VIEW_DESC, u, Texture2DMSArray, Texture2DMSArray_mut, ::D3D12_TEX2DMS_ARRAY_DSV); UNION!(D3D12_DEPTH_STENCIL_VIEW_DESC, u, Texture2DMS, Texture2DMS_mut, ::D3D12_TEX2DMS_DSV); UNION!(D3D12_DEPTH_STENCIL_VIEW_DESC, u, Texture2DArray, Texture2DArray_mut, ::D3D12_TEX2D_ARRAY_DSV); UNION!(D3D12_DEPTH_STENCIL_VIEW_DESC, u, Texture2D, Texture2D_mut, ::D3D12_TEX2D_DSV); UNION!(D3D12_DEPTH_STENCIL_VIEW_DESC, u, Texture1DArray, Texture1DArray_mut, ::D3D12_TEX1D_ARRAY_DSV); UNION!(D3D12_DEPTH_STENCIL_VIEW_DESC, u, Texture1D, Texture1D_mut, ::D3D12_TEX1D_DSV); STRUCT!{struct D3D12_DESCRIPTOR_HEAP_DESC { Type: ::D3D12_DESCRIPTOR_HEAP_TYPE, NumDescriptors: ::UINT, Flags: ::D3D12_DESCRIPTOR_HEAP_FLAGS, NodeMask: ::UINT, }} STRUCT!{struct D3D12_DESCRIPTOR_RANGE { RangeType: ::D3D12_DESCRIPTOR_RANGE_TYPE, NumDescriptors: ::UINT, BaseShaderRegister: ::UINT, RegisterSpace: ::UINT, OffsetInDescriptorsFromTableStart: ::UINT, }} STRUCT!{struct D3D12_DISCARD_REGION { NumRects: ::UINT, pRects: *const ::D3D12_RECT, FirstSubresource: ::UINT, NumSubresources: ::UINT, }} STRUCT!{struct D3D12_DISPATCH_ARGUMENTS { ThreadGroupCountX: 
::UINT, ThreadGroupCountY: ::UINT, ThreadGroupCountZ: ::UINT, }} STRUCT!{struct D3D12_DRAW_ARGUMENTS { VertexCountPerInstance: ::UINT, InstanceCount: ::UINT, StartVertexLocation: ::UINT, StartInstanceLocation: ::UINT, }} STRUCT!{struct D3D12_DRAW_INDEXED_ARGUMENTS { IndexCountPerInstance: ::UINT, InstanceCount: ::UINT, StartIndexLocation: ::UINT, BaseVertexLocation: ::INT, StartInstanceLocation: ::UINT, }} STRUCT!{struct D3D12_FEATURE_DATA_ARCHITECTURE { NodeIndex: ::UINT, TileBasedRenderer: ::BOOL, UMA: ::BOOL, CacheCoherentUMA: ::BOOL, }} STRUCT!{struct D3D12_FEATURE_DATA_FEATURE_LEVELS { NumFeatureLevels: ::UINT, pFeatureLevelsRequested: *const ::D3D_FEATURE_LEVEL, MaxSupportedFeatureLevel: ::D3D_FEATURE_LEVEL, }} STRUCT!{struct D3D12_FEATURE_DATA_FORMAT_INFO { Format: ::DXGI_FORMAT, PlaneCount: ::UINT8, }} STRUCT!{struct D3D12_FEATURE_DATA_FORMAT_SUPPORT { Format: ::DXGI_FORMAT, Support1: ::D3D12_FORMAT_SUPPORT1, Support2: ::D3D12_FORMAT_SUPPORT2, }} STRUCT!{struct D3D12_FEATURE_DATA_GPU_VIRTUAL_ADDRESS_SUPPORT { MaxGPUVirtualAddressBitsPerResource: ::UINT, MaxGPUVirtualAddressBitsPerProcess: ::UINT, }} STRUCT!{struct D3D12_FEATURE_DATA_MULTISAMPLE_QUALITY_LEVELS { Format: ::DXGI_FORMAT, SampleCount: ::UINT, Flags: ::D3D12_MULTISAMPLE_QUALITY_LEVEL_FLAGS, NumQualityLevels: ::UINT, }} STRUCT!{struct D3D12_GPU_DESCRIPTOR_HANDLE { ptr: ::UINT64, }} STRUCT!{struct D3D12_HEAP_DESC { SizeInBytes: ::UINT64, Properties: ::D3D12_HEAP_PROPERTIES, Alignment: ::UINT64, Flags: ::D3D12_HEAP_FLAGS, }} STRUCT!{struct D3D12_HEAP_PROPERTIES { Type: ::D3D12_HEAP_TYPE, CPUPageProperty: ::D3D12_CPU_PAGE_PROPERTY, MemoryPoolPreference: ::D3D12_MEMORY_POOL, CreationNodeMask: ::UINT, VisibleNodeMask: ::UINT, }} STRUCT!{struct D3D12_INDEX_BUFFER_VIEW { BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS, SizeInBytes: ::UINT, Format: ::DXGI_FORMAT, }} STRUCT!{struct D3D12_INDIRECT_ARGUMENT_DESC_VertexBuffer { Slot: ::UINT, }} STRUCT!{struct D3D12_INDIRECT_ARGUMENT_DESC_Constant { RootParameterIndex: ::UINT, DestOffsetIn32BitValues: ::UINT, Num32BitValuesToSet: ::UINT, }} STRUCT!{struct D3D12_INDIRECT_ARGUMENT_DESC_ConstantBufferView { RootParameterIndex: ::UINT, }} STRUCT!{struct D3D12_INDIRECT_ARGUMENT_DESC_ShaderResourceView { RootParameterIndex: ::UINT, }} STRUCT!{struct D3D12_INDIRECT_ARGUMENT_DESC_UnorderedAccessView { RootParameterIndex: ::UINT, }} STRUCT!{struct D3D12_INDIRECT_ARGUMENT_DESC { Type: ::D3D12_INDIRECT_ARGUMENT_TYPE, u: ::D3D12_INDIRECT_ARGUMENT_DESC_Constant, }} UNION!(D3D12_INDIRECT_ARGUMENT_DESC, u, UnorderedAccessView, UnorderedAccessView_mut, D3D12_INDIRECT_ARGUMENT_DESC_UnorderedAccessView); UNION!(D3D12_INDIRECT_ARGUMENT_DESC, u, ShaderResourceView, ShaderResourceView_mut, D3D12_INDIRECT_ARGUMENT_DESC_ShaderResourceView); UNION!(D3D12_INDIRECT_ARGUMENT_DESC, u, ConstantBufferView, ConstantBufferView_mut, D3D12_INDIRECT_ARGUMENT_DESC_ConstantBufferView); UNION!(D3D12_INDIRECT_ARGUMENT_DESC, u, Constant, Constant_mut, D3D12_INDIRECT_ARGUMENT_DESC_Constant); UNION!(D3D12_INDIRECT_ARGUMENT_DESC, u, VertexBuffer, VertexBuffer_mut, D3D12_INDIRECT_ARGUMENT_DESC_VertexBuffer); STRUCT!{struct D3D12_MEMCPY_DEST { pData: *mut ::c_void, RowPitch: ::SIZE_T, SlicePitch: ::SIZE_T, }} STRUCT!{struct D3D12_PACKED_MIP_INFO { NumStandardMips: ::UINT8, NumPackedMips: ::UINT8, NumTilesForPackedMips: ::UINT, StartTileIndexInOverallResource: ::UINT, }} STRUCT!{struct D3D12_PLACED_SUBRESOURCE_FOOTPRINT { Offset: ::UINT64, Footprint: ::D3D12_SUBRESOURCE_FOOTPRINT, }} STRUCT!{struct 
D3D12_QUERY_DATA_PIPELINE_STATISTICS { IAVertices: ::UINT64, IAPrimitives: ::UINT64, VSInvocations: ::UINT64, GSInvocations: ::UINT64, GSPrimitives: ::UINT64, CInvocations: ::UINT64, CPrimitives: ::UINT64, PSInvocations: ::UINT64, HSInvocations: ::UINT64, DSInvocations: ::UINT64, CSInvocations: ::UINT64, }} STRUCT!{struct D3D12_QUERY_DATA_SO_STATISTICS { NumPrimitivesWritten: ::UINT64, PrimitivesStorageNeeded: ::UINT64, }} STRUCT!{struct D3D12_QUERY_HEAP_DESC { Type: ::D3D12_QUERY_HEAP_TYPE, Count: ::UINT, NodeMask: ::UINT, }} STRUCT!{struct D3D12_RANGE { Begin: ::SIZE_T, End: ::SIZE_T, }} STRUCT!{struct D3D12_RENDER_TARGET_VIEW_DESC { Format: ::DXGI_FORMAT, ViewDimension: ::D3D12_RTV_DIMENSION, u: ::D3D12_BUFFER_RTV, }} UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Texture3D, Texture3D_mut, ::D3D12_TEX3D_RTV); UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Texture2DMSArray, Texture2DMSArray_mut, ::D3D12_TEX2DMS_ARRAY_RTV); UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Texture2DMS, Texture2DMS_mut, ::D3D12_TEX2DMS_RTV); UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Texture2DArray, Texture2DArray_mut, ::D3D12_TEX2D_ARRAY_RTV); UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Texture2D, Texture2D_mut, ::D3D12_TEX2D_RTV); UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Texture1DArray, Texture1DArray_mut, ::D3D12_TEX1D_ARRAY_RTV); UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Texture1D, Texture1D_mut, ::D3D12_TEX1D_RTV); UNION!(D3D12_RENDER_TARGET_VIEW_DESC, u, Buffer, Buffer_mut, ::D3D12_BUFFER_RTV); STRUCT!{struct D3D12_RESOURCE_ALIASING_BARRIER { pResourceBefore: *mut ::ID3D12Resource, pResourceAfter: *mut ::ID3D12Resource, }} STRUCT!{struct D3D12_RESOURCE_ALLOCATION_INFO { SizeInBytes: ::UINT64, Alignment: ::UINT64, }} STRUCT!{struct D3D12_RESOURCE_BARRIER { Type: ::D3D12_RESOURCE_BARRIER_TYPE, Flags: ::D3D12_RESOURCE_BARRIER_FLAGS, u: ::D3D12_RESOURCE_TRANSITION_BARRIER, }} UNION!(D3D12_RESOURCE_BARRIER, u, UAV, UAV_mut, ::D3D12_RESOURCE_UAV_BARRIER); UNION!(D3D12_RESOURCE_BARRIER, u, Aliasing, Aliasing_mut, ::D3D12_RESOURCE_ALIASING_BARRIER); UNION!(D3D12_RESOURCE_BARRIER, u, Transition, Transition_mut, ::D3D12_RESOURCE_TRANSITION_BARRIER); STRUCT!{struct D3D12_RESOURCE_DESC { Dimension: ::D3D12_RESOURCE_DIMENSION, Alignment: ::UINT64, Width: ::UINT64, Height: ::UINT, DepthOrArraySize: ::UINT16, MipLevels: ::UINT16, Format: ::DXGI_FORMAT, SampleDesc: ::DXGI_SAMPLE_DESC, Layout: ::D3D12_TEXTURE_LAYOUT, Flags: ::D3D12_RESOURCE_FLAGS, }} STRUCT!{struct D3D12_RESOURCE_TRANSITION_BARRIER { pResource: *mut ::ID3D12Resource, Subresource: ::UINT, StateBefore: ::D3D12_RESOURCE_STATES, StateAfter: ::D3D12_RESOURCE_STATES, }} STRUCT!{struct D3D12_RESOURCE_UAV_BARRIER { pResource: *mut ::ID3D12Resource, }} STRUCT!{struct D3D12_ROOT_CONSTANTS { ShaderRegister: ::UINT, RegisterSpace: ::UINT, Num32BitValues: ::UINT, }} STRUCT!{struct D3D12_ROOT_DESCRIPTOR { ShaderRegister: ::UINT, RegisterSpace: ::UINT, }} STRUCT!{struct D3D12_ROOT_DESCRIPTOR_TABLE { NumDescriptorRanges: ::UINT, pDescriptorRanges: *const ::D3D12_DESCRIPTOR_RANGE, }} #[cfg(target_pointer_width = "64")] STRUCT!{struct D3D12_ROOT_PARAMETER { ParameterType: ::D3D12_ROOT_PARAMETER_TYPE, u: ::D3D12_ROOT_DESCRIPTOR_TABLE, ShaderVisibility: ::D3D12_SHADER_VISIBILITY, }} #[cfg(target_pointer_width = "32")] STRUCT!{struct D3D12_ROOT_PARAMETER { ParameterType: ::D3D12_ROOT_PARAMETER_TYPE, u: ::D3D12_ROOT_CONSTANTS, ShaderVisibility: ::D3D12_SHADER_VISIBILITY, }} UNION!(D3D12_ROOT_PARAMETER, u, Descriptor, Descriptor_mut, ::D3D12_ROOT_DESCRIPTOR); UNION!(D3D12_ROOT_PARAMETER, u, 
Constants, Constants_mut, ::D3D12_ROOT_CONSTANTS); UNION!(D3D12_ROOT_PARAMETER, u, DescriptorTable, DescriptorTable_mut, ::D3D12_ROOT_DESCRIPTOR_TABLE); STRUCT!{struct D3D12_ROOT_SIGNATURE_DESC { NumParameters: ::UINT, pParameters: *const ::D3D12_ROOT_PARAMETER, NumStaticSamplers: ::UINT, pStaticSamplers: *const ::D3D12_STATIC_SAMPLER_DESC, Flags: ::D3D12_ROOT_SIGNATURE_FLAGS, }} STRUCT!{struct D3D12_SAMPLER_DESC { Filter: ::D3D12_FILTER, AddressU: ::D3D12_TEXTURE_ADDRESS_MODE, AddressV: ::D3D12_TEXTURE_ADDRESS_MODE, AddressW: ::D3D12_TEXTURE_ADDRESS_MODE, MipLODBias: ::FLOAT, MaxAnisotropy: ::UINT, ComparisonFunc: ::D3D12_COMPARISON_FUNC, BorderColor: [::FLOAT; 4], MinLOD: ::FLOAT, MaxLOD: ::FLOAT, }} STRUCT!{struct D3D12_SHADER_RESOURCE_VIEW_DESC { Format: ::DXGI_FORMAT, ViewDimension: ::D3D12_SRV_DIMENSION, Shader4ComponentMapping: ::UINT, u: ::D3D12_BUFFER_SRV, }} UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, TextureCubeArray, TextureCubeArray_mut, ::D3D12_TEXCUBE_ARRAY_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, TextureCube, TextureCube_mut, ::D3D12_TEXCUBE_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Texture3D, Texture3D_mut, ::D3D12_TEX3D_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Texture2DMSArray, Texture2DMSArray_mut, ::D3D12_TEX2DMS_ARRAY_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Texture2DMS, Texture2DMS_mut, ::D3D12_TEX2DMS_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Texture2DArray, Texture2DArray_mut, ::D3D12_TEX2D_ARRAY_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Texture2D, Texture2D_mut, ::D3D12_TEX2D_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Texture1DArray, Texture1DArray_mut, ::D3D12_TEX1D_ARRAY_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Texture1D, Texture1D_mut, ::D3D12_TEX1D_SRV); UNION!(D3D12_SHADER_RESOURCE_VIEW_DESC, u, Buffer, Buffer_mut, ::D3D12_BUFFER_SRV); STRUCT!{struct D3D12_STATIC_SAMPLER_DESC { Filter: ::D3D12_FILTER, AddressU: ::D3D12_TEXTURE_ADDRESS_MODE, AddressV: ::D3D12_TEXTURE_ADDRESS_MODE, AddressW: ::D3D12_TEXTURE_ADDRESS_MODE, MipLODBias: ::FLOAT, MaxAnisotropy: ::UINT, ComparisonFunc: ::D3D12_COMPARISON_FUNC, BorderColor: ::D3D12_STATIC_BORDER_COLOR, MinLOD: ::FLOAT, MaxLOD: ::FLOAT, ShaderRegister: ::UINT, RegisterSpace: ::UINT, ShaderVisibility: ::D3D12_SHADER_VISIBILITY, }} STRUCT!{struct D3D12_STREAM_OUTPUT_BUFFER_VIEW { BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS, SizeInBytes: ::UINT64, BufferFilledSizeLocation: ::D3D12_GPU_VIRTUAL_ADDRESS, }} STRUCT!{struct D3D12_SUBRESOURCE_DATA { pData: *const ::c_void, RowPitch: ::LONG_PTR, SlicePitch: ::LONG_PTR, }} STRUCT!{struct D3D12_SUBRESOURCE_FOOTPRINT { Format: ::DXGI_FORMAT, Width: ::UINT, Height: ::UINT, Depth: ::UINT, RowPitch: ::UINT, }} STRUCT!{struct D3D12_SUBRESOURCE_INFO { Offset: ::UINT64, RowPitch: ::UINT, DepthPitch: ::UINT, }} STRUCT!{struct D3D12_SUBRESOURCE_TILING { WidthInTiles: ::UINT, HeightInTiles: ::UINT16, DepthInTiles: ::UINT16, StartTileIndexInOverallResource: ::UINT, }} STRUCT!{struct D3D12_TEX1D_ARRAY_DSV { MipSlice: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, }} STRUCT!{struct D3D12_TEX1D_ARRAY_RTV { MipSlice: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, }} STRUCT!{struct D3D12_TEX1D_ARRAY_SRV { MostDetailedMip: ::UINT, MipLevels: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, ResourceMinLODClamp: ::FLOAT, }} STRUCT!{struct D3D12_TEX1D_ARRAY_UAV { MipSlice: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, }} STRUCT!{struct D3D12_TEX1D_DSV { MipSlice: ::UINT, }} STRUCT!{struct D3D12_TEX1D_RTV { MipSlice: ::UINT, 
}} STRUCT!{struct D3D12_TEX1D_SRV { MostDetailedMip: ::UINT, MipLevels: ::UINT, ResourceMinLODClamp: ::FLOAT, }} STRUCT!{struct D3D12_TEX1D_UAV { MipSlice: ::UINT, }} STRUCT!{struct D3D12_TEX2DMS_ARRAY_DSV { FirstArraySlice: ::UINT, ArraySize: ::UINT, }} STRUCT!{struct D3D12_TEX2DMS_ARRAY_RTV { FirstArraySlice: ::UINT, ArraySize: ::UINT, }} STRUCT!{struct D3D12_TEX2DMS_ARRAY_SRV { FirstArraySlice: ::UINT, ArraySize: ::UINT, }} STRUCT!{struct D3D12_TEX2DMS_DSV { UnusedField_NothingToDefine: ::UINT, }} STRUCT!{struct D3D12_TEX2DMS_RTV { UnusedField_NothingToDefine: ::UINT, }} STRUCT!{struct D3D12_TEX2DMS_SRV { UnusedField_NothingToDefine: ::UINT, }} STRUCT!{struct D3D12_TEX2D_ARRAY_DSV { MipSlice: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, }} STRUCT!{struct D3D12_TEX2D_ARRAY_RTV { MipSlice: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, PlaneSlice: ::UINT, }} STRUCT!{struct D3D12_TEX2D_ARRAY_SRV { MostDetailedMip: ::UINT, MipLevels: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, PlaneSlice: ::UINT, ResourceMinLODClamp: ::FLOAT, }} STRUCT!{struct D3D12_TEX2D_ARRAY_UAV { MipSlice: ::UINT, FirstArraySlice: ::UINT, ArraySize: ::UINT, PlaneSlice: ::UINT, }} STRUCT!{struct D3D12_TEX2D_DSV { MipSlice: ::UINT, }} STRUCT!{struct D3D12_TEX2D_RTV { MipSlice: ::UINT, PlaneSlice: ::UINT, }}<|fim▁hole|>STRUCT!{struct D3D12_TEX2D_SRV { MostDetailedMip: ::UINT, MipLevels: ::UINT, PlaneSlice: ::UINT, ResourceMinLODClamp: ::FLOAT, }} STRUCT!{struct D3D12_TEX2D_UAV { MipSlice: ::UINT, PlaneSlice: ::UINT, }} STRUCT!{struct D3D12_TEX3D_RTV { MipSlice: ::UINT, FirstWSlice: ::UINT, WSize: ::UINT, }} STRUCT!{struct D3D12_TEX3D_SRV { MostDetailedMip: ::UINT, MipLevels: ::UINT, ResourceMinLODClamp: ::FLOAT, }} STRUCT!{struct D3D12_TEX3D_UAV { MipSlice: ::UINT, FirstWSlice: ::UINT, WSize: ::UINT, }} STRUCT!{struct D3D12_TEXCUBE_ARRAY_SRV { MostDetailedMip: ::UINT, MipLevels: ::UINT, First2DArrayFace: ::UINT, NumCubes: ::UINT, ResourceMinLODClamp: ::FLOAT, }} STRUCT!{struct D3D12_TEXCUBE_SRV { MostDetailedMip: ::UINT, MipLevels: ::UINT, ResourceMinLODClamp: ::FLOAT, }} STRUCT!{struct D3D12_TEXTURE_COPY_LOCATION { pResource: *mut ::ID3D12Resource, Type: ::D3D12_TEXTURE_COPY_TYPE, u: ::D3D12_PLACED_SUBRESOURCE_FOOTPRINT, }} UNION!(D3D12_TEXTURE_COPY_LOCATION, u, SubresourceIndex, SubresourceIndex_mut, ::UINT); UNION!(D3D12_TEXTURE_COPY_LOCATION, u, PlacedFootprint, PlacedFootprint_mut, ::D3D12_PLACED_SUBRESOURCE_FOOTPRINT); STRUCT!{struct D3D12_TILED_RESOURCE_COORDINATE { X: ::UINT, Y: ::UINT, Z: ::UINT, Subresource: ::UINT, }} STRUCT!{struct D3D12_TILE_REGION_SIZE { NumTiles: ::UINT, UseBox: ::BOOL, Width: ::UINT, Height: ::UINT16, Depth: ::UINT16, }} STRUCT!{struct D3D12_TILE_SHAPE { WidthInTexels: ::UINT, HeightInTexels: ::UINT, DepthInTexels: ::UINT, }} STRUCT!{struct D3D12_UNORDERED_ACCESS_VIEW_DESC { Format: ::DXGI_FORMAT, ViewDimension: ::D3D12_UAV_DIMENSION, u: ::D3D12_BUFFER_UAV, }} UNION!(D3D12_UNORDERED_ACCESS_VIEW_DESC, u, Texture3D, Texture3D_mut, ::D3D12_TEX3D_UAV); UNION!(D3D12_UNORDERED_ACCESS_VIEW_DESC, u, Texture2DArray, Texture2DArray_mut, ::D3D12_TEX2D_ARRAY_UAV); UNION!(D3D12_UNORDERED_ACCESS_VIEW_DESC, u, Texture2D, Texture2D_mut, ::D3D12_TEX2D_UAV); UNION!(D3D12_UNORDERED_ACCESS_VIEW_DESC, u, Texture1DArray, Texture1DArray_mut, ::D3D12_TEX1D_ARRAY_UAV); UNION!(D3D12_UNORDERED_ACCESS_VIEW_DESC, u, Texture1D, Texture1D_mut, ::D3D12_TEX1D_UAV); UNION!(D3D12_UNORDERED_ACCESS_VIEW_DESC, u, Buffer, Buffer_mut, ::D3D12_BUFFER_UAV); STRUCT!{struct D3D12_VERTEX_BUFFER_VIEW { 
BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS, SizeInBytes: ::UINT, StrideInBytes: ::UINT, }} RIDL!( interface ID3D12CommandAllocator(ID3D12CommandAllocatorVtbl): ID3D12Pageable(ID3D12PageableVtbl) { fn Reset(&mut self) -> ::HRESULT }); RIDL!( interface ID3D12CommandList(ID3D12CommandListVtbl): ID3D12DeviceChild(ID3D12DeviceChildVtbl) { fn GetType(&mut self) -> ::D3D12_COMMAND_LIST_TYPE }); RIDL!( interface ID3D12CommandQueue(ID3D12CommandQueueVtbl): ID3D12Pageable(ID3D12PageableVtbl) { fn UpdateTileMappings( &mut self, pResource: *mut ::ID3D12Resource, NumResourceRegions: ::UINT, pResourceRegionStartCoordinates: *const ::D3D12_TILED_RESOURCE_COORDINATE, pResourceRegionSizes: *const ::D3D12_TILE_REGION_SIZE, pHeap: *mut ::ID3D12Heap, NumRanges: ::UINT, pRangeFlags: *const ::D3D12_TILE_RANGE_FLAGS, pHeapRangeStartOffsets: *const ::UINT, pRangeTileCounts: *const ::UINT, Flags: ::D3D12_TILE_MAPPING_FLAGS ) -> (), fn CopyTileMappings( &mut self, pDstResource: *mut ::ID3D12Resource, pDstRegionStartCoordinate: *const ::D3D12_TILED_RESOURCE_COORDINATE, pSrcResource: *mut ::ID3D12Resource, pSrcRegionStartCoordinate: *const ::D3D12_TILED_RESOURCE_COORDINATE, pRegionSize: *const ::D3D12_TILE_REGION_SIZE, Flags: ::D3D12_TILE_MAPPING_FLAGS ) -> (), fn ExecuteCommandLists( &mut self, NumCommandLists: ::UINT, ppCommandLists: *mut *mut ::ID3D12CommandList ) -> (), fn SetMarker( &mut self, Metadata: ::UINT, pData: *const ::c_void, Size: ::UINT ) -> (), fn BeginEvent( &mut self, Metadata: ::UINT, pData: *const ::c_void, Size: ::UINT ) -> (), fn EndEvent(&mut self) -> (), fn Signal( &mut self, pFence: *mut ::ID3D12Fence, Value: ::UINT64 ) -> ::HRESULT, fn Wait( &mut self, pFence: *mut ::ID3D12Fence, Value: ::UINT64 ) -> ::HRESULT, fn GetTimestampFrequency(&mut self, pFrequency: *mut ::UINT64) -> ::HRESULT, fn GetClockCalibration( &mut self, pGpuTimestamp: *mut ::UINT64, pCpuTimestamp: *mut ::UINT64 ) -> ::HRESULT, fn GetDesc( &mut self, __ret_val: *mut ::D3D12_COMMAND_QUEUE_DESC ) -> *mut ::D3D12_COMMAND_QUEUE_DESC }); RIDL!( interface ID3D12CommandSignature(ID3D12CommandSignatureVtbl): ID3D12Pageable(ID3D12PageableVtbl) { }); RIDL!( interface ID3D12DescriptorHeap(ID3D12DescriptorHeapVtbl): ID3D12Pageable(ID3D12PageableVtbl) { fn GetDesc( &mut self, __ret_val: *mut ::D3D12_DESCRIPTOR_HEAP_DESC ) -> *mut ::D3D12_DESCRIPTOR_HEAP_DESC, fn GetCPUDescriptorHandleForHeapStart( &mut self, __ret_val: *mut ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> *mut ::D3D12_CPU_DESCRIPTOR_HANDLE, fn GetGPUDescriptorHandleForHeapStart( &mut self, __ret_val: *mut ::D3D12_GPU_DESCRIPTOR_HANDLE ) -> *mut ::D3D12_GPU_DESCRIPTOR_HANDLE }); RIDL!( interface ID3D12Device(ID3D12DeviceVtbl): ID3D12Object(ID3D12ObjectVtbl) { fn GetNodeCount(&mut self) -> ::UINT, fn CreateCommandQueue( &mut self, pDesc: *const ::D3D12_COMMAND_QUEUE_DESC, riid: ::REFGUID, ppCommandQueue: *mut *mut ::c_void ) -> ::HRESULT, fn CreateCommandAllocator( &mut self, type_: ::D3D12_COMMAND_LIST_TYPE, riid: ::REFGUID, ppCommandAllocator: *mut *mut ::c_void ) -> ::HRESULT, fn CreateGraphicsPipelineState( &mut self, pDesc: *const ::D3D12_GRAPHICS_PIPELINE_STATE_DESC, riid: ::REFGUID, ppPipelineState: *mut *mut ::c_void ) -> ::HRESULT, fn CreateComputePipelineState( &mut self, pDesc: *const ::D3D12_COMPUTE_PIPELINE_STATE_DESC, riid: ::REFGUID, ppPipelineState: *mut *mut ::c_void ) -> ::HRESULT, fn CreateCommandList( &mut self, nodeMask: ::UINT, type_: ::D3D12_COMMAND_LIST_TYPE, pCommandAllocator: *mut ::ID3D12CommandAllocator, pInitialState: *mut ::ID3D12PipelineState, riid: 
::REFGUID, ppCommandList: *mut *mut ::c_void ) -> ::HRESULT, fn CheckFeatureSupport( &mut self, Feature: ::D3D12_FEATURE, pFeatureSupportData: *mut ::c_void, FeatureSupportDataSize: ::UINT ) -> ::HRESULT, fn CreateDescriptorHeap( &mut self, pDescriptorHeapDesc: *const ::D3D12_DESCRIPTOR_HEAP_DESC, riid: ::REFGUID, ppvHeap: *mut *mut ::c_void ) -> ::HRESULT, fn GetDescriptorHandleIncrementSize( &mut self, DescriptorHeapType: ::D3D12_DESCRIPTOR_HEAP_TYPE ) -> ::UINT, fn CreateRootSignature( &mut self, nodeMask: ::UINT, pBlobWithRootSignature: *const ::c_void, blobLengthInBytes: ::SIZE_T, riid: ::REFGUID, ppvRootSignature: *mut *mut ::c_void ) -> ::HRESULT, fn CreateConstantBufferView( &mut self, pDesc: *const ::D3D12_CONSTANT_BUFFER_VIEW_DESC, DestDescriptor: ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> (), fn CreateShaderResourceView( &mut self, pResource: *mut ::ID3D12Resource, pDesc: *const ::D3D12_SHADER_RESOURCE_VIEW_DESC, DestDescriptor: ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> (), fn CreateUnorderedAccessView( &mut self, pResource: *mut ::ID3D12Resource, pCounterResource: *mut ::ID3D12Resource, pDesc: *const ::D3D12_UNORDERED_ACCESS_VIEW_DESC, DestDescriptor: ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> (), fn CreateRenderTargetView( &mut self, pResource: *mut ::ID3D12Resource, pDesc: *const ::D3D12_RENDER_TARGET_VIEW_DESC, DestDescriptor: ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> (), fn CreateDepthStencilView( &mut self, pResource: *mut ::ID3D12Resource, pDesc: *const ::D3D12_DEPTH_STENCIL_VIEW_DESC, DestDescriptor: ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> (), fn CreateSampler( &mut self, pDesc: *const ::D3D12_SAMPLER_DESC, DestDescriptor: ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> (), fn CopyDescriptors( &mut self, NumDestDescriptorRanges: ::UINT, pDestDescriptorRangeStarts: *const ::D3D12_CPU_DESCRIPTOR_HANDLE, pDestDescriptorRangeSizes: *const ::UINT, NumSrcDescriptorRanges: ::UINT, pSrcDescriptorRangeStarts: *const ::D3D12_CPU_DESCRIPTOR_HANDLE, pSrcDescriptorRangeSizes: *const ::UINT, DescriptorHeapsType: ::D3D12_DESCRIPTOR_HEAP_TYPE ) -> (), fn CopyDescriptorsSimple( &mut self, NumDescriptors: ::UINT, DestDescriptorRangeStart: ::D3D12_CPU_DESCRIPTOR_HANDLE, SrcDescriptorRangeStart: ::D3D12_CPU_DESCRIPTOR_HANDLE, DescriptorHeapsType: ::D3D12_DESCRIPTOR_HEAP_TYPE ) -> (), fn GetResourceAllocationInfo( &mut self, visibleMask: ::UINT, numResourceDescs: ::UINT, pResourceDescs: *const ::D3D12_RESOURCE_DESC, __ret_val: *mut ::D3D12_RESOURCE_ALLOCATION_INFO ) -> *mut ::D3D12_RESOURCE_ALLOCATION_INFO, fn GetCustomHeapProperties( &mut self, nodeMask: ::UINT, heapType: ::D3D12_HEAP_TYPE, __ret_val: *mut ::D3D12_HEAP_PROPERTIES ) -> *mut ::D3D12_HEAP_PROPERTIES, fn CreateCommittedResource( &mut self, pHeapProperties: *const ::D3D12_HEAP_PROPERTIES, HeapFlags: ::D3D12_HEAP_FLAGS, pResourceDesc: *const ::D3D12_RESOURCE_DESC, InitialResourceState: ::D3D12_RESOURCE_STATES, pOptimizedClearValue: *const ::D3D12_CLEAR_VALUE, riidResource: ::REFGUID, ppvResource: *mut *mut ::c_void ) -> ::HRESULT, fn CreateHeap( &mut self, pDesc: *const ::D3D12_HEAP_DESC, riid: ::REFGUID, ppvHeap: *mut *mut ::c_void ) -> ::HRESULT, fn CreatePlacedResource( &mut self, pHeap: *mut ::ID3D12Heap, HeapOffset: ::UINT64, pDesc: *const ::D3D12_RESOURCE_DESC, InitialState: ::D3D12_RESOURCE_STATES, pOptimizedClearValue: *const ::D3D12_CLEAR_VALUE, riid: ::REFGUID, ppvResource: *mut *mut ::c_void ) -> ::HRESULT, fn CreateReservedResource( &mut self, pDesc: *const ::D3D12_RESOURCE_DESC, InitialState: ::D3D12_RESOURCE_STATES, pOptimizedClearValue: *const 
::D3D12_CLEAR_VALUE, riid: ::REFGUID, ppvResource: *mut *mut ::c_void ) -> ::HRESULT, fn CreateSharedHandle( &mut self, pObject: *mut ::ID3D12DeviceChild, pAttributes: *const ::SECURITY_ATTRIBUTES, Access: ::DWORD, Name: ::LPCWSTR, pHandle: *mut ::HANDLE ) -> ::HRESULT, fn OpenSharedHandle( &mut self, NTHandle: ::HANDLE, riid: ::REFGUID, ppvObj: *mut *mut ::c_void ) -> ::HRESULT, fn OpenSharedHandleByName( &mut self, Name: ::LPCWSTR, Access: ::DWORD, pNTHandle: *mut ::HANDLE ) -> ::HRESULT, fn MakeResident( &mut self, NumObjects: ::UINT, ppObjects: *mut *mut ::ID3D12Pageable ) -> ::HRESULT, fn Evict( &mut self, NumObjects: ::UINT, ppObjects: *mut *mut ::ID3D12Pageable ) -> ::HRESULT, fn CreateFence( &mut self, InitialValue: ::UINT64, Flags: ::D3D12_FENCE_FLAGS, riid: ::REFGUID, ppFence: *mut *mut ::c_void ) -> ::HRESULT, fn GetDeviceRemovedReason(&mut self) -> ::HRESULT, fn GetCopyableFootprints( &mut self, pResourceDesc: *const ::D3D12_RESOURCE_DESC, FirstSubresource: ::UINT, NumSubresources: ::UINT, BaseOffset: ::UINT64, pLayouts: *mut ::D3D12_PLACED_SUBRESOURCE_FOOTPRINT, pNumRows: *mut ::UINT, pRowSizeInBytes: *mut ::UINT64, pTotalBytes: *mut ::UINT64 ) -> (), fn CreateQueryHeap( &mut self, pDesc: *const ::D3D12_QUERY_HEAP_DESC, riid: ::REFGUID, ppvHeap: *mut *mut ::c_void ) -> ::HRESULT, fn SetStablePowerState(&mut self, Enable: ::BOOL) -> ::HRESULT, fn CreateCommandSignature( &mut self, pDesc: *const ::D3D12_COMMAND_SIGNATURE_DESC, pRootSignature: *mut ::ID3D12RootSignature, riid: ::REFGUID, ppvCommandSignature: *mut *mut ::c_void ) -> ::HRESULT, fn GetResourceTiling( &mut self, pTiledResource: *mut ::ID3D12Resource, pNumTilesForEntireResource: *mut ::UINT, pPackedMipDesc: *mut ::D3D12_PACKED_MIP_INFO, pStandardTileShapeForNonPackedMips: *mut ::D3D12_TILE_SHAPE, pNumSubresourceTilings: *mut ::UINT, FirstSubresourceTilingToGet: ::UINT, pSubresourceTilingsForNonPackedMips: *mut ::D3D12_SUBRESOURCE_TILING ) -> (), fn GetAdapterLuid(&mut self, __ret_val: *mut ::LUID) -> *mut ::LUID }); RIDL!( interface ID3D12Fence(ID3D12FenceVtbl): ID3D12Pageable(ID3D12PageableVtbl) { fn GetCompletedValue(&mut self) -> ::UINT64, fn SetEventOnCompletion( &mut self, Value: ::UINT64, hEvent: ::HANDLE ) -> ::HRESULT, fn Signal(&mut self, Value: ::UINT64) -> ::HRESULT }); RIDL!( interface ID3D12GraphicsCommandList(ID3D12GraphicsCommandListVtbl): ID3D12CommandList(ID3D12CommandListVtbl) { fn Close(&mut self) -> ::HRESULT, fn Reset( &mut self, pAllocator: *mut ::ID3D12CommandAllocator, pInitialState: *mut ::ID3D12PipelineState ) -> ::HRESULT, fn ClearState(&mut self, pPipelineState: *mut ::ID3D12PipelineState) -> (), fn DrawInstanced( &mut self, VertexCountPerInstance: ::UINT, InstanceCount: ::UINT, StartVertexLocation: ::UINT, StartInstanceLocation: ::UINT ) -> (), fn DrawIndexedInstanced( &mut self, IndexCountPerInstance: ::UINT, InstanceCount: ::UINT, StartIndexLocation: ::UINT, BaseVertexLocation: ::INT, StartInstanceLocation: ::UINT ) -> (), fn Dispatch( &mut self, ThreadGroupCountX: ::UINT, ThreadGroupCountY: ::UINT, ThreadGroupCountZ: ::UINT ) -> (), fn CopyBufferRegion( &mut self, pDstBuffer: *mut ::ID3D12Resource, DstOffset: ::UINT64, pSrcBuffer: *mut ::ID3D12Resource, SrcOffset: ::UINT64, NumBytes: ::UINT64 ) -> (), fn CopyTextureRegion( &mut self, pDst: *const ::D3D12_TEXTURE_COPY_LOCATION, DstX: ::UINT, DstY: ::UINT, DstZ: ::UINT, pSrc: *const ::D3D12_TEXTURE_COPY_LOCATION, pSrcBox: *const ::D3D12_BOX ) -> (), fn CopyResource( &mut self, pDstResource: *mut ::ID3D12Resource, pSrcResource: *mut 
::ID3D12Resource ) -> (), fn CopyTiles( &mut self, pTiledResource: *mut ::ID3D12Resource, pTileRegionStartCoordinate: *const ::D3D12_TILED_RESOURCE_COORDINATE, pTileRegionSize: *const ::D3D12_TILE_REGION_SIZE, pBuffer: *mut ::ID3D12Resource, BufferStartOffsetInBytes: ::UINT64, Flags: ::D3D12_TILE_COPY_FLAGS ) -> (), fn ResolveSubresource( &mut self, pDstResource: *mut ::ID3D12Resource, DstSubresource: ::UINT, pSrcResource: *mut ::ID3D12Resource, SrcSubresource: ::UINT, Format: ::DXGI_FORMAT ) -> (), fn IASetPrimitiveTopology( &mut self, PrimitiveTopology: ::D3D12_PRIMITIVE_TOPOLOGY ) -> (), fn RSSetViewports( &mut self, NumViewports: ::UINT, pViewports: *const ::D3D12_VIEWPORT ) -> (), fn RSSetScissorRects( &mut self, NumRects: ::UINT, pRects: *const ::D3D12_RECT ) -> (), fn OMSetBlendFactor(&mut self, BlendFactor: *const [::FLOAT; 4]) -> (), fn OMSetStencilRef(&mut self, StencilRef: ::UINT) -> (), fn SetPipelineState( &mut self, pPipelineState: *mut ::ID3D12PipelineState ) -> (), fn ResourceBarrier( &mut self, NumBarriers: ::UINT, pBarriers: *const ::D3D12_RESOURCE_BARRIER ) -> (), fn ExecuteBundle( &mut self, pCommandList: *mut ::ID3D12GraphicsCommandList ) -> (), fn SetDescriptorHeaps( &mut self, NumDescriptorHeaps: ::UINT, ppDescriptorHeaps: *mut *mut ::ID3D12DescriptorHeap ) -> (), fn SetComputeRootSignature( &mut self, pRootSignature: *mut ::ID3D12RootSignature ) -> (), fn SetGraphicsRootSignature( &mut self, pRootSignature: *mut ::ID3D12RootSignature ) -> (), fn SetComputeRootDescriptorTable( &mut self, RootParameterIndex: ::UINT, BaseDescriptor: ::D3D12_GPU_DESCRIPTOR_HANDLE ) -> (), fn SetGraphicsRootDescriptorTable( &mut self, RootParameterIndex: ::UINT, BaseDescriptor: ::D3D12_GPU_DESCRIPTOR_HANDLE ) -> (), fn SetComputeRoot32BitConstant( &mut self, RootParameterIndex: ::UINT, SrcData: ::UINT, DestOffsetIn32BitValues: ::UINT ) -> (), fn SetGraphicsRoot32BitConstant( &mut self, RootParameterIndex: ::UINT, SrcData: ::UINT, DestOffsetIn32BitValues: ::UINT ) -> (), fn SetComputeRoot32BitConstants( &mut self, RootParameterIndex: ::UINT, Num32BitValuesToSet: ::UINT, pSrcData: *const ::c_void, DestOffsetIn32BitValues: ::UINT ) -> (), fn SetGraphicsRoot32BitConstants( &mut self, RootParameterIndex: ::UINT, Num32BitValuesToSet: ::UINT, pSrcData: *const ::c_void, DestOffsetIn32BitValues: ::UINT ) -> (), fn SetComputeRootConstantBufferView( &mut self, RootParameterIndex: ::UINT, BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS ) -> (), fn SetGraphicsRootConstantBufferView( &mut self, RootParameterIndex: ::UINT, BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS ) -> (), fn SetComputeRootShaderResourceView( &mut self, RootParameterIndex: ::UINT, BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS ) -> (), fn SetGraphicsRootShaderResourceView( &mut self, RootParameterIndex: ::UINT, BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS ) -> (), fn SetComputeRootUnorderedAccessView( &mut self, RootParameterIndex: ::UINT, BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS ) -> (), fn SetGraphicsRootUnorderedAccessView( &mut self, RootParameterIndex: ::UINT, BufferLocation: ::D3D12_GPU_VIRTUAL_ADDRESS ) -> (), fn IASetIndexBuffer( &mut self, pView: *const ::D3D12_INDEX_BUFFER_VIEW ) -> (), fn IASetVertexBuffers( &mut self, StartSlot: ::UINT, NumViews: ::UINT, pViews: *const ::D3D12_VERTEX_BUFFER_VIEW ) -> (), fn SOSetTargets( &mut self, StartSlot: ::UINT, NumViews: ::UINT, pViews: *const ::D3D12_STREAM_OUTPUT_BUFFER_VIEW ) -> (), fn OMSetRenderTargets( &mut self, NumRenderTargetDescriptors: ::UINT, pRenderTargetDescriptors: *const 
::D3D12_CPU_DESCRIPTOR_HANDLE, RTsSingleHandleToDescriptorRange: ::BOOL, pDepthStencilDescriptor: *const ::D3D12_CPU_DESCRIPTOR_HANDLE ) -> (), fn ClearDepthStencilView( &mut self, DepthStencilView: ::D3D12_CPU_DESCRIPTOR_HANDLE, ClearFlags: ::D3D12_CLEAR_FLAGS, Depth: ::FLOAT, Stencil: ::UINT8, NumRects: ::UINT, pRects: *const ::D3D12_RECT ) -> (), fn ClearRenderTargetView( &mut self, RenderTargetView: ::D3D12_CPU_DESCRIPTOR_HANDLE, ColorRGBA: *const [::FLOAT; 4], NumRects: ::UINT, pRects: *const ::D3D12_RECT ) -> (), fn ClearUnorderedAccessViewUint( &mut self, ViewGPUHandleInCurrentHeap: ::D3D12_GPU_DESCRIPTOR_HANDLE, ViewCPUHandle: ::D3D12_CPU_DESCRIPTOR_HANDLE, pResource: *mut ::ID3D12Resource, Values: *const [::UINT; 4], NumRects: ::UINT, pRects: *const ::D3D12_RECT ) -> (), fn ClearUnorderedAccessViewFloat( &mut self, ViewGPUHandleInCurrentHeap: ::D3D12_GPU_DESCRIPTOR_HANDLE, ViewCPUHandle: ::D3D12_CPU_DESCRIPTOR_HANDLE, pResource: *mut ::ID3D12Resource, Values: *const [::FLOAT; 4], NumRects: ::UINT, pRects: *const ::D3D12_RECT ) -> (), fn DiscardResource( &mut self, pResource: *mut ::ID3D12Resource, pRegion: *const ::D3D12_DISCARD_REGION ) -> (), fn BeginQuery( &mut self, pQueryHeap: *mut ::ID3D12QueryHeap, Type: ::D3D12_QUERY_TYPE, Index: ::UINT ) -> (), fn EndQuery( &mut self, pQueryHeap: *mut ::ID3D12QueryHeap, Type: ::D3D12_QUERY_TYPE, Index: ::UINT ) -> (), fn ResolveQueryData( &mut self, pQueryHeap: *mut ::ID3D12QueryHeap, Type: ::D3D12_QUERY_TYPE, StartIndex: ::UINT, NumQueries: ::UINT, pDestinationBuffer: *mut ::ID3D12Resource, AlignedDestinationBufferOffset: ::UINT64 ) -> (), fn SetPredication( &mut self, pBuffer: *mut ::ID3D12Resource, AlignedBufferOffset: ::UINT64, Operation: ::D3D12_PREDICATION_OP ) -> (), fn SetMarker( &mut self, Metadata: ::UINT, pData: *const ::c_void, Size: ::UINT ) -> (), fn BeginEvent( &mut self, Metadata: ::UINT, pData: *const ::c_void, Size: ::UINT ) -> (), fn EndEvent(&mut self) -> (), fn ExecuteIndirect( &mut self, pCommandSignature: *mut ::ID3D12CommandSignature, MaxCommandCount: ::UINT, pArgumentBuffer: *mut ::ID3D12Resource, ArgumentBufferOffset: ::UINT64, pCountBuffer: *mut ::ID3D12Resource, CountBufferOffset: ::UINT64 ) -> () }); RIDL!( interface ID3D12Heap(ID3D12HeapVtbl): ID3D12Pageable(ID3D12PageableVtbl) { fn GetDesc( &mut self, __ret_val: *mut ::D3D12_HEAP_DESC ) -> *mut ::D3D12_HEAP_DESC }); RIDL!( interface ID3D12Pageable(ID3D12PageableVtbl): ID3D12DeviceChild(ID3D12DeviceChildVtbl) { }); RIDL!( interface ID3D12PipelineState(ID3D12PipelineStateVtbl): ID3D12Pageable(ID3D12PageableVtbl) { fn GetCachedBlob(&mut self, ppBlob: *mut *mut ::ID3DBlob) -> ::HRESULT }); RIDL!( interface ID3D12QueryHeap(ID3D12QueryHeapVtbl): ID3D12Pageable(ID3D12PageableVtbl) { }); RIDL!( interface ID3D12Resource(ID3D12ResourceVtbl): ID3D12Pageable(ID3D12PageableVtbl) { fn Map( &mut self, Subresource: ::UINT, pReadRange: *const ::D3D12_RANGE, ppData: *mut *mut ::c_void ) -> ::HRESULT, fn Unmap( &mut self, Subresource: ::UINT, pWrittenRange: *const ::D3D12_RANGE ) -> (), fn GetDesc( &mut self, __ret_val: *mut ::D3D12_RESOURCE_DESC ) -> *mut ::D3D12_RESOURCE_DESC, fn GetGPUVirtualAddress(&mut self) -> ::D3D12_GPU_VIRTUAL_ADDRESS, fn WriteToSubresource( &mut self, DstSubresource: ::UINT, pDstBox: *const ::D3D12_BOX, pSrcData: *const ::c_void, SrcRowPitch: ::UINT, SrcDepthPitch: ::UINT ) -> ::HRESULT, fn ReadFromSubresource( &mut self, pDstData: *mut ::c_void, DstRowPitch: ::UINT, DstDepthPitch: ::UINT, SrcSubresource: ::UINT, pSrcBox: *const ::D3D12_BOX ) -> 
::HRESULT, fn GetHeapProperties( &mut self, pHeapProperties: *mut ::D3D12_HEAP_PROPERTIES, pHeapFlags: *mut ::D3D12_HEAP_FLAGS ) -> ::HRESULT }); RIDL!( interface ID3D12RootSignatureDeserializer(ID3D12RootSignatureDeserializerVtbl): IUnknown(IUnknownVtbl) { fn GetRootSignatureDesc(&mut self) -> *const ::D3D12_ROOT_SIGNATURE_DESC }); pub type PFN_D3D12_CREATE_DEVICE = extern "system" fn (_ : *mut ::IUnknown, _ : ::D3D_FEATURE_LEVEL, _ : ::REFGUID, _ : *mut *mut ::c_void) -> ::HRESULT; pub type PFN_D3D12_CREATE_ROOT_SIGNATURE_DESERIALIZER = extern "system" fn (pSrcData: ::LPCVOID, SrcDataSizeInBytes: ::SIZE_T, pRootSignatureDeserializerInterface: ::REFGUID, ppRootSignatureDeserializer: *mut *mut ::c_void) -> ::HRESULT; pub type PFN_D3D12_GET_DEBUG_INTERFACE = extern "system" fn (_ : ::REFGUID, _ : *mut *mut ::c_void) -> ::HRESULT; pub type PFN_D3D12_SERIALIZE_ROOT_SIGNATURE = extern "system" fn (pRootSignature: *const ::D3D12_ROOT_SIGNATURE_DESC, Version: ::D3D_ROOT_SIGNATURE_VERSION, ppBlob: *mut *mut ::ID3DBlob, ppErrorBlob: *mut *mut ::ID3DBlob) -> ::HRESULT;<|fim▁end|>
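// ---------------------------------------------------------------------------
// Editor's sketch, not part of the original bindings: the UNION! accessors
// declared above emulate C unions by reusing one variant's storage, so a value
// is built by initializing the declared `u` field directly. Below, a
// transition barrier covering all subresources, the pattern passed to
// ResourceBarrier on a graphics command list. The function name and the raw
// 0xFFFFFFFF sentinel (the C D3D12_RESOURCE_BARRIER_ALL_SUBRESOURCES constant,
// which these bindings do not declare here) are assumptions for illustration.
#[allow(dead_code)]
fn example_transition_barrier(resource: *mut ID3D12Resource,
                              before: D3D12_RESOURCE_STATES,
                              after: D3D12_RESOURCE_STATES)
                              -> D3D12_RESOURCE_BARRIER {
    D3D12_RESOURCE_BARRIER {
        Type: D3D12_RESOURCE_BARRIER_TYPE_TRANSITION,
        Flags: D3D12_RESOURCE_BARRIER_FLAG_NONE,
        u: D3D12_RESOURCE_TRANSITION_BARRIER {
            pResource: resource,
            Subresource: 0xFFFFFFFF, // all subresources
            StateBefore: before,
            StateAfter: after,
        },
    }
}
// ---------------------------------------------------------------------------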
<|file_name|>import.py<|end_file_name|><|fim▁begin|>import sys
<|fim▁hole|>
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction

from annotate.models import Image


class Command(BaseCommand):
    # Reads "path<TAB>meta" (or bare "path") lines from stdin; with --run it
    # creates Image rows, otherwise it only prints what would be imported.
    help = 'Import images listed on stdin, e.g.: manage.py import --run < images.tsv'

    def add_arguments(self, parser):
        parser.add_argument('--run', action='store_true',
                            help='actually create Image rows instead of doing a dry run')

    @transaction.atomic
    def handle(self, *args, **options):
        run = options['run']
        for line in sys.stdin:
            fs = line.strip().split('\t')
            if len(fs) == 1:
                path = fs[0]
                meta = path
            elif len(fs) == 2:
                path, meta = fs
            else:
                raise CommandError("bad line: %s" % line)
            if run:
                Image.objects.create(path=path, meta=meta)
            else:
                print(path, meta)<|fim▁end|>
<|file_name|>io.rs<|end_file_name|><|fim▁begin|>use std::sync::Arc; use std::collections::HashMap; use std::ops::{Deref, DerefMut}; use serde_json; use futures::{self, future, Future}; use calls::{RemoteProcedure, Metadata, RpcMethodSimple, RpcMethod, RpcNotificationSimple, RpcNotification}; use middleware::{self, Middleware}; use types::{Error, ErrorCode, Version}; use types::{Request, Response, Call, Output}; /// A type representing middleware or RPC response before serialization. pub type FutureResponse = Box<Future<Item=Option<Response>, Error=()> + Send>; /// A type representing future string response. pub type FutureResult<F> = future::Map< future::Either<future::FutureResult<Option<Response>, ()>, F>, fn(Option<Response>) -> Option<String>, >; /// A type representing a result of a single method call. pub type FutureOutput = future::Either< Box<Future<Item=Option<Output>, Error=()> + Send>, future::FutureResult<Option<Output>, ()>, >; /// `IoHandler` json-rpc protocol compatibility #[derive(Debug, Clone, Copy)] pub enum Compatibility { /// Compatible only with JSON-RPC 1.x V1, /// Compatible only with JSON-RPC 2.0 V2, /// Compatible with both Both, } impl Default for Compatibility { fn default() -> Self { Compatibility::V2 } } impl Compatibility { fn is_version_valid(&self, version: Option<Version>) -> bool { match (*self, version) { (Compatibility::V1, None) | (Compatibility::V2, Some(Version::V2)) | (Compatibility::Both, _) => true, _ => false, } } fn default_version(&self) -> Option<Version> { match *self { Compatibility::V1 => None, Compatibility::V2 | Compatibility::Both => Some(Version::V2), } } } /// Request handler /// /// By default compatible only with jsonrpc v2 #[derive(Debug)] pub struct MetaIoHandler<T: Metadata, S: Middleware<T> = middleware::Noop> { middleware: S, compatibility: Compatibility, methods: HashMap<String, RemoteProcedure<T>>, } impl<T: Metadata> Default for MetaIoHandler<T> { fn default() -> Self { MetaIoHandler::with_compatibility(Default::default()) } } impl<T: Metadata> MetaIoHandler<T> { /// Creates new `MetaIoHandler` compatible with specified protocol version. pub fn with_compatibility(compatibility: Compatibility) -> Self { MetaIoHandler { compatibility: compatibility, middleware: Default::default(), methods: Default::default(), } } } impl<T: Metadata, S: Middleware<T>> MetaIoHandler<T, S> { /// Creates new `MetaIoHandler` pub fn new(compatibility: Compatibility, middleware: S) -> Self { MetaIoHandler { compatibility: compatibility, middleware: middleware, methods: Default::default(), } } /// Creates new `MetaIoHandler` with specified middleware. pub fn with_middleware(middleware: S) -> Self { MetaIoHandler { compatibility: Default::default(), middleware: middleware, methods: Default::default(), } } /// Adds an alias to a method. pub fn add_alias(&mut self, alias: &str, other: &str) { self.methods.insert( alias.into(), RemoteProcedure::Alias(other.into()), ); } /// Adds new supported asynchronous method pub fn add_method<F>(&mut self, name: &str, method: F) where F: RpcMethodSimple, { self.add_method_with_meta(name, move |params, _meta| { method.call(params) }) } /// Adds new supported notification pub fn add_notification<F>(&mut self, name: &str, notification: F) where F: RpcNotificationSimple, { self.add_notification_with_meta(name, move |params, _meta| notification.execute(params)) } /// Adds new supported asynchronous method with metadata support. 
pub fn add_method_with_meta<F>(&mut self, name: &str, method: F) where F: RpcMethod<T>, { self.methods.insert( name.into(), RemoteProcedure::Method(Arc::new(method)), ); } /// Adds new supported notification with metadata support.<|fim▁hole|> pub fn add_notification_with_meta<F>(&mut self, name: &str, notification: F) where F: RpcNotification<T>, { self.methods.insert( name.into(), RemoteProcedure::Notification(Arc::new(notification)), ); } /// Extend this `MetaIoHandler` with methods defined elsewhere. pub fn extend_with<F>(&mut self, methods: F) where F: Into<HashMap<String, RemoteProcedure<T>>> { self.methods.extend(methods.into()) } /// Handle given request synchronously - will block until response is available. /// If you have any asynchronous methods in your RPC it is much wiser to use /// `handle_request` instead and deal with asynchronous requests in a non-blocking fashion. pub fn handle_request_sync(&self, request: &str, meta: T) -> Option<String> { self.handle_request(request, meta).wait().expect("Handler calls can never fail.") } /// Handle given request asynchronously. pub fn handle_request(&self, request: &str, meta: T) -> FutureResult<S::Future> { use self::future::Either::{A, B}; fn as_string(response: Option<Response>) -> Option<String> { let res = response.map(write_response); debug!(target: "rpc", "Response: {}.", match res { Some(ref res) => res, None => "None", }); res } trace!(target: "rpc", "Request: {}.", request); let request = read_request(request); let result = match request { Err(error) => A(futures::finished(Some(Response::from(error, self.compatibility.default_version())))), Ok(request) => B(self.handle_rpc_request(request, meta)), }; result.map(as_string) } /// Handle deserialized RPC request. pub fn handle_rpc_request(&self, request: Request, meta: T) -> S::Future { use self::future::Either::{A, B}; self.middleware.on_request(request, meta, |request, meta| match request { Request::Single(call) => { A(self.handle_call(call, meta).map(|output| output.map(Response::Single))) }, Request::Batch(calls) => { let futures: Vec<_> = calls.into_iter().map(move |call| self.handle_call(call, meta.clone())).collect(); B(futures::future::join_all(futures).map(|outs| { let outs: Vec<_> = outs.into_iter().filter_map(|v| v).collect(); if outs.is_empty() { None } else { Some(Response::Batch(outs)) } })) }, }) } /// Handle single call asynchronously. 
pub fn handle_call(&self, call: Call, meta: T) -> FutureOutput { use self::future::Either::{A, B}; match call { Call::MethodCall(method) => { let params = method.params; let id = method.id; let jsonrpc = method.jsonrpc; let valid_version = self.compatibility.is_version_valid(jsonrpc); let call_method = |method: &Arc<RpcMethod<T>>| { let method = method.clone(); futures::lazy(move || method.call(params, meta)) }; let result = match (valid_version, self.methods.get(&method.method)) { (false, _) => Err(Error::invalid_version()), (true, Some(&RemoteProcedure::Method(ref method))) => Ok(call_method(method)), (true, Some(&RemoteProcedure::Alias(ref alias))) => match self.methods.get(alias) { Some(&RemoteProcedure::Method(ref method)) => Ok(call_method(method)), _ => Err(Error::method_not_found()), }, (true, _) => Err(Error::method_not_found()), }; match result { Ok(result) => A(Box::new( result.then(move |result| futures::finished(Some(Output::from(result, id, jsonrpc)))) )), Err(err) => B(futures::finished(Some(Output::from(Err(err), id, jsonrpc)))), } }, Call::Notification(notification) => { let params = notification.params; let jsonrpc = notification.jsonrpc; if !self.compatibility.is_version_valid(jsonrpc) { return B(futures::finished(None)); } match self.methods.get(&notification.method) { Some(&RemoteProcedure::Notification(ref notification)) => { notification.execute(params, meta); }, Some(&RemoteProcedure::Alias(ref alias)) => { if let Some(&RemoteProcedure::Notification(ref notification)) = self.methods.get(alias) { notification.execute(params, meta); } }, _ => {}, } B(futures::finished(None)) }, Call::Invalid { id } => { B(futures::finished(Some(Output::invalid_request(id, self.compatibility.default_version())))) }, } } } /// Simplified `IoHandler` with no `Metadata` associated with each request. #[derive(Debug, Default)] pub struct IoHandler<M: Metadata = ()>(MetaIoHandler<M>); // Type inference helper impl IoHandler { /// Creates new `IoHandler` without any metadata. pub fn new() -> Self { IoHandler::default() } /// Creates new `IoHandler` without any metadata compatible with specified protocol version. pub fn with_compatibility(compatibility: Compatibility) -> Self { IoHandler(MetaIoHandler::with_compatibility(compatibility)) } } impl<M: Metadata + Default> IoHandler<M> { /// Handle given string request asynchronously. pub fn handle_request(&self, request: &str) -> FutureResult<FutureResponse> { self.0.handle_request(request, M::default()) } /// Handle deserialized RPC request asynchronously. pub fn handle_rpc_request(&self, request: Request) -> FutureResponse { self.0.handle_rpc_request(request, M::default()) } /// Handle single Call asynchronously. pub fn handle_call(&self, call: Call) -> FutureOutput { self.0.handle_call(call, M::default()) } /// Handle given request synchronously - will block until response is available. /// If you have any asynchronous methods in your RPC it is much wiser to use /// `handle_request` instead and deal with asynchronous requests in a non-blocking fashion. 
pub fn handle_request_sync(&self, request: &str) -> Option<String> { self.0.handle_request_sync(request, M::default()) } } impl<M: Metadata> Deref for IoHandler<M> { type Target = MetaIoHandler<M>; fn deref(&self) -> &Self::Target { &self.0 } } impl<M: Metadata> DerefMut for IoHandler<M> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl From<IoHandler> for MetaIoHandler<()> { fn from(io: IoHandler) -> Self { io.0 } } fn read_request(request_str: &str) -> Result<Request, Error> { serde_json::from_str(request_str).map_err(|_| Error::new(ErrorCode::ParseError)) } fn write_response(response: Response) -> String { // this should never fail serde_json::to_string(&response).unwrap() } #[cfg(test)] mod tests { use futures; use types::{Value}; use super::{IoHandler, Compatibility}; #[test] fn test_io_handler() { let mut io = IoHandler::new(); io.add_method("say_hello", |_| { Ok(Value::String("hello".to_string())) }); let request = r#"{"jsonrpc": "2.0", "method": "say_hello", "params": [42, 23], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"hello","id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_string())); } #[test] fn test_io_handler_1dot0() { let mut io = IoHandler::with_compatibility(Compatibility::Both); io.add_method("say_hello", |_| { Ok(Value::String("hello".to_string())) }); let request = r#"{"method": "say_hello", "params": [42, 23], "id": 1}"#; let response = r#"{"result":"hello","id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_string())); } #[test] fn test_async_io_handler() { let mut io = IoHandler::new(); io.add_method("say_hello", |_| { futures::finished(Value::String("hello".to_string())) }); let request = r#"{"jsonrpc": "2.0", "method": "say_hello", "params": [42, 23], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"hello","id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_string())); } #[test] fn test_notification() { use std::sync::Arc; use std::sync::atomic; let mut io = IoHandler::new(); let called = Arc::new(atomic::AtomicBool::new(false)); let c = called.clone(); io.add_notification("say_hello", move |_| { c.store(true, atomic::Ordering::SeqCst); }); let request = r#"{"jsonrpc": "2.0", "method": "say_hello", "params": [42, 23]}"#; assert_eq!(io.handle_request_sync(request), None); assert_eq!(called.load(atomic::Ordering::SeqCst), true); } #[test] fn test_method_not_found() { let io = IoHandler::new(); let request = r#"{"jsonrpc": "2.0", "method": "say_hello", "params": [42, 23], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","error":{"code":-32601,"message":"Method not found"},"id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_string())); } #[test] fn test_method_alias() { let mut io = IoHandler::new(); io.add_method("say_hello", |_| { Ok(Value::String("hello".to_string())) }); io.add_alias("say_hello_alias", "say_hello"); let request = r#"{"jsonrpc": "2.0", "method": "say_hello_alias", "params": [42, 23], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"hello","id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_string())); } #[test] fn test_notification_alias() { use std::sync::Arc; use std::sync::atomic; let mut io = IoHandler::new(); let called = Arc::new(atomic::AtomicBool::new(false)); let c = called.clone(); io.add_notification("say_hello", move |_| { c.store(true, atomic::Ordering::SeqCst); }); io.add_alias("say_hello_alias", "say_hello"); let request = r#"{"jsonrpc": "2.0", "method": "say_hello_alias", "params": 
[42, 23]}"#; assert_eq!(io.handle_request_sync(request), None); assert_eq!(called.load(atomic::Ordering::SeqCst), true); } #[test] fn test_send_sync() { fn is_send_sync<T>(_obj: T) -> bool where T: Send + Sync { true } let io = IoHandler::new(); assert!(is_send_sync(io)) } }<|fim▁end|>
<|file_name|>calculator.rs<|end_file_name|><|fim▁begin|>// Calculator trait and a couple of basic implementations. // // Eli Bendersky [https://eli.thegreenplace.net] // This code is in the public domain. pub trait Calculator { fn new() -> Self; fn add(&self, a: u32, b: u32) -> u32; } pub struct Foo {}<|fim▁hole|>impl Calculator for Foo { fn new() -> Self { Self {} } fn add(&self, a: u32, b: u32) -> u32 { a + b } } pub struct Bar {} impl Calculator for Bar { fn new() -> Self { Self {} } fn add(&self, a: u32, b: u32) -> u32 { if b == 0 { a } else { self.add(a, b - 1) + 1 } } }<|fim▁end|>
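The two Calculator implementations compute the same contract two ways: Foo adds directly, while Bar is Peano-style, peeling one off b per call. Worked out, Bar.add(2, 3) = Bar.add(2, 2) + 1 = Bar.add(2, 1) + 2 = Bar.add(2, 0) + 3 = 2 + 3 = 5; the recursion depth equals b, so a stack overflow arrives long before u32 range is a concern.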
<|file_name|>Secure but True .cpp<|end_file_name|><|fim▁begin|>// // Created by 孙启龙 on 2017/8/18. // #include<cstdio> #include<iostream> #include<cstring> #include<algorithm> #include<cmath> #include<vector> #include<queue> #include<map> #include<set> #include<ctime> using namespace std; typedef long long ll; #define INF 0x3f3f3f3f #define maxn 1111 char c[] = {'A', 'H', 'I', 'M', 'O', 'T', 'U', 'V', 'W', 'X', 'Y'}, s[maxn]; int T, k, n, a[111]; void solve() { ll temp = 0; int flag = 0; for (int i = n - 1; i >= 0; i--) { temp = 11ll * temp + 10 - a[s[i]]; if (temp >= k) { flag = 1; break; } } if (!flag) { k -= temp + 1; n++;<|fim▁hole|> for (int i = 0; i < n; i++) s[i] = 'A'; solve(); } else { int up = 0; for (int i = 0; i < n; i++) { up += a[s[i]] + k % 11; s[i] = c[up % 11]; up /= 11, k /= 11; } for (int i = n - 1; i >= 0; i--) printf("%c", s[i]); printf("\n"); } } int main() { for (int i = 0; i < 11; i++) a[c[i]] = i; scanf("%d", &T); while (T--) { scanf("%d %s", &k, s); n = strlen(s); for (int i = 0, j = n - 1; i < j; i++, j--) swap(s[i], s[j]); solve(); } return 0; }<|fim▁end|>
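Reading the solver above: c[] holds the eleven letters that survive mirroring, solve() treats a string over that alphabet as a base-11 numeral with 'A' as digit 0, and the program finds the k-th string after s in shortest-first, then numeric, order. A hedged Python restatement of that reading (my interpretation of the C++, not a verified reference solution; Python's big integers make the early-exit overflow guard unnecessary):

ALPHABET = "AHIMOTUVWXY"   # mirror-symmetric letters; 'A' plays digit 0
VAL = {ch: i for i, ch in enumerate(ALPHABET)}

def kth_after(s, k):
    while True:
        value = 0
        for ch in s:                                # read s as a base-11 numeral
            value = value * 11 + VAL[ch]
        same_len_above = 11 ** len(s) - 1 - value   # strings of this length after s
        if k <= same_len_above:
            value += k
            digits = []
            for _ in range(len(s)):                 # decode back to letters
                digits.append(ALPHABET[value % 11])
                value //= 11
            return "".join(reversed(digits))
        k -= same_len_above + 1                     # the +1 lands on "AA...A"
        s = "A" * (len(s) + 1)                      # shortest string of the next length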
<|file_name|>get_model_evaluation_text_classification_sample.py<|end_file_name|><|fim▁begin|># Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # [START aiplatform_get_model_evaluation_text_classification_sample] from google.cloud import aiplatform def get_model_evaluation_text_classification_sample( project: str, model_id: str, evaluation_id: str, location: str = "us-central1", api_endpoint: str = "us-central1-aiplatform.googleapis.com", ): """ To obtain evaluation_id run the following commands where LOCATION is the region where the model is stored, PROJECT is the project ID, and MODEL_ID is the ID of your model. model_client = aiplatform.gapic.ModelServiceClient( client_options={ 'api_endpoint':'LOCATION-aiplatform.googleapis.com' } ) evaluations = model_client.list_model_evaluations(parent='projects/PROJECT/locations/LOCATION/models/MODEL_ID') print("evaluations:", evaluations) """ # The AI Platform services require regional API endpoints. client_options = {"api_endpoint": api_endpoint} # Initialize client that will be used to create and send requests. # This client only needs to be created once, and can be reused for multiple requests. client = aiplatform.gapic.ModelServiceClient(client_options=client_options) name = client.model_evaluation_path( project=project, location=location, model=model_id, evaluation=evaluation_id )<|fim▁hole|># [END aiplatform_get_model_evaluation_text_classification_sample]<|fim▁end|>
response = client.get_model_evaluation(name=name) print("response:", response)
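A call to the finished sample could look like this; all three IDs are placeholders, not real resources:

# Hypothetical invocation - substitute your own project, model and evaluation IDs.
get_model_evaluation_text_classification_sample(
    project="example-project",
    model_id="1234567890",
    evaluation_id="0987654321",
)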
<|file_name|>ui.py<|end_file_name|><|fim▁begin|>import os from sublime import active_window from sublime import find_resources from sublime import load_settings from sublime import save_settings import sublime_plugin def _load_preferences(): return load_settings('Preferences.sublime-settings') def _save_preferences(): return save_settings('Preferences.sublime-settings') class ClearWindowCommand(sublime_plugin.WindowCommand): def run(self): if self.window.is_sidebar_visible(): self.window.set_sidebar_visible(False) if self.window.is_minimap_visible(): self.window.set_minimap_visible(False) if self.window.is_menu_visible(): self.window.set_menu_visible(False) if self.window.is_status_bar_visible(): self.window.set_status_bar_visible(False) self.window.run_command('resize_groups_almost_equally') preferences = _load_preferences() preferences.set('indent_guide_options', []) preferences.set('line_numbers', False) preferences.set('draw_white_space', 'selection') preferences.set('rulers', []) _save_preferences() self.window.run_command('sort_user_settings') class EnableColorSchemeCommand(sublime_plugin.ApplicationCommand): def run(self): self.color_schemes = [] for color_scheme in find_resources('*.tmTheme'): ignore = False<|fim▁hole|> if not ignore: self.color_schemes.append(color_scheme) if len(self.color_schemes) > 1: color_scheme = _load_preferences().get('color_scheme') if color_scheme not in self.color_schemes: self.color_schemes.insert(0, color_scheme) self.window = active_window() self.window.show_quick_panel( self.color_schemes, self.on_done, 0, self.color_schemes.index(color_scheme), self.on_select ) def on_select(self, index): if index == -1: return color_scheme = self.color_schemes[index] for group in range(0, self.window.num_groups()): active_view_in_group = self.window.active_view_in_group(group) if active_view_in_group: active_view_in_group.settings().set('color_scheme', color_scheme) def on_done(self, index): if index == -1: for view in self.window.views(): view.settings().erase('color_scheme') return color_scheme = self.color_schemes[index] preferences = _load_preferences() preferences.set('color_scheme', color_scheme) _save_preferences() for view in self.window.views(): view.settings().erase('color_scheme') class EnableThemeCommand(sublime_plugin.ApplicationCommand): def run(self): self.themes = [] for theme in find_resources('*.sublime-theme'): ignore = False for exclude in ['Addon', 'tests']: if exclude in theme: ignore = True if not ignore: self.themes.append(os.path.basename(theme)) if len(self.themes) > 1: active_window().show_quick_panel(self.themes, self.on_done) def on_done(self, index): if index == -1: return theme = self.themes[index] preferences = _load_preferences() preferences.set('theme', theme) _save_preferences() class OverlayOpenFileCommand(sublime_plugin.WindowCommand): """Open File; Inspired by Vim CtrlP (https://kien.github.io/ctrlp.vim).""" def run(self, tab=None, split=None, vsplit=None): """ Open file from overlay. :param tab: Open the selected file in a new tab :param split: Open the selected file in a horizontal split :param vsplit: Open the selected file in a vertical split Defaults to opening in a new tab. 
""" transient_view = self.window.transient_view_in_group(self.window.active_group()) if not transient_view: return fname = transient_view.file_name() if not fname: return if vsplit: self.open_file_in_vertical_split(fname) elif split: self.open_file_in_horizontal_split(fname) elif tab: self.open_file_in_tab(fname) else: self.open_file_in_tab(fname) self.window.run_command('hide_overlay') def is_enabled(self): view = self.window.active_view() if view: return bool(view.settings().get('polyfill.experimental_features')) return False def open_file_in_vertical_split(self, fname): self.window.open_file(fname) self.window.run_command('create_pane_with_file', {'direction': 'right'}) def open_file_in_horizontal_split(self, fname): self.window.open_file(fname) self.window.run_command('create_pane_with_file', {'direction': 'down'}) def open_file_in_tab(self, fname): self.window.open_file(fname) class PolyfillSetLayoutCommand(sublime_plugin.WindowCommand): def run(self, cols, rows, cells): num_groups_before = self.window.num_groups() active_group_before = self.window.active_group() self.window.run_command('set_layout', { 'cols': cols, 'rows': rows, 'cells': cells }) if num_groups_before == self.window.num_groups(): # Fix issue where group focus moves when it probably shouldn't. # When the layout is not changed then the focus shouldn't change # either. Previously, if the active view before the layout change # is transient ST would move the cursor focus to a group with a # non-transient view. This can be disorienting and interrupt flow # because where the cursor focus has moved to is not always clear. self.window.focus_group(active_group_before) return if len(self.window.views_in_group(active_group_before)) < 2: # Only move the active view before layout change to the new group # if it doesn't leave the previous group without any views. return view = self.window.active_view_in_group(active_group_before) self.window.set_view_index(view, self.window.active_group(), 0) class ResetWindowCommand(sublime_plugin.WindowCommand): def run(self): self.window.run_command('reset_font_size') view = self.window.active_view() font_size = view.settings().get('font_size_default') if view else None if font_size: preferences = _load_preferences() preferences.set('font_size', font_size) _save_preferences() if not self.window.is_sidebar_visible(): self.window.set_sidebar_visible(True) if not self.window.is_minimap_visible(): self.window.set_minimap_visible(True) if not self.window.is_menu_visible(): self.window.set_menu_visible(True) if not self.window.is_status_bar_visible(): self.window.set_status_bar_visible(True) self.window.run_command('resize_groups_almost_equally') class ResizeGroupsAlmostEquallyCommand(sublime_plugin.WindowCommand): """ Resize groups equally. Make all groups (almost) equally high and wide, but use 'winheight' and 'winwidth' for the current window. Windows with 'winfixheight' set keep their height and windows with 'winfixwidth' set keep their width. @xxx winheight option @xxx winwidth option @xxx winfixheight option @xxx winfixwidth option """ def run(self): layout = self.window.layout() col_count = len(layout['cols']) row_count = len(layout['rows']) def equalise(count): size = round(1.0 / (count - 1), 2) vals = [0.0] for i in range(1, count - 1): vals.append(round(size * i, 2)) vals.append(1.0) return vals if col_count > 2: layout['cols'] = equalise(col_count) if row_count > 2: layout['rows'] = equalise(row_count) if col_count > 2 or row_count > 2: self.window.set_layout(layout)<|fim▁end|>
for exclude in ['(SL)', 'Color Highlighter', 'tests']: if exclude in color_scheme: ignore = True
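The equalise() helper inside ResizeGroupsAlmostEquallyCommand is worth a worked example: it emits count boundary fractions, evenly spaced but rounded to two decimals, which is exactly why groups come out "almost" rather than perfectly equal:

# Values produced by the equalise() helper above:
# equalise(3) -> [0.0, 0.5, 1.0]
# equalise(4) -> [0.0, 0.33, 0.66, 1.0]   # rounding leaves the last column a touch wider
# equalise(5) -> [0.0, 0.25, 0.5, 0.75, 1.0]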
<|file_name|>QuerySessionReportType.java<|end_file_name|><|fim▁begin|>/** * SAHARA Scheduling Server * * Schedules and assigns local laboratory rigs. * * @license See LICENSE in the top level directory for complete license terms. * * Copyright (c) 2009, University of Technology, Sydney * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the University of Technology, Sydney nor the names * of its contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * @author Tania Machet (tmachet) * @date 13 December 2010 */ package au.edu.uts.eng.remotelabs.schedserver.reports.intf.types; import java.io.Serializable; import java.util.ArrayList; import java.util.Calendar; import javax.xml.namespace.QName; import javax.xml.stream.XMLStreamException; import javax.xml.stream.XMLStreamReader; import javax.xml.stream.XMLStreamWriter; import org.apache.axiom.om.OMConstants; import org.apache.axiom.om.OMDataSource; import org.apache.axiom.om.OMElement; import org.apache.axiom.om.OMFactory; import org.apache.axiom.om.impl.llom.OMSourcedElementImpl; import org.apache.axis2.databinding.ADBBean; import org.apache.axis2.databinding.ADBDataSource; import org.apache.axis2.databinding.ADBException; import org.apache.axis2.databinding.utils.BeanUtil; import org.apache.axis2.databinding.utils.ConverterUtil; import org.apache.axis2.databinding.utils.reader.ADBXMLStreamReaderImpl; import org.apache.axis2.databinding.utils.writer.MTOMAwareXMLStreamWriter; /** * QuerySessionReportType bean class. 
*/ public class QuerySessionReportType implements ADBBean { /* * This type was generated from the piece of schema that had * name = QuerySessionReportType * Namespace URI = http://remotelabs.eng.uts.edu.au/schedserver/reports * Namespace Prefix = ns1 */ private static final long serialVersionUID = -5121246029757741056L; private static String generatePrefix(final String namespace) { if (namespace.equals("http://remotelabs.eng.uts.edu.au/schedserver/reports")) { return "ns1"; } return BeanUtil.getUniquePrefix(); } protected RequestorType requestor; public RequestorType getRequestor() { return this.requestor; } public void setRequestor(final RequestorType param) { this.requestor = param; } protected QueryFilterType querySelect; public QueryFilterType getQuerySelect() { return this.querySelect; } public void setQuerySelect(final QueryFilterType param) { this.querySelect = param; } protected QueryFilterType queryConstraints; protected boolean queryConstraintsTracker = false; public QueryFilterType getQueryConstraints() { return this.queryConstraints; } public void setQueryConstraints(final QueryFilterType param) { this.queryConstraints = param; this.queryConstraintsTracker = param != null; } protected Calendar startTime; protected boolean startTimeTracker = false; public Calendar getStartTime() { return this.startTime; } public void setStartTime(final Calendar param) { this.startTime = param; this.startTimeTracker = param != null; } protected Calendar endTime; protected boolean endTimeTracker = false; public Calendar getEndTime() { return this.endTime; } public void setEndTime(final Calendar param) { this.endTime = param; this.endTimeTracker = param != null; } protected PaginationType pagination; protected boolean paginationTracker = false; public PaginationType getPagination() { return this.pagination; } public void setPagination(final PaginationType param) { this.pagination = param; this.paginationTracker = param != null; } public static boolean isReaderMTOMAware(final XMLStreamReader reader) { boolean isReaderMTOMAware = false; try { isReaderMTOMAware = Boolean.TRUE.equals(reader.getProperty(OMConstants.IS_DATA_HANDLERS_AWARE)); } catch (final IllegalArgumentException e) { isReaderMTOMAware = false; } return isReaderMTOMAware; } public OMElement getOMElement(final QName parentQName, final OMFactory factory) throws ADBException { final OMDataSource dataSource = new ADBDataSource(this, parentQName) { @Override public void serialize(final MTOMAwareXMLStreamWriter xmlWriter) throws XMLStreamException { QuerySessionReportType.this.serialize(this.parentQName, factory, xmlWriter); } }; return new OMSourcedElementImpl(parentQName, factory, dataSource); } @Override public void serialize(final QName parentQName, final OMFactory factory, final MTOMAwareXMLStreamWriter xmlWriter) throws XMLStreamException, ADBException { this.serialize(parentQName, factory, xmlWriter, false); } @Override public void serialize(final QName parentQName, final OMFactory factory, final MTOMAwareXMLStreamWriter xmlWriter, final boolean serializeType) throws XMLStreamException, ADBException { String prefix = parentQName.getPrefix(); String namespace = parentQName.getNamespaceURI(); if ((namespace != null) && (namespace.trim().length() > 0)) { final String writerPrefix = xmlWriter.getPrefix(namespace); if (writerPrefix != null) { xmlWriter.writeStartElement(namespace, parentQName.getLocalPart()); } else { if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); } xmlWriter.writeStartElement(prefix, 
parentQName.getLocalPart(), namespace); xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } } else { xmlWriter.writeStartElement(parentQName.getLocalPart()); } if (serializeType) { final String namespacePrefix = this.registerPrefix(xmlWriter, "http://remotelabs.eng.uts.edu.au/schedserver/reports"); if ((namespacePrefix != null) && (namespacePrefix.trim().length() > 0)) { this.writeAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance", "type", namespacePrefix + ":QuerySessionReportType", xmlWriter); } else { this.writeAttribute("xsi", "http://www.w3.org/2001/XMLSchema-instance", "type", "QuerySessionReportType", xmlWriter); } } if (this.requestor == null) { <|fim▁hole|> if (this.querySelect == null) { throw new ADBException("querySelect cannot be null!!"); } this.querySelect.serialize(new QName("", "querySelect"), factory, xmlWriter); if (this.queryConstraintsTracker) { if (this.queryConstraints == null) { throw new ADBException("queryConstraints cannot be null!!"); } this.queryConstraints.serialize(new QName("", "queryConstraints"), factory, xmlWriter); } if (this.startTimeTracker) { namespace = ""; if (!namespace.equals("")) { prefix = xmlWriter.getPrefix(namespace); if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); xmlWriter.writeStartElement(prefix, "startTime", namespace); xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } else { xmlWriter.writeStartElement(namespace, "startTime"); } } else { xmlWriter.writeStartElement("startTime"); } if (this.startTime == null) { throw new ADBException("startTime cannot be null!!"); } else { xmlWriter.writeCharacters(ConverterUtil.convertToString(this.startTime)); } xmlWriter.writeEndElement(); } if (this.endTimeTracker) { namespace = ""; if (!namespace.equals("")) { prefix = xmlWriter.getPrefix(namespace); if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); xmlWriter.writeStartElement(prefix, "endTime", namespace); xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } else { xmlWriter.writeStartElement(namespace, "endTime"); } } else { xmlWriter.writeStartElement("endTime"); } if (this.endTime == null) { throw new ADBException("endTime cannot be null!!"); } else { xmlWriter.writeCharacters(ConverterUtil.convertToString(this.endTime)); } xmlWriter.writeEndElement(); } if (this.paginationTracker) { if (this.pagination == null) { throw new ADBException("pagination cannot be null!!"); } this.pagination.serialize(new QName("", "pagination"), factory, xmlWriter); } xmlWriter.writeEndElement(); } private void writeAttribute(final String prefix, final String namespace, final String attName, final String attValue, final XMLStreamWriter xmlWriter) throws XMLStreamException { if (xmlWriter.getPrefix(namespace) == null) { xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } xmlWriter.writeAttribute(namespace, attName, attValue); } private String registerPrefix(final XMLStreamWriter xmlWriter, final String namespace) throws XMLStreamException { String prefix = xmlWriter.getPrefix(namespace); if (prefix == null) { prefix = QuerySessionReportType.generatePrefix(namespace); while (xmlWriter.getNamespaceContext().getNamespaceURI(prefix) != null) { prefix = BeanUtil.getUniquePrefix(); } xmlWriter.writeNamespace(prefix, namespace); xmlWriter.setPrefix(prefix, namespace); } return prefix; } @Override public XMLStreamReader getPullParser(final QName qName) throws 
ADBException { final ArrayList<Serializable> elementList = new ArrayList<Serializable>(); elementList.add(new QName("", "requestor")); if (this.requestor == null) { throw new ADBException("requestor cannot be null!!"); } elementList.add(this.requestor); elementList.add(new QName("", "querySelect")); if (this.querySelect == null) { throw new ADBException("querySelect cannot be null!!"); } elementList.add(this.querySelect); if (this.queryConstraintsTracker) { elementList.add(new QName("", "queryConstraints")); if (this.queryConstraints == null) { throw new ADBException("queryConstraints cannot be null!!"); } elementList.add(this.queryConstraints); } if (this.startTimeTracker) { elementList.add(new QName("", "startTime")); if (this.startTime != null) { elementList.add(ConverterUtil.convertToString(this.startTime)); } else { throw new ADBException("startTime cannot be null!!"); } } if (this.endTimeTracker) { elementList.add(new QName("", "endTime")); if (this.endTime != null) { elementList.add(ConverterUtil.convertToString(this.endTime)); } else { throw new ADBException("endTime cannot be null!!"); } } if (this.paginationTracker) { elementList.add(new QName("", "pagination")); if (this.pagination == null) { throw new ADBException("pagination cannot be null!!"); } elementList.add(this.pagination); } return new ADBXMLStreamReaderImpl(qName, elementList.toArray(), new Object[0]); } public static class Factory { public static QuerySessionReportType parse(final XMLStreamReader reader) throws Exception { final QuerySessionReportType object = new QuerySessionReportType(); try { while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.getAttributeValue("http://www.w3.org/2001/XMLSchema-instance", "type") != null) { final String fullTypeName = reader.getAttributeValue("http://www.w3.org/2001/XMLSchema-instance", "type"); if (fullTypeName != null) { String nsPrefix = null; if (fullTypeName.indexOf(":") > -1) { nsPrefix = fullTypeName.substring(0, fullTypeName.indexOf(":")); } nsPrefix = nsPrefix == null ? 
"" : nsPrefix; final String type = fullTypeName.substring(fullTypeName.indexOf(":") + 1); if (!"QuerySessionReportType".equals(type)) { final String nsUri = reader.getNamespaceContext().getNamespaceURI(nsPrefix); return (QuerySessionReportType) ExtensionMapper.getTypeObject(nsUri, type, reader); } } } reader.next(); while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "requestor").equals(reader.getName())) { object.setRequestor(RequestorType.Factory.parse(reader)); reader.next(); } else { throw new ADBException("Unexpected subelement " + reader.getLocalName()); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "querySelect").equals(reader.getName())) { object.setQuerySelect(QueryFilterType.Factory.parse(reader)); reader.next(); } else { throw new ADBException("Unexpected subelement " + reader.getLocalName()); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "queryConstraints").equals(reader.getName())) { object.setQueryConstraints(QueryFilterType.Factory.parse(reader)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "startTime").equals(reader.getName())) { final String content = reader.getElementText(); object.setStartTime(ConverterUtil.convertToDateTime(content)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "endTime").equals(reader.getName())) { final String content = reader.getElementText(); object.setEndTime(ConverterUtil.convertToDateTime(content)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement() && new QName("", "pagination").equals(reader.getName())) { object.setPagination(PaginationType.Factory.parse(reader)); reader.next(); } while (!reader.isStartElement() && !reader.isEndElement()) { reader.next(); } if (reader.isStartElement()) { throw new ADBException("Unexpected subelement " + reader.getLocalName()); } } catch (final XMLStreamException e) { throw new Exception(e); } return object; } } }<|fim▁end|>
throw new ADBException("requestor cannot be null!!"); } this.requestor.serialize(new QName("", "requestor"), factory, xmlWriter);
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>import chanutils.torrent from chanutils import get_json, movie_title_year from playitem import TorrentPlayItem, PlayItemList _SEARCH_URL = 'https://yts.mx/api/v2/list_movies.json' _FEEDLIST = [ {'title':'Latest', 'url':'https://yts.mx/api/v2/list_movies.json?limit=50'}, {'title':'Highest Rated', 'url':'https://yts.mx/api/v2/list_movies.json?sort_by=rating&limit=50'}, {'title':'Action', 'url':'https://yts.mx/api/v2/list_movies.json?genre=action&sort_by=rating&limit=50'}, {'title':'Adventure', 'url':'https://yts.mx/api/v2/list_movies.json?genre=adventure&sort_by=rating&limit=50'}, {'title':'Animation', 'url':'https://yts.mx/api/v2/list_movies.json?genre=animation&sort_by=rating&limit=50'}, {'title':'Biography', 'url':'https://yts.mx/api/v2/list_movies.json?genre=biography&sort_by=rating&limit=50'}, {'title':'Comedy', 'url':'https://yts.mx/api/v2/list_movies.json?genre=comedy&sort_by=rating&limit=50'}, {'title':'Crime', 'url':'https://yts.mx/api/v2/list_movies.json?genre=crime&sort_by=rating&limit=50'}, {'title':'Documentary', 'url':'https://yts.mx/api/v2/list_movies.json?genre=documentary&sort_by=rating&limit=50'}, {'title':'Drama', 'url':'https://yts.mx/api/v2/list_movies.json?genre=drama&sort_by=rating&limit=50'}, {'title':'Family', 'url':'https://yts.mx/api/v2/list_movies.json?genre=family&sort_by=rating&limit=50'}, {'title':'Fantasy', 'url':'https://yts.mx/api/v2/list_movies.json?genre=fantasy&sort_by=rating&limit=50'}, {'title':'Film-Noir', 'url':'https://yts.mx/api/v2/list_movies.json?genre=film-noir&sort_by=rating&limit=50'}, {'title':'History', 'url':'https://yts.mx/api/v2/list_movies.json?genre=history&sort_by=rating&limit=50'}, {'title':'Horror', 'url':'https://yts.mx/api/v2/list_movies.json?genre=horror&sort_by=rating&limit=50'}, {'title':'Music', 'url':'https://yts.mx/api/v2/list_movies.json?genre=music&sort_by=rating&limit=50'}, {'title':'Musical', 'url':'https://yts.mx/api/v2/list_movies.json?genre=musical&sort_by=rating&limit=50'}, {'title':'Mystery', 'url':'https://yts.mx/api/v2/list_movies.json?genre=mystery&sort_by=rating&limit=50'}, {'title':'Romance', 'url':'https://yts.mx/api/v2/list_movies.json?genre=romance&sort_by=rating&limit=50'}, {'title':'Sci-Fi', 'url':'https://yts.mx/api/v2/list_movies.json?genre=sci-fi&sort_by=rating&limit=50'}, {'title':'Sport', 'url':'https://yts.mx/api/v2/list_movies.json?genre=sport&sort_by=rating&limit=50'}, {'title':'Thriller', 'url':'https://yts.mx/api/v2/list_movies.json?genre=thriller&sort_by=rating&limit=50'}, {'title':'War', 'url':'https://yts.mx/api/v2/list_movies.json?genre=war&sort_by=rating&limit=50'}, {'title':'Western', 'url':'https://yts.mx/api/v2/list_movies.json?genre=western&sort_by=rating&limit=50'}, ] def name(): return 'YTS Torrents' def image(): return 'icon.png' def description(): return "YTS Torrents Channel (<a target='_blank' href='https://yts.mx'>https://yts.mx</a>)." 
def feedlist(): return _FEEDLIST def feed(idx): data = get_json(_FEEDLIST[idx]['url'], proxy=True) return _extract(data) def search(q): params = {'query_term':q, 'limit':50}<|fim▁hole|> results = PlayItemList() if 'data' not in data: return results if 'movies' not in data['data']: return results rtree = data['data']['movies'] for r in rtree: if 'torrents' not in r: continue torrent = _smallest_size(r['torrents']) title = r['title_long'] img = r['medium_cover_image'] # Proxy img = "https://img.yts.mx" + img[14:] url = torrent['url'] size = torrent['size'] seeds = torrent['seeds'] peers = torrent['peers'] subtitle = chanutils.torrent.subtitle(size, seeds, peers) rating = str(r['rating']) if rating[-1] == '0': rating = rating[:-1] imdb = "<a target='_blank' href='http://www.imdb.com/title/" + r['imdb_code'] + "/'>IMDB Rating: " + rating + "</a>" synopsis = imdb subs = movie_title_year(title) subs['imdb'] = r['imdb_code'] results.add(TorrentPlayItem(title, img, url, subtitle, synopsis, subs)) return results def _smallest_size(torrlist): size = torrlist[0]['size_bytes'] torrent = torrlist[0] for t in torrlist: if t['size_bytes'] < size: size = t['size_bytes'] torrent = t return torrent<|fim▁end|>
data = get_json(_SEARCH_URL, params=params, proxy=True) return _extract(data) def _extract(data):
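_smallest_size() simply scans for the minimum size_bytes, so a movie offered in several qualities plays in its leanest encode. A quick illustration with hypothetical torrent dicts reduced to the fields the helper touches:

torrents = [
    {"size_bytes": 2_147_483_648, "url": "magnet:?xt=1080p"},  # placeholder entries
    {"size_bytes": 734_003_200, "url": "magnet:?xt=720p"},
]
assert _smallest_size(torrents)["url"] == "magnet:?xt=720p"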
<|file_name|>set_language.ts<|end_file_name|><|fim▁begin|>import {statementType} from "../_utils"; import * as Statements from "../../../src/abap/2_statements/statements"; const tests = [ "SET LANGUAGE SY-LANGU.",<|fim▁hole|>statementType(tests, "SET LANGUAGE", Statements.SetLanguage);<|fim▁end|>
];
<|file_name|>subclassing.py<|end_file_name|><|fim▁begin|>"""============================= Subclassing ndarray in python ============================= Introduction ------------ Subclassing ndarray is relatively simple, but it has some complications compared to other Python objects. On this page we explain the machinery that allows you to subclass ndarray, and the implications for implementing a subclass. ndarrays and object creation ============================ Subclassing ndarray is complicated by the fact that new instances of ndarray classes can come about in three different ways. These are: #. Explicit constructor call - as in ``MySubClass(params)``. This is the usual route to Python instance creation. #. View casting - casting an existing ndarray as a given subclass #. New from template - creating a new instance from a template instance. Examples include returning slices from a subclassed array, creating return types from ufuncs, and copying arrays. See :ref:`new-from-template` for more details The last two are characteristics of ndarrays - in order to support things like array slicing. The complications of subclassing ndarray are due to the mechanisms numpy has to support these latter two routes of instance creation. .. _view-casting: View casting ------------ *View casting* is the standard ndarray mechanism by which you take an ndarray of any subclass, and return a view of the array as another (specified) subclass: >>> import numpy as np >>> # create a completely useless ndarray subclass >>> class C(np.ndarray): pass >>> # create a standard ndarray >>> arr = np.zeros((3,)) >>> # take a view of it, as our useless subclass >>> c_arr = arr.view(C) >>> type(c_arr) <class 'C'> .. _new-from-template: Creating new from template -------------------------- New instances of an ndarray subclass can also come about by a very similar mechanism to :ref:`view-casting`, when numpy finds it needs to create a new instance from a template instance. The most obvious place this has to happen is when you are taking slices of subclassed arrays. For example: >>> v = c_arr[1:] >>> type(v) # the view is of type 'C' <class 'C'> >>> v is c_arr # but it's a new instance False The slice is a *view* onto the original ``c_arr`` data. So, when we take a view from the ndarray, we return a new ndarray, of the same class, that points to the data in the original. There are other points in the use of ndarrays where we need such views, such as copying arrays (``c_arr.copy()``), creating ufunc output arrays (see also :ref:`array-wrap`), and reducing methods (like ``c_arr.mean()``. Relationship of view casting and new-from-template -------------------------------------------------- These paths both use the same machinery. We make the distinction here, because they result in different input to your methods. Specifically, :ref:`view-casting` means you have created a new instance of your array type from any potential subclass of ndarray. :ref:`new-from-template` means you have created a new instance of your class from a pre-existing instance, allowing you - for example - to copy across attributes that are particular to your subclass. Implications for subclassing ---------------------------- If we subclass ndarray, we need to deal not only with explicit construction of our array type, but also :ref:`view-casting` or :ref:`new-from-template`. NumPy has the machinery to do this, and this machinery that makes subclassing slightly non-standard. 
There are two aspects to the machinery that ndarray uses to support views and new-from-template in subclasses. The first is the use of the ``ndarray.__new__`` method for the main work of object initialization, rather then the more usual ``__init__`` method. The second is the use of the ``__array_finalize__`` method to allow subclasses to clean up after the creation of views and new instances from templates. A brief Python primer on ``__new__`` and ``__init__`` ===================================================== ``__new__`` is a standard Python method, and, if present, is called before ``__init__`` when we create a class instance. See the `python __new__ documentation <https://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail. For example, consider the following Python code: .. testcode:: class C(object): def __new__(cls, *args): print('Cls in __new__:', cls) print('Args in __new__:', args) # The `object` type __new__ method takes a single argument. return object.__new__(cls) def __init__(self, *args): print('type(self) in __init__:', type(self)) print('Args in __init__:', args) meaning that we get: >>> c = C('hello') Cls in __new__: <class 'C'> Args in __new__: ('hello',) type(self) in __init__: <class 'C'> Args in __init__: ('hello',) When we call ``C('hello')``, the ``__new__`` method gets its own class as first argument, and the passed argument, which is the string ``'hello'``. After python calls ``__new__``, it usually (see below) calls our ``__init__`` method, with the output of ``__new__`` as the first argument (now a class instance), and the passed arguments following. As you can see, the object can be initialized in the ``__new__`` method or the ``__init__`` method, or both, and in fact ndarray does not have an ``__init__`` method, because all the initialization is done in the ``__new__`` method. Why use ``__new__`` rather than just the usual ``__init__``? Because in some cases, as for ndarray, we want to be able to return an object of some other class. Consider the following: .. testcode:: class D(C): def __new__(cls, *args): print('D cls is:', cls) print('D args in __new__:', args) return C.__new__(C, *args) def __init__(self, *args): # we never get here print('In D __init__') meaning that: >>> obj = D('hello') D cls is: <class 'D'> D args in __new__: ('hello',) Cls in __new__: <class 'C'> Args in __new__: ('hello',) >>> type(obj) <class 'C'> The definition of ``C`` is the same as before, but for ``D``, the ``__new__`` method returns an instance of class ``C`` rather than ``D``. Note that the ``__init__`` method of ``D`` does not get called. In general, when the ``__new__`` method returns an object of class other than the class in which it is defined, the ``__init__`` method of that class is not called. This is how subclasses of the ndarray class are able to return views that preserve the class type. When taking a view, the standard ndarray machinery creates the new ndarray object with something like:: obj = ndarray.__new__(subtype, shape, ... where ``subdtype`` is the subclass. Thus the returned view is of the same class as the subclass, rather than being of class ``ndarray``. That solves the problem of returning views of the same type, but now we have a new problem. The machinery of ndarray can set the class this way, in its standard methods for taking views, but the ndarray ``__new__`` method knows nothing of what we have done in our own ``__new__`` method in order to set attributes, and so on. 
(Aside - why not call ``obj = subdtype.__new__(...`` then? Because we may not have a ``__new__`` method with the same call signature). The role of ``__array_finalize__`` ================================== ``__array_finalize__`` is the mechanism that numpy provides to allow subclasses to handle the various ways that new instances get created. Remember that subclass instances can come about in these three ways: #. explicit constructor call (``obj = MySubClass(params)``). This will call the usual sequence of ``MySubClass.__new__`` then (if it exists) ``MySubClass.__init__``. #. :ref:`view-casting` #. :ref:`new-from-template` Our ``MySubClass.__new__`` method only gets called in the case of the explicit constructor call, so we can't rely on ``MySubClass.__new__`` or ``MySubClass.__init__`` to deal with the view casting and new-from-template. It turns out that ``MySubClass.__array_finalize__`` *does* get called for all three methods of object creation, so this is where our object creation housekeeping usually goes. * For the explicit constructor call, our subclass will need to create a new ndarray instance of its own class. In practice this means that we, the authors of the code, will need to make a call to ``ndarray.__new__(MySubClass,...)``, a class-hierarchy prepared call to ``super(MySubClass, cls).__new__(cls, ...)``, or do view casting of an existing array (see below) * For view casting and new-from-template, the equivalent of ``ndarray.__new__(MySubClass,...`` is called, at the C level. The arguments that ``__array_finalize__`` receives differ for the three methods of instance creation above. The following code allows us to look at the call sequences and arguments: .. testcode:: import numpy as np class C(np.ndarray): def __new__(cls, *args, **kwargs): print('In __new__ with class %s' % cls) return super(C, cls).__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): # in practice you probably will not need or want an __init__ # method for your subclass print('In __init__ with class %s' % self.__class__) def __array_finalize__(self, obj): print('In array_finalize:') print(' self type is %s' % type(self)) print(' obj type is %s' % type(obj)) Now: >>> # Explicit constructor >>> c = C((10,))<|fim▁hole|>In __new__ with class <class 'C'> In array_finalize: self type is <class 'C'> obj type is <type 'NoneType'> In __init__ with class <class 'C'> >>> # View casting >>> a = np.arange(10) >>> cast_a = a.view(C) In array_finalize: self type is <class 'C'> obj type is <type 'numpy.ndarray'> >>> # Slicing (example of new-from-template) >>> cv = c[:1] In array_finalize: self type is <class 'C'> obj type is <class 'C'> The signature of ``__array_finalize__`` is:: def __array_finalize__(self, obj): One sees that the ``super`` call, which goes to ``ndarray.__new__``, passes ``__array_finalize__`` the new object, of our own class (``self``) as well as the object from which the view has been taken (``obj``). As you can see from the output above, the ``self`` is always a newly created instance of our subclass, and the type of ``obj`` differs for the three instance creation methods: * When called from the explicit constructor, ``obj`` is ``None`` * When called from view casting, ``obj`` can be an instance of any subclass of ndarray, including our own. * When called in new-from-template, ``obj`` is another instance of our own subclass, that we might use to update the new ``self`` instance. 
Because ``__array_finalize__`` is the only method that always sees new instances being created, it is the sensible place to fill in instance defaults for new object attributes, among other tasks. This may be clearer with an example. Simple example - adding an extra attribute to ndarray ----------------------------------------------------- .. testcode:: import numpy as np class InfoArray(np.ndarray): def __new__(subtype, shape, dtype=float, buffer=None, offset=0, strides=None, order=None, info=None): # Create the ndarray instance of our type, given the usual # ndarray input arguments. This will call the standard # ndarray constructor, but return an object of our type. # It also triggers a call to InfoArray.__array_finalize__ obj = super(InfoArray, subtype).__new__(subtype, shape, dtype, buffer, offset, strides, order) # set the new 'info' attribute to the value passed obj.info = info # Finally, we must return the newly created object: return obj def __array_finalize__(self, obj): # ``self`` is a new object resulting from # ndarray.__new__(InfoArray, ...), therefore it only has # attributes that the ndarray.__new__ constructor gave it - # i.e. those of a standard ndarray. # # We could have got to the ndarray.__new__ call in 3 ways: # From an explicit constructor - e.g. InfoArray(): # obj is None # (we're in the middle of the InfoArray.__new__ # constructor, and self.info will be set when we return to # InfoArray.__new__) if obj is None: return # From view casting - e.g arr.view(InfoArray): # obj is arr # (type(obj) can be InfoArray) # From new-from-template - e.g infoarr[:3] # type(obj) is InfoArray # # Note that it is here, rather than in the __new__ method, # that we set the default value for 'info', because this # method sees all creation of default objects - with the # InfoArray.__new__ constructor, but also with # arr.view(InfoArray). self.info = getattr(obj, 'info', None) # We do not need to return anything Using the object looks like this: >>> obj = InfoArray(shape=(3,)) # explicit constructor >>> type(obj) <class 'InfoArray'> >>> obj.info is None True >>> obj = InfoArray(shape=(3,), info='information') >>> obj.info 'information' >>> v = obj[1:] # new-from-template - here - slicing >>> type(v) <class 'InfoArray'> >>> v.info 'information' >>> arr = np.arange(10) >>> cast_arr = arr.view(InfoArray) # view casting >>> type(cast_arr) <class 'InfoArray'> >>> cast_arr.info is None True This class isn't very useful, because it has the same constructor as the bare ndarray object, including passing in buffers and shapes and so on. We would probably prefer the constructor to be able to take an already formed ndarray from the usual numpy calls to ``np.array`` and return an object. Slightly more realistic example - attribute added to existing array ------------------------------------------------------------------- Here is a class that takes a standard ndarray that already exists, casts as our type, and adds an extra attribute. .. 
testcode::

  import numpy as np

  class RealisticInfoArray(np.ndarray):

      def __new__(cls, input_array, info=None):
          # Input array is an already formed ndarray instance
          # We first cast to be our class type
          obj = np.asarray(input_array).view(cls)
          # add the new attribute to the created instance
          obj.info = info
          # Finally, we must return the newly created object:
          return obj

      def __array_finalize__(self, obj):
          # see InfoArray.__array_finalize__ for comments
          if obj is None: return
          self.info = getattr(obj, 'info', None)

So:

>>> arr = np.arange(5)
>>> obj = RealisticInfoArray(arr, info='information')
>>> type(obj)
<class 'RealisticInfoArray'>
>>> obj.info
'information'
>>> v = obj[1:]
>>> type(v)
<class 'RealisticInfoArray'>
>>> v.info
'information'

.. _array-ufunc:

``__array_ufunc__`` for ufuncs
------------------------------

.. versionadded:: 1.13

A subclass can override what happens when executing numpy ufuncs on it by
overriding the default ``ndarray.__array_ufunc__`` method. This method is
executed *instead* of the ufunc and should return either the result of the
operation, or :obj:`NotImplemented` if the operation requested is not
implemented.

The signature of ``__array_ufunc__`` is::

    def __array_ufunc__(ufunc, method, *inputs, **kwargs):

- *ufunc* is the ufunc object that was called.
- *method* is a string indicating how the Ufunc was called, either
  ``"__call__"`` to indicate it was called directly, or one of its
  :ref:`methods<ufuncs.methods>`: ``"reduce"``, ``"accumulate"``,
  ``"reduceat"``, ``"outer"``, or ``"at"``.
- *inputs* is a tuple of the input arguments to the ``ufunc``
- *kwargs* contains any optional or keyword arguments passed to the
  function. This includes any ``out`` arguments, which are always
  contained in a tuple.

A typical implementation would convert any inputs or outputs that are
instances of one's own class, pass everything on to a superclass using
``super()``, and finally return the results after possible
back-conversion. An example, taken from the test case
``test_ufunc_override_with_super`` in ``core/tests/test_umath.py``, is the
following.

.. testcode::

    import numpy as np

    class A(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            args = []
            in_no = []
            for i, input_ in enumerate(inputs):
                if isinstance(input_, A):
                    in_no.append(i)
                    args.append(input_.view(np.ndarray))
                else:
                    args.append(input_)

            outputs = kwargs.pop('out', None)
            out_no = []
            if outputs:
                out_args = []
                for j, output in enumerate(outputs):
                    if isinstance(output, A):
                        out_no.append(j)
                        out_args.append(output.view(np.ndarray))
                    else:
                        out_args.append(output)
                kwargs['out'] = tuple(out_args)
            else:
                outputs = (None,) * ufunc.nout

            info = {}
            if in_no:
                info['inputs'] = in_no
            if out_no:
                info['outputs'] = out_no

            results = super(A, self).__array_ufunc__(ufunc, method,
                                                     *args, **kwargs)
            if results is NotImplemented:
                return NotImplemented

            if method == 'at':
                if isinstance(inputs[0], A):
                    inputs[0].info = info
                return

            if ufunc.nout == 1:
                results = (results,)
            results = tuple((np.asarray(result).view(A)
                             if output is None else output)
                            for result, output in zip(results, outputs))
            if results and isinstance(results[0], A):
                results[0].info = info

            return results[0] if len(results) == 1 else results

So, this class does not actually do anything interesting: it just converts
any instances of its own to regular ndarray (otherwise, we'd get infinite
recursion!), and adds an ``info`` dictionary that tells which inputs and
outputs it converted.
Hence, e.g.,

>>> a = np.arange(5.).view(A)
>>> b = np.sin(a)
>>> b.info
{'inputs': [0]}
>>> b = np.sin(np.arange(5.), out=(a,))
>>> b.info
{'outputs': [0]}
>>> a = np.arange(5.).view(A)
>>> b = np.ones(1).view(A)
>>> c = a + b
>>> c.info
{'inputs': [0, 1]}
>>> a += b
>>> a.info
{'inputs': [0, 1], 'outputs': [0]}

Note that another approach would be to use
``getattr(ufunc, method)(*inputs, **kwargs)`` instead of the ``super`` call.
For this example, the result would be identical, but there is a difference if
another operand also defines ``__array_ufunc__``. E.g., let's assume that we
evaluate ``np.add(a, b)``, where ``b`` is an instance of another class ``B``
that has an override. If you use ``super`` as in the example,
``ndarray.__array_ufunc__`` will notice that ``b`` has an override, which
means it cannot evaluate the result itself. Thus, it will return
`NotImplemented` and so will our class ``A``. Then, control will be passed
over to ``b``, which either knows how to deal with us and produces a result,
or does not and returns `NotImplemented`, raising a ``TypeError``.

If instead, we replace our ``super`` call with ``getattr(ufunc, method)``, we
effectively do ``np.add(a.view(np.ndarray), b)``. Again, ``B.__array_ufunc__``
will be called, but now it sees an ``ndarray`` as the other argument. Likely,
it will know how to handle this, and return a new instance of the ``B`` class
to us. Our example class is not set up to handle this, but it might well be
the best approach if, e.g., one were to re-implement ``MaskedArray`` using
``__array_ufunc__``.

As a final note: if the ``super`` route is suited to a given class, an
advantage of using it is that it helps in constructing class hierarchies.
E.g., suppose that our other class ``B`` also used the ``super`` in its
``__array_ufunc__`` implementation, and we created a class ``C`` that depended
on both, i.e., ``class C(A, B)`` (with, for simplicity, no other
``__array_ufunc__`` override). Then any ufunc on an instance of ``C`` would
pass on to ``A.__array_ufunc__``, the ``super`` call in ``A`` would go to
``B.__array_ufunc__``, and the ``super`` call in ``B`` would go to
``ndarray.__array_ufunc__``, thus allowing ``A`` and ``B`` to collaborate.

.. _array-wrap:

``__array_wrap__`` for ufuncs and other functions
-------------------------------------------------

Prior to numpy 1.13, the behaviour of ufuncs could only be tuned using
``__array_wrap__`` and ``__array_prepare__``. These two allowed one to change
the output type of a ufunc, but, in contrast to ``__array_ufunc__``, did not
allow one to make any changes to the inputs. It is hoped to eventually
deprecate these, but ``__array_wrap__`` is also used by other numpy functions
and methods, such as ``squeeze``, so at the present time is still needed for
full functionality.

Conceptually, ``__array_wrap__`` "wraps up the action" in the sense of
allowing a subclass to set the type of the return value and update
attributes and metadata. Let's show how this works with an example. First
we return to the simpler example subclass, but with a different name and
some print statements:

..
testcode::

  import numpy as np

  class MySubClass(np.ndarray):

      def __new__(cls, input_array, info=None):
          obj = np.asarray(input_array).view(cls)
          obj.info = info
          return obj

      def __array_finalize__(self, obj):
          print('In __array_finalize__:')
          print('   self is %s' % repr(self))
          print('   obj is %s' % repr(obj))
          if obj is None: return
          self.info = getattr(obj, 'info', None)

      def __array_wrap__(self, out_arr, context=None):
          print('In __array_wrap__:')
          print('   self is %s' % repr(self))
          print('   arr is %s' % repr(out_arr))
          # then just call the parent
          return super(MySubClass, self).__array_wrap__(out_arr, context)

We run a ufunc on an instance of our new array:

>>> obj = MySubClass(np.arange(5), info='spam')
In __array_finalize__:
   self is MySubClass([0, 1, 2, 3, 4])
   obj is array([0, 1, 2, 3, 4])
>>> arr2 = np.arange(5)+1
>>> ret = np.add(arr2, obj)
In __array_wrap__:
   self is MySubClass([0, 1, 2, 3, 4])
   arr is array([1, 3, 5, 7, 9])
In __array_finalize__:
   self is MySubClass([1, 3, 5, 7, 9])
   obj is MySubClass([0, 1, 2, 3, 4])
>>> ret
MySubClass([1, 3, 5, 7, 9])
>>> ret.info
'spam'

Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method
with arguments ``self`` as ``obj``, and ``out_arr`` as the (ndarray) result
of the addition. In turn, the default ``__array_wrap__``
(``ndarray.__array_wrap__``) has cast the result to class ``MySubClass``,
and called ``__array_finalize__`` - hence the copying of the ``info``
attribute. This has all happened at the C level.

But, we could do anything we wanted:

.. testcode::

  class SillySubClass(np.ndarray):

      def __array_wrap__(self, arr, context=None):
          return 'I lost your data'

>>> arr1 = np.arange(5)
>>> obj = arr1.view(SillySubClass)
>>> arr2 = np.arange(5)
>>> ret = np.multiply(obj, arr2)
>>> ret
'I lost your data'

So, by defining a specific ``__array_wrap__`` method for our subclass, we
can tweak the output from ufuncs. The ``__array_wrap__`` method requires
``self``, then an argument - which is the result of the ufunc - and an
optional parameter *context*. This parameter is returned by ufuncs as a
3-element tuple: (name of the ufunc, arguments of the ufunc, domain of the
ufunc), but is not set by other numpy functions. Though, as seen above, it
is possible to do otherwise, ``__array_wrap__`` should return an instance
of its containing class. See the masked array subclass for an
implementation.

In addition to ``__array_wrap__``, which is called on the way out of the
ufunc, there is also an ``__array_prepare__`` method which is called on the
way into the ufunc, after the output arrays are created but before any
computation has been performed. The default implementation does nothing
but pass through the array. ``__array_prepare__`` should not attempt to
access the array data or resize the array, it is intended for setting the
output array type, updating attributes and metadata, and performing any
checks based on the input that may be desired before computation begins.
Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
subclass thereof or raise an error.

Extra gotchas - custom ``__del__`` methods and ndarray.base
-----------------------------------------------------------

One of the problems that ndarray solves is keeping track of memory
ownership of ndarrays and their views. Consider the case where we have
created an ndarray, ``arr``, and have taken a slice with ``v = arr[1:]``.
The two objects are looking at the same memory.
NumPy keeps track of where the data came from for a particular array or view, with the ``base`` attribute: >>> # A normal ndarray, that owns its own data >>> arr = np.zeros((4,)) >>> # In this case, base is None >>> arr.base is None True >>> # We take a view >>> v1 = arr[1:] >>> # base now points to the array that it derived from >>> v1.base is arr True >>> # Take a view of a view >>> v2 = v1[1:] >>> # base points to the view it derived from >>> v2.base is v1 True In general, if the array owns its own memory, as for ``arr`` in this case, then ``arr.base`` will be None - there are some exceptions to this - see the numpy book for more details. The ``base`` attribute is useful in being able to tell whether we have a view or the original array. This in turn can be useful if we need to know whether or not to do some specific cleanup when the subclassed array is deleted. For example, we may only want to do the cleanup if the original array is deleted, but not the views. For an example of how this can work, have a look at the ``memmap`` class in ``numpy.core``. Subclassing and Downstream Compatibility ---------------------------------------- When sub-classing ``ndarray`` or creating duck-types that mimic the ``ndarray`` interface, it is your responsibility to decide how aligned your APIs will be with those of numpy. For convenience, many numpy functions that have a corresponding ``ndarray`` method (e.g., ``sum``, ``mean``, ``take``, ``reshape``) work by checking if the first argument to a function has a method of the same name. If it exists, the method is called instead of coercing the arguments to a numpy array. For example, if you want your sub-class or duck-type to be compatible with numpy's ``sum`` function, the method signature for this object's ``sum`` method should be the following: .. testcode:: def sum(self, axis=None, dtype=None, out=None, keepdims=False): ... This is the exact same method signature for ``np.sum``, so now if a user calls ``np.sum`` on this object, numpy will call the object's own ``sum`` method and pass in these arguments enumerated above in the signature, and no errors will be raised because the signatures are completely compatible with each other. If, however, you decide to deviate from this signature and do something like this: .. testcode:: def sum(self, axis=None, dtype=None): ... This object is no longer compatible with ``np.sum`` because if you call ``np.sum``, it will pass in unexpected arguments ``out`` and ``keepdims``, causing a TypeError to be raised. If you wish to maintain compatibility with numpy and its subsequent versions (which might add new keyword arguments) but do not want to surface all of numpy's arguments, your function's signature should accept ``**kwargs``. For example: .. testcode:: def sum(self, axis=None, dtype=None, **unused_kwargs): ... This object is now compatible with ``np.sum`` again because any extraneous arguments (i.e. keywords that are not ``axis`` or ``dtype``) will be hidden away in the ``**unused_kwargs`` parameter. """ from __future__ import division, absolute_import, print_function<|fim▁end|>
<|file_name|>fix_omp.cpp<|end_file_name|><|fim▁begin|>// clang-format off /* ---------------------------------------------------------------------- LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator https://www.lammps.org/, Sandia National Laboratories Steve Plimpton, [email protected] Copyright (2003) Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. This software is distributed under the GNU General Public License. See the README file in the top-level LAMMPS directory. ------------------------------------------------------------------------- */<|fim▁hole|> /* ---------------------------------------------------------------------- Contributing author: Axel Kohlmeyer (Temple U) OpenMP based threading support for LAMMPS ------------------------------------------------------------------------- */ #include "fix_omp.h" #include "thr_data.h" #include "atom.h" #include "comm.h" #include "error.h" #include "force.h" #include "neighbor.h" #include "neigh_request.h" #include "universe.h" #include "update.h" #include "pair_hybrid.h" #include "bond_hybrid.h" #include "angle_hybrid.h" #include "dihedral_hybrid.h" #include "improper_hybrid.h" #include "kspace.h" #include <cstring> #include "omp_compat.h" #if defined(_OPENMP) #include <omp.h> #endif #include "suffix.h" using namespace LAMMPS_NS; using namespace FixConst; static int get_tid() { int tid = 0; #if defined(_OPENMP) tid = omp_get_thread_num(); #endif return tid; } /* ---------------------------------------------------------------------- */ FixOMP::FixOMP(LAMMPS *lmp, int narg, char **arg) : Fix(lmp, narg, arg), thr(nullptr), last_omp_style(nullptr), last_pair_hybrid(nullptr), _nthr(-1), _neighbor(true), _mixed(false), _reduced(true), _pair_compute_flag(false), _kspace_compute_flag(false) { if (narg < 4) error->all(FLERR,"Illegal package omp command"); int nthreads = 1; if (narg > 3) { #if defined(_OPENMP) if (strcmp(arg[3],"0") == 0) #pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(nthreads) nthreads = omp_get_num_threads(); else nthreads = utils::inumeric(FLERR,arg[3],false,lmp); #endif } #if defined(_OPENMP) if (nthreads < 1) error->all(FLERR,"Illegal number of OpenMP threads requested"); int reset_thr = 0; #endif if (nthreads != comm->nthreads) { #if defined(_OPENMP) reset_thr = 1; omp_set_num_threads(nthreads); #endif comm->nthreads = nthreads; } // optional keywords int iarg = 4; while (iarg < narg) { if (strcmp(arg[iarg],"neigh") == 0) { if (iarg+2 > narg) error->all(FLERR,"Illegal package omp command"); _neighbor = utils::logical(FLERR,arg[iarg+1],false,lmp) != 0; iarg += 2; } else error->all(FLERR,"Illegal package omp command"); } // print summary of settings if (comm->me == 0) { #if defined(_OPENMP) const char * const nmode = _neighbor ? "multi-threaded" : "serial"; if (reset_thr) utils::logmesg(lmp, "set {} OpenMP thread(s) per MPI task\n", nthreads); utils::logmesg(lmp, "using {} neighbor list subroutines\n", nmode); #else error->warning(FLERR,"OpenMP support not enabled during compilation; " "using 1 thread only."); #endif } // allocate list for per thread accumulator manager class instances // and then have each thread create an instance of this class to // encourage the OS to use storage that is "close" to each thread's CPU. 
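  // (Illustrative note: on first-touch NUMA systems, the pages backing each
  //  ThrData instance are typically placed on the memory node of the thread
  //  that first writes them, hence the per-thread construction below.)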
thr = new ThrData *[nthreads]; _nthr = nthreads; #if defined(_OPENMP) #pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(lmp) #endif { const int tid = get_tid(); Timer *t = new Timer(lmp); thr[tid] = new ThrData(tid,t); } } /* ---------------------------------------------------------------------- */ FixOMP::~FixOMP() { for (int i=0; i < _nthr; ++i) delete thr[i]; delete[] thr; } /* ---------------------------------------------------------------------- */ int FixOMP::setmask() { int mask = 0; mask |= PRE_FORCE; mask |= PRE_FORCE_RESPA; mask |= MIN_PRE_FORCE; return mask; } /* ---------------------------------------------------------------------- */ void FixOMP::init() { // OPENMP package cannot be used with atom_style template if (atom->molecular == Atom::TEMPLATE) error->all(FLERR,"OPENMP package does not (yet) work with " "atom_style template"); // adjust number of data objects when the number of OpenMP // threads has been changed somehow const int nthreads = comm->nthreads; if (_nthr != nthreads) { if (comm->me == 0) utils::logmesg(lmp,"Re-init OPENMP for {} OpenMP thread(s)\n", nthreads); for (int i=0; i < _nthr; ++i) delete thr[i]; thr = new ThrData *[nthreads]; _nthr = nthreads; #if defined(_OPENMP) #pragma omp parallel LMP_DEFAULT_NONE #endif { const int tid = get_tid(); Timer *t = new Timer(lmp); thr[tid] = new ThrData(tid,t); } } // reset per thread timer for (int i=0; i < nthreads; ++i) { thr[i]->_timer_active=1; thr[i]->timer(Timer::RESET); thr[i]->_timer_active=-1; } if (utils::strmatch(update->integrate_style,"^respa") && !utils::strmatch(update->integrate_style,"^respa/omp")) error->all(FLERR,"Must use respa/omp for r-RESPA with /omp styles"); if (force->pair && force->pair->compute_flag) _pair_compute_flag = true; else _pair_compute_flag = false; if (force->kspace && force->kspace->compute_flag) _kspace_compute_flag = true; else _kspace_compute_flag = false; int check_hybrid, kspace_split; last_pair_hybrid = nullptr; last_omp_style = nullptr; const char *last_omp_name = nullptr; const char *last_hybrid_name = nullptr; const char *last_force_name = nullptr; // support for verlet/split operation. 
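  // (For illustration: with the "verlet/split" run style, one partition
  //  handles the non-kspace forces while the other computes only kspace;
  //  which case applies is selected below via universe->iworld.)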
// kspace_split == 0 : regular processing // kspace_split < 0 : master partition, does not do kspace // kspace_split > 0 : slave partition, only does kspace if (strstr(update->integrate_style,"verlet/split") != nullptr) { if (universe->iworld == 0) kspace_split = -1; else kspace_split = 1; } else { kspace_split = 0; } // determine which is the last force style with OpenMP // support as this is the one that has to reduce the forces #define CheckStyleForOMP(name) \ check_hybrid = 0; \ if (force->name) { \ if ( (strcmp(force->name ## _style,"hybrid") == 0) || \ (strcmp(force->name ## _style,"hybrid/overlay") == 0) ) \ check_hybrid=1; \ if (force->name->suffix_flag & Suffix::OMP) { \ last_force_name = (const char *) #name; \ last_omp_name = force->name ## _style; \ last_omp_style = (void *) force->name; \ } \ } #define CheckHybridForOMP(name,Class) \ if (check_hybrid) { \ Class ## Hybrid *style = (Class ## Hybrid *) force->name; \ for (int i=0; i < style->nstyles; i++) { \ if (style->styles[i]->suffix_flag & Suffix::OMP) { \ last_force_name = (const char *) #name; \ last_omp_name = style->keywords[i]; \ last_omp_style = style->styles[i]; \ } \ } \ } if (_pair_compute_flag && (kspace_split <= 0)) { CheckStyleForOMP(pair); CheckHybridForOMP(pair,Pair); if (check_hybrid) { last_pair_hybrid = last_omp_style; last_hybrid_name = last_omp_name; } CheckStyleForOMP(bond); CheckHybridForOMP(bond,Bond); CheckStyleForOMP(angle); CheckHybridForOMP(angle,Angle); CheckStyleForOMP(dihedral); CheckHybridForOMP(dihedral,Dihedral); CheckStyleForOMP(improper); CheckHybridForOMP(improper,Improper); } if (_kspace_compute_flag && (kspace_split >= 0)) { CheckStyleForOMP(kspace); } #undef CheckStyleForOMP #undef CheckHybridForOMP set_neighbor_omp(); // diagnostic output if (comm->me == 0) { if (last_omp_style) { if (last_pair_hybrid) utils::logmesg(lmp,"Hybrid pair style last /omp style {}\n",last_hybrid_name); utils::logmesg(lmp,"Last active /omp style is {}_style {}\n",last_force_name,last_omp_name); } else { utils::logmesg(lmp,"No /omp style for force computation currently active\n"); } } } /* ---------------------------------------------------------------------- */ void FixOMP::set_neighbor_omp() { // select or deselect multi-threaded neighbor // list build depending on setting in package omp. // NOTE: since we are at the top of the list of // fixes, we cannot adjust neighbor lists from // other fixes. those have to be re-implemented // as /omp fix styles. :-( const int neigh_omp = _neighbor ? 1 : 0; const int nrequest = neighbor->nrequest; // flag *all* neighbor list requests as OPENMP threaded, // but skip lists already flagged as INTEL threaded for (int i = 0; i < nrequest; ++i) if (! neighbor->requests[i]->intel) neighbor->requests[i]->omp = neigh_omp; } /* ---------------------------------------------------------------------- */ void FixOMP::setup(int) { // we are post the force compute in setup. 
turn on timers for (int i=0; i < _nthr; ++i) thr[i]->_timer_active=0; } /* ---------------------------------------------------------------------- */ // adjust size and clear out per thread accumulator arrays void FixOMP::pre_force(int) { const int nall = atom->nlocal + atom->nghost; double **f = atom->f; double **torque = atom->torque; double *erforce = atom->erforce; double *desph = atom->desph; double *drho = atom->drho; #if defined(_OPENMP) #pragma omp parallel LMP_DEFAULT_NONE LMP_SHARED(f,torque,erforce,desph,drho) #endif { const int tid = get_tid(); thr[tid]->check_tid(tid); thr[tid]->init_force(nall,f,torque,erforce,desph,drho); } // end of omp parallel region _reduced = false; } /* ---------------------------------------------------------------------- */ double FixOMP::memory_usage() { double bytes = (double)_nthr * (sizeof(ThrData *) + sizeof(ThrData)); bytes += (double)_nthr * thr[0]->memory_usage(); return bytes; }<|fim▁end|>
<|file_name|>expr.rs<|end_file_name|><|fim▁begin|>use super::pat::{RecoverColon, RecoverComma, PARAM_EXPECTED}; use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign}; use super::{ AttrWrapper, BlockMode, ClosureSpans, ForceCollect, Parser, PathStyle, Restrictions, TokenType, }; use super::{SemiColonMode, SeqSep, TokenExpectType, TrailingToken}; use crate::maybe_recover_from_interpolated_ty_qpath; use ast::token::DelimToken; use rustc_ast::ptr::P; use rustc_ast::token::{self, Token, TokenKind}; use rustc_ast::tokenstream::Spacing; use rustc_ast::util::classify; use rustc_ast::util::literal::LitError; use rustc_ast::util::parser::{prec_let_scrutinee_needs_par, AssocOp, Fixity}; use rustc_ast::{self as ast, AttrStyle, AttrVec, CaptureBy, ExprField, Lit, UnOp, DUMMY_NODE_ID}; use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty, TyKind}; use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits}; use rustc_ast_pretty::pprust; use rustc_errors::{Applicability, DiagnosticBuilder, PResult}; use rustc_session::lint::builtin::BREAK_WITH_LABEL_AND_LOOP; use rustc_session::lint::BuiltinLintDiagnostics; use rustc_span::edition::LATEST_STABLE_EDITION; use rustc_span::source_map::{self, Span, Spanned}; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{BytePos, Pos}; use std::mem; /// Possibly accepts an `token::Interpolated` expression (a pre-parsed expression /// dropped into the token stream, which happens while parsing the result of /// macro expansion). Placement of these is not as complex as I feared it would /// be. The important thing is to make sure that lookahead doesn't balk at /// `token::Interpolated` tokens. macro_rules! maybe_whole_expr { ($p:expr) => { if let token::Interpolated(nt) = &$p.token.kind { match &**nt { token::NtExpr(e) | token::NtLiteral(e) => { let e = e.clone(); $p.bump(); return Ok(e); } token::NtPath(path) => { let path = path.clone(); $p.bump(); return Ok($p.mk_expr( $p.prev_token.span, ExprKind::Path(None, path), AttrVec::new(), )); } token::NtBlock(block) => { let block = block.clone(); $p.bump(); return Ok($p.mk_expr( $p.prev_token.span, ExprKind::Block(block, None), AttrVec::new(), )); } _ => {} }; } }; } #[derive(Debug)] pub(super) enum LhsExpr { NotYetParsed, AttributesParsed(AttrWrapper), AlreadyParsed(P<Expr>), } impl From<Option<AttrWrapper>> for LhsExpr { /// Converts `Some(attrs)` into `LhsExpr::AttributesParsed(attrs)` /// and `None` into `LhsExpr::NotYetParsed`. /// /// This conversion does not allocate. fn from(o: Option<AttrWrapper>) -> Self { if let Some(attrs) = o { LhsExpr::AttributesParsed(attrs) } else { LhsExpr::NotYetParsed } } } impl From<P<Expr>> for LhsExpr { /// Converts the `expr: P<Expr>` into `LhsExpr::AlreadyParsed(expr)`. /// /// This conversion does not allocate. fn from(expr: P<Expr>) -> Self { LhsExpr::AlreadyParsed(expr) } } impl<'a> Parser<'a> { /// Parses an expression. 
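    /// For illustration: operator precedence is handled by
    /// `parse_assoc_expr_with` below, so an input like `1 + 2 * 3`
    /// groups as `1 + (2 * 3)`.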
#[inline] pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> { self.current_closure.take(); self.parse_expr_res(Restrictions::empty(), None) } /// Parses an expression, forcing tokens to be collected pub fn parse_expr_force_collect(&mut self) -> PResult<'a, P<Expr>> { self.collect_tokens_no_attrs(|this| this.parse_expr()) } pub fn parse_anon_const_expr(&mut self) -> PResult<'a, AnonConst> { self.parse_expr().map(|value| AnonConst { id: DUMMY_NODE_ID, value }) } fn parse_expr_catch_underscore(&mut self) -> PResult<'a, P<Expr>> { match self.parse_expr() { Ok(expr) => Ok(expr), Err(mut err) => match self.token.ident() { Some((Ident { name: kw::Underscore, .. }, false)) if self.look_ahead(1, |t| t == &token::Comma) => { // Special-case handling of `foo(_, _, _)` err.emit(); self.bump(); Ok(self.mk_expr(self.prev_token.span, ExprKind::Err, AttrVec::new())) } _ => Err(err), }, } } /// Parses a sequence of expressions delimited by parentheses. fn parse_paren_expr_seq(&mut self) -> PResult<'a, Vec<P<Expr>>> { self.parse_paren_comma_seq(|p| p.parse_expr_catch_underscore()).map(|(r, _)| r) } /// Parses an expression, subject to the given restrictions. #[inline] pub(super) fn parse_expr_res( &mut self, r: Restrictions, already_parsed_attrs: Option<AttrWrapper>, ) -> PResult<'a, P<Expr>> { self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs)) } /// Parses an associative expression. /// /// This parses an expression accounting for associativity and precedence of the operators in /// the expression. #[inline] fn parse_assoc_expr( &mut self, already_parsed_attrs: Option<AttrWrapper>, ) -> PResult<'a, P<Expr>> { self.parse_assoc_expr_with(0, already_parsed_attrs.into()) } /// Parses an associative expression with operators of at least `min_prec` precedence. pub(super) fn parse_assoc_expr_with( &mut self, min_prec: usize, lhs: LhsExpr, ) -> PResult<'a, P<Expr>> { let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs { expr } else { let attrs = match lhs { LhsExpr::AttributesParsed(attrs) => Some(attrs), _ => None, }; if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind) { return self.parse_prefix_range_expr(attrs); } else { self.parse_prefix_expr(attrs)? } }; let last_type_ascription_set = self.last_type_ascription.is_some(); if !self.should_continue_as_assoc_expr(&lhs) { self.last_type_ascription = None; return Ok(lhs); } self.expected_tokens.push(TokenType::Operator); while let Some(op) = self.check_assoc_op() { // Adjust the span for interpolated LHS to point to the `$lhs` token // and not to what it refers to. let lhs_span = match self.prev_token.kind { TokenKind::Interpolated(..) => self.prev_token.span, _ => lhs.span, }; let cur_op_span = self.token.span; let restrictions = if op.node.is_assign_like() { self.restrictions & Restrictions::NO_STRUCT_LITERAL } else { self.restrictions }; let prec = op.node.precedence(); if prec < min_prec { break; } // Check for deprecated `...` syntax if self.token == token::DotDotDot && op.node == AssocOp::DotDotEq { self.err_dotdotdot_syntax(self.token.span); } if self.token == token::LArrow { self.err_larrow_operator(self.token.span); } self.bump(); if op.node.is_comparison() { if let Some(expr) = self.check_no_chained_comparison(&lhs, &op)? 
{ return Ok(expr); } } if (op.node == AssocOp::Equal || op.node == AssocOp::NotEqual) && self.token.kind == token::Eq && self.prev_token.span.hi() == self.token.span.lo() { // Look for JS' `===` and `!==` and recover 😇 let sp = op.span.to(self.token.span); let sugg = match op.node { AssocOp::Equal => "==", AssocOp::NotEqual => "!=", _ => unreachable!(), }; self.struct_span_err(sp, &format!("invalid comparison operator `{}=`", sugg)) .span_suggestion_short( sp, &format!("`{s}=` is not a valid comparison operator, use `{s}`", s = sugg), sugg.to_string(), Applicability::MachineApplicable, ) .emit(); self.bump(); } let op = op.node; // Special cases: if op == AssocOp::As { lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?; continue; } else if op == AssocOp::Colon { lhs = self.parse_assoc_op_ascribe(lhs, lhs_span)?; continue; } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq { // If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to // generalise it to the Fixity::None code. lhs = self.parse_range_expr(prec, lhs, op, cur_op_span)?; break; } let fixity = op.fixity(); let prec_adjustment = match fixity { Fixity::Right => 0, Fixity::Left => 1, // We currently have no non-associative operators that are not handled above by // the special cases. The code is here only for future convenience. Fixity::None => 1, }; let rhs = self.with_res(restrictions - Restrictions::STMT_EXPR, |this| { this.parse_assoc_expr_with(prec + prec_adjustment, LhsExpr::NotYetParsed) })?; let span = self.mk_expr_sp(&lhs, lhs_span, rhs.span); lhs = match op { AssocOp::Add | AssocOp::Subtract | AssocOp::Multiply | AssocOp::Divide | AssocOp::Modulus | AssocOp::LAnd | AssocOp::LOr | AssocOp::BitXor | AssocOp::BitAnd | AssocOp::BitOr | AssocOp::ShiftLeft | AssocOp::ShiftRight | AssocOp::Equal | AssocOp::Less | AssocOp::LessEqual | AssocOp::NotEqual | AssocOp::Greater | AssocOp::GreaterEqual => { let ast_op = op.to_ast_binop().unwrap(); let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs); self.mk_expr(span, binary, AttrVec::new()) } AssocOp::Assign => { self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span), AttrVec::new()) } AssocOp::AssignOp(k) => { let aop = match k { token::Plus => BinOpKind::Add, token::Minus => BinOpKind::Sub, token::Star => BinOpKind::Mul, token::Slash => BinOpKind::Div, token::Percent => BinOpKind::Rem, token::Caret => BinOpKind::BitXor, token::And => BinOpKind::BitAnd, token::Or => BinOpKind::BitOr, token::Shl => BinOpKind::Shl, token::Shr => BinOpKind::Shr, }; let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs); self.mk_expr(span, aopexpr, AttrVec::new()) } AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => { self.span_bug(span, "AssocOp should have been handled by special case") } }; if let Fixity::None = fixity { break; } } if last_type_ascription_set { self.last_type_ascription = None; } Ok(lhs) } fn should_continue_as_assoc_expr(&mut self, lhs: &Expr) -> bool { match (self.expr_is_complete(lhs), AssocOp::from_token(&self.token)) { // Semi-statement forms are odd: // See https://github.com/rust-lang/rust/issues/29071 (true, None) => false, (false, _) => true, // Continue parsing the expression. // An exhaustive check is done in the following block, but these are checked first // because they *are* ambiguous but also reasonable looking incorrect syntax, so we // want to keep their span info to improve diagnostics in these cases in a later stage. 
(true, Some(AssocOp::Multiply)) | // `{ 42 } *foo = bar;` or `{ 42 } * 3` (true, Some(AssocOp::Subtract)) | // `{ 42 } -5` (true, Some(AssocOp::Add)) // `{ 42 } + 42 // If the next token is a keyword, then the tokens above *are* unambiguously incorrect: // `if x { a } else { b } && if y { c } else { d }` if !self.look_ahead(1, |t| t.is_used_keyword()) => { // These cases are ambiguous and can't be identified in the parser alone. let sp = self.sess.source_map().start_point(self.token.span); self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span); false } (true, Some(AssocOp::LAnd)) => { // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`. Separated from the // above due to #74233. // These cases are ambiguous and can't be identified in the parser alone. let sp = self.sess.source_map().start_point(self.token.span); self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span); false } (true, Some(ref op)) if !op.can_continue_expr_unambiguously() => false, (true, Some(_)) => { self.error_found_expr_would_be_stmt(lhs); true } } } /// We've found an expression that would be parsed as a statement, /// but the next token implies this should be parsed as an expression. /// For example: `if let Some(x) = x { x } else { 0 } / 2`. fn error_found_expr_would_be_stmt(&self, lhs: &Expr) { let mut err = self.struct_span_err( self.token.span, &format!("expected expression, found `{}`", pprust::token_to_string(&self.token),), ); err.span_label(self.token.span, "expected expression"); self.sess.expr_parentheses_needed(&mut err, lhs.span); err.emit(); } /// Possibly translate the current token to an associative operator. /// The method does not advance the current token. /// /// Also performs recovery for `and` / `or` which are mistaken for `&&` and `||` respectively. fn check_assoc_op(&self) -> Option<Spanned<AssocOp>> { let (op, span) = match (AssocOp::from_token(&self.token), self.token.ident()) { // When parsing const expressions, stop parsing when encountering `>`. ( Some( AssocOp::ShiftRight | AssocOp::Greater | AssocOp::GreaterEqual | AssocOp::AssignOp(token::BinOpToken::Shr), ), _, ) if self.restrictions.contains(Restrictions::CONST_EXPR) => { return None; } (Some(op), _) => (op, self.token.span), (None, Some((Ident { name: sym::and, span }, false))) => { self.error_bad_logical_op("and", "&&", "conjunction"); (AssocOp::LAnd, span) } (None, Some((Ident { name: sym::or, span }, false))) => { self.error_bad_logical_op("or", "||", "disjunction"); (AssocOp::LOr, span) } _ => return None, }; Some(source_map::respan(span, op)) } /// Error on `and` and `or` suggesting `&&` and `||` respectively. fn error_bad_logical_op(&self, bad: &str, good: &str, english: &str) { self.struct_span_err(self.token.span, &format!("`{}` is not a logical operator", bad)) .span_suggestion_short( self.token.span, &format!("use `{}` to perform logical {}", good, english), good.to_string(), Applicability::MachineApplicable, ) .note("unlike in e.g., python and PHP, `&&` and `||` are used for logical operators") .emit(); } /// Checks if this expression is a successfully parsed statement. fn expr_is_complete(&self, e: &Expr) -> bool { self.restrictions.contains(Restrictions::STMT_EXPR) && !classify::expr_requires_semi_to_be_stmt(e) } /// Parses `x..y`, `x..=y`, and `x..`/`x..=`. /// The other two variants are handled in `parse_prefix_range_expr` below. 
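    /// For example, `a..b` produces `RangeLimits::HalfOpen` and `a..=b`
    /// produces `RangeLimits::Closed`; for a bare `a..` the parsed
    /// right-hand side is `None`.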
fn parse_range_expr( &mut self, prec: usize, lhs: P<Expr>, op: AssocOp, cur_op_span: Span, ) -> PResult<'a, P<Expr>> { let rhs = if self.is_at_start_of_range_notation_rhs() { Some(self.parse_assoc_expr_with(prec + 1, LhsExpr::NotYetParsed)?) } else { None }; let rhs_span = rhs.as_ref().map_or(cur_op_span, |x| x.span); let span = self.mk_expr_sp(&lhs, lhs.span, rhs_span); let limits = if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let range = self.mk_range(Some(lhs), rhs, limits); Ok(self.mk_expr(span, range, AttrVec::new())) } fn is_at_start_of_range_notation_rhs(&self) -> bool { if self.token.can_begin_expr() { // Parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`. if self.token == token::OpenDelim(token::Brace) { return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); } true } else { false } } /// Parses prefix-forms of range notation: `..expr`, `..`, `..=expr`. fn parse_prefix_range_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> { // Check for deprecated `...` syntax. if self.token == token::DotDotDot { self.err_dotdotdot_syntax(self.token.span); } debug_assert!( [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind), "parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq", self.token ); let limits = match self.token.kind { token::DotDot => RangeLimits::HalfOpen, _ => RangeLimits::Closed, }; let op = AssocOp::from_token(&self.token); // FIXME: `parse_prefix_range_expr` is called when the current // token is `DotDot`, `DotDotDot`, or `DotDotEq`. If we haven't already // parsed attributes, then trying to parse them here will always fail. // We should figure out how we want attributes on range expressions to work. let attrs = self.parse_or_use_outer_attributes(attrs)?; self.collect_tokens_for_expr(attrs, |this, attrs| { let lo = this.token.span; this.bump(); let (span, opt_end) = if this.is_at_start_of_range_notation_rhs() { // RHS must be parsed with more associativity than the dots. this.parse_assoc_expr_with(op.unwrap().precedence() + 1, LhsExpr::NotYetParsed) .map(|x| (lo.to(x.span), Some(x)))? } else { (lo, None) }; let range = this.mk_range(None, opt_end, limits); Ok(this.mk_expr(span, range, attrs.into())) }) } /// Parses a prefix-unary-operator expr. fn parse_prefix_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> { let attrs = self.parse_or_use_outer_attributes(attrs)?; let lo = self.token.span; macro_rules! 
make_it { ($this:ident, $attrs:expr, |this, _| $body:expr) => { $this.collect_tokens_for_expr($attrs, |$this, attrs| { let (hi, ex) = $body?; Ok($this.mk_expr(lo.to(hi), ex, attrs.into())) }) }; } let this = self; // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr() match this.token.uninterpolate().kind { token::Not => make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Not)), // `!expr` token::Tilde => make_it!(this, attrs, |this, _| this.recover_tilde_expr(lo)), // `~expr` token::BinOp(token::Minus) => { make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Neg)) } // `-expr` token::BinOp(token::Star) => { make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Deref)) } // `*expr` token::BinOp(token::And) | token::AndAnd => { make_it!(this, attrs, |this, _| this.parse_borrow_expr(lo)) } token::BinOp(token::Plus) if this.look_ahead(1, |tok| tok.is_numeric_lit()) => { let mut err = this.struct_span_err(lo, "leading `+` is not supported"); err.span_label(lo, "unexpected `+`"); // a block on the LHS might have been intended to be an expression instead if let Some(sp) = this.sess.ambiguous_block_expr_parse.borrow().get(&lo) { this.sess.expr_parentheses_needed(&mut err, *sp); } else { err.span_suggestion_verbose( lo, "try removing the `+`", "".to_string(), Applicability::MachineApplicable, ); } err.emit(); this.bump(); this.parse_prefix_expr(None) } // `+expr` token::Ident(..) if this.token.is_keyword(kw::Box) => { make_it!(this, attrs, |this, _| this.parse_box_expr(lo)) } token::Ident(..) if this.is_mistaken_not_ident_negation() => { make_it!(this, attrs, |this, _| this.recover_not_expr(lo)) } _ => return this.parse_dot_or_call_expr(Some(attrs)), } } fn parse_prefix_expr_common(&mut self, lo: Span) -> PResult<'a, (Span, P<Expr>)> { self.bump(); let expr = self.parse_prefix_expr(None); let (span, expr) = self.interpolated_or_expr_span(expr)?; Ok((lo.to(span), expr)) } fn parse_unary_expr(&mut self, lo: Span, op: UnOp) -> PResult<'a, (Span, ExprKind)> { let (span, expr) = self.parse_prefix_expr_common(lo)?; Ok((span, self.mk_unary(op, expr))) } // Recover on `!` suggesting for bitwise negation instead. fn recover_tilde_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> { self.struct_span_err(lo, "`~` cannot be used as a unary operator") .span_suggestion_short( lo, "use `!` to perform bitwise not", "!".to_owned(), Applicability::MachineApplicable, ) .emit(); self.parse_unary_expr(lo, UnOp::Not) } /// Parse `box expr`. fn parse_box_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> { let (span, expr) = self.parse_prefix_expr_common(lo)?; self.sess.gated_spans.gate(sym::box_syntax, span); Ok((span, ExprKind::Box(expr))) } fn is_mistaken_not_ident_negation(&self) -> bool { let token_cannot_continue_expr = |t: &Token| match t.uninterpolate().kind { // These tokens can start an expression after `!`, but // can't continue an expression after an ident token::Ident(name, is_raw) => token::ident_can_begin_expr(name, t.span, is_raw), token::Literal(..) | token::Pound => true, _ => t.is_whole_expr(), }; self.token.is_ident_named(sym::not) && self.look_ahead(1, token_cannot_continue_expr) } /// Recover on `not expr` in favor of `!expr`. fn recover_not_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> { // Emit the error... 
let not_token = self.look_ahead(1, |t| t.clone()); self.struct_span_err( not_token.span, &format!("unexpected {} after identifier", super::token_descr(&not_token)), ) .span_suggestion_short( // Span the `not` plus trailing whitespace to avoid // trailing whitespace after the `!` in our suggestion self.sess.source_map().span_until_non_whitespace(lo.to(not_token.span)), "use `!` to perform logical negation", "!".to_owned(), Applicability::MachineApplicable, ) .emit(); // ...and recover! self.parse_unary_expr(lo, UnOp::Not) } /// Returns the span of expr, if it was not interpolated or the span of the interpolated token. fn interpolated_or_expr_span( &self, expr: PResult<'a, P<Expr>>, ) -> PResult<'a, (Span, P<Expr>)> { expr.map(|e| { ( match self.prev_token.kind { TokenKind::Interpolated(..) => self.prev_token.span, _ => e.span, }, e, ) }) } fn parse_assoc_op_cast( &mut self, lhs: P<Expr>, lhs_span: Span, expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind, ) -> PResult<'a, P<Expr>> { let mk_expr = |this: &mut Self, lhs: P<Expr>, rhs: P<Ty>| { this.mk_expr( this.mk_expr_sp(&lhs, lhs_span, rhs.span), expr_kind(lhs, rhs), AttrVec::new(), ) }; // Save the state of the parser before parsing type normally, in case there is a // LessThan comparison after this cast. let parser_snapshot_before_type = self.clone(); let cast_expr = match self.parse_ty_no_plus() { Ok(rhs) => mk_expr(self, lhs, rhs), Err(mut type_err) => { // Rewind to before attempting to parse the type with generics, to recover // from situations like `x as usize < y` in which we first tried to parse // `usize < y` as a type with generic arguments. let parser_snapshot_after_type = mem::replace(self, parser_snapshot_before_type); // Check for typo of `'a: loop { break 'a }` with a missing `'`. match (&lhs.kind, &self.token.kind) { ( // `foo: ` ExprKind::Path(None, ast::Path { segments, .. }), TokenKind::Ident(kw::For | kw::Loop | kw::While, false), ) if segments.len() == 1 => { let snapshot = self.clone(); let label = Label { ident: Ident::from_str_and_span( &format!("'{}", segments[0].ident), segments[0].ident.span, ), }; match self.parse_labeled_expr(label, AttrVec::new(), false) { Ok(expr) => { type_err.cancel(); self.struct_span_err(label.ident.span, "malformed loop label") .span_suggestion( label.ident.span, "use the correct loop label format", label.ident.to_string(), Applicability::MachineApplicable, ) .emit(); return Ok(expr); } Err(mut err) => { err.cancel(); *self = snapshot; } } } _ => {} } match self.parse_path(PathStyle::Expr) { Ok(path) => { let (op_noun, op_verb) = match self.token.kind { token::Lt => ("comparison", "comparing"), token::BinOp(token::Shl) => ("shift", "shifting"), _ => { // We can end up here even without `<` being the next token, for // example because `parse_ty_no_plus` returns `Err` on keywords, // but `parse_path` returns `Ok` on them due to error recovery. // Return original error and parser state. *self = parser_snapshot_after_type; return Err(type_err); } }; // Successfully parsed the type path leaving a `<` yet to parse. type_err.cancel(); // Report non-fatal diagnostics, keep `x as usize` as an expression // in AST and continue parsing. 
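                        // (For illustration: `x as usize < y` lands here; the
                        // suggestion below is to parenthesize: `(x as usize) < y`.)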
                        let msg = format!(
                            "`<` is interpreted as a start of generic arguments for `{}`, not a {}",
                            pprust::path_to_string(&path),
                            op_noun,
                        );
                        let span_after_type = parser_snapshot_after_type.token.span;
                        let expr =
                            mk_expr(self, lhs, self.mk_ty(path.span, TyKind::Path(None, path)));

                        self.struct_span_err(self.token.span, &msg)
                            .span_label(
                                self.look_ahead(1, |t| t.span).to(span_after_type),
                                "interpreted as generic arguments",
                            )
                            .span_label(self.token.span, format!("not interpreted as {}", op_noun))
                            .multipart_suggestion(
                                &format!("try {} the cast value", op_verb),
                                vec![
                                    (expr.span.shrink_to_lo(), "(".to_string()),
                                    (expr.span.shrink_to_hi(), ")".to_string()),
                                ],
                                Applicability::MachineApplicable,
                            )
                            .emit();

                        expr
                    }
                    Err(mut path_err) => {
                        // Couldn't parse as a path, return original error and parser state.
                        path_err.cancel();
                        *self = parser_snapshot_after_type;
                        return Err(type_err);
                    }
                }
            }
        };

        self.parse_and_disallow_postfix_after_cast(cast_expr)
    }

    /// Parses postfix operators such as `.`, `?`, or index (`[]`) after a cast,
    /// then emits an error and returns the newly parsed tree.
    /// The resulting parse tree for `&x as T[0]` has a precedence of `((&x) as T)[0]`.
    fn parse_and_disallow_postfix_after_cast(
        &mut self,
        cast_expr: P<Expr>,
    ) -> PResult<'a, P<Expr>> {
        // Save the memory location of expr before parsing any following postfix operators.
        // This will be compared with the memory location of the output expression.
        // If they are different, we can assume we parsed another expression because the
        // existing expression is not reallocated.
        let addr_before = &*cast_expr as *const _ as usize;
        let span = cast_expr.span;
        let with_postfix = self.parse_dot_or_call_expr_with_(cast_expr, span)?;
        let changed = addr_before != &*with_postfix as *const _ as usize;

        // Check if an illegal postfix operator has been added after the cast.
        // If the resulting expression is not a cast, or has a different memory location, it is an
        // illegal postfix operator.
        if !matches!(with_postfix.kind, ExprKind::Cast(_, _) | ExprKind::Type(_, _)) || changed {
            let msg = format!(
                "casts cannot be followed by {}",
                match with_postfix.kind {
                    ExprKind::Index(_, _) => "indexing",
                    ExprKind::Try(_) => "?",
                    ExprKind::Field(_, _) => "a field access",
                    ExprKind::MethodCall(_, _, _) => "a method call",
                    ExprKind::Call(_, _) => "a function call",
                    ExprKind::Await(_) => "`.await`",
                    ExprKind::Err => return Ok(with_postfix),
                    _ => unreachable!("parse_dot_or_call_expr_with_ shouldn't produce this"),
                }
            );
            let mut err = self.struct_span_err(span, &msg);
            // If type ascription is "likely an error", the user will already be getting a useful
            // help message, and doesn't need a second.
            if self.last_type_ascription.map_or(false, |last_ascription| last_ascription.1) {
                self.maybe_annotate_with_ascription(&mut err, false);
            } else {
                let suggestions = vec![
                    (span.shrink_to_lo(), "(".to_string()),
                    (span.shrink_to_hi(), ")".to_string()),
                ];
                err.multipart_suggestion(
                    "try surrounding the expression in parentheses",
                    suggestions,
                    Applicability::MachineApplicable,
                );
            }
            err.emit();
        };
        Ok(with_postfix)
    }

    fn parse_assoc_op_ascribe(&mut self, lhs: P<Expr>, lhs_span: Span) -> PResult<'a, P<Expr>> {
        let maybe_path = self.could_ascription_be_path(&lhs.kind);
        self.last_type_ascription = Some((self.prev_token.span, maybe_path));
        let lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type)?;
        self.sess.gated_spans.gate(sym::type_ascription, lhs.span);
        Ok(lhs)
    }

    /// Parse `& mut? <expr>` or `& raw [ const | mut ] <expr>`.
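    /// For example: `&x`, `&mut x`, and (gated behind the `raw_ref_op`
    /// feature) `&raw const x` or `&raw mut x`.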
fn parse_borrow_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> { self.expect_and()?; let has_lifetime = self.token.is_lifetime() && self.look_ahead(1, |t| t != &token::Colon); let lifetime = has_lifetime.then(|| self.expect_lifetime()); // For recovery, see below. let (borrow_kind, mutbl) = self.parse_borrow_modifiers(lo); let expr = self.parse_prefix_expr(None); let (hi, expr) = self.interpolated_or_expr_span(expr)?; let span = lo.to(hi); if let Some(lt) = lifetime { self.error_remove_borrow_lifetime(span, lt.ident.span); } Ok((span, ExprKind::AddrOf(borrow_kind, mutbl, expr))) } fn error_remove_borrow_lifetime(&self, span: Span, lt_span: Span) { self.struct_span_err(span, "borrow expressions cannot be annotated with lifetimes") .span_label(lt_span, "annotated with lifetime here") .span_suggestion( lt_span, "remove the lifetime annotation", String::new(), Applicability::MachineApplicable, ) .emit(); } /// Parse `mut?` or `raw [ const | mut ]`. fn parse_borrow_modifiers(&mut self, lo: Span) -> (ast::BorrowKind, ast::Mutability) { if self.check_keyword(kw::Raw) && self.look_ahead(1, Token::is_mutability) { // `raw [ const | mut ]`. let found_raw = self.eat_keyword(kw::Raw); assert!(found_raw); let mutability = self.parse_const_or_mut().unwrap(); self.sess.gated_spans.gate(sym::raw_ref_op, lo.to(self.prev_token.span)); (ast::BorrowKind::Raw, mutability) } else { // `mut?` (ast::BorrowKind::Ref, self.parse_mutability()) } } /// Parses `a.b` or `a(13)` or `a[4]` or just `a`. fn parse_dot_or_call_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> { let attrs = self.parse_or_use_outer_attributes(attrs)?; self.collect_tokens_for_expr(attrs, |this, attrs| { let base = this.parse_bottom_expr(); let (span, base) = this.interpolated_or_expr_span(base)?; this.parse_dot_or_call_expr_with(base, span, attrs) }) } pub(super) fn parse_dot_or_call_expr_with( &mut self, e0: P<Expr>, lo: Span, mut attrs: Vec<ast::Attribute>, ) -> PResult<'a, P<Expr>> { // Stitch the list of outer attributes onto the return value. // A little bit ugly, but the best way given the current code // structure self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| { expr.map(|mut expr| { attrs.extend::<Vec<_>>(expr.attrs.into()); expr.attrs = attrs.into(); expr }) }) } fn parse_dot_or_call_expr_with_(&mut self, mut e: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> { loop { if self.eat(&token::Question) { // `expr?` e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e), AttrVec::new()); continue; } if self.eat(&token::Dot) { // expr.f e = self.parse_dot_suffix_expr(lo, e)?; continue; } if self.expr_is_complete(&e) { return Ok(e); } e = match self.token.kind { token::OpenDelim(token::Paren) => self.parse_fn_call_expr(lo, e), token::OpenDelim(token::Bracket) => self.parse_index_expr(lo, e)?, _ => return Ok(e), } } } fn look_ahead_type_ascription_as_field(&mut self) -> bool { self.look_ahead(1, |t| t.is_ident()) && self.look_ahead(2, |t| t == &token::Colon) && self.look_ahead(3, |t| t.can_begin_expr()) } fn parse_dot_suffix_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> { match self.token.uninterpolate().kind { token::Ident(..) 
=> self.parse_dot_suffix(base, lo), token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) => { Ok(self.parse_tuple_field_access_expr(lo, base, symbol, suffix, None)) } token::Literal(token::Lit { kind: token::Float, symbol, suffix }) => { Ok(self.parse_tuple_field_access_expr_float(lo, base, symbol, suffix)) } _ => { self.error_unexpected_after_dot(); Ok(base) } } } fn error_unexpected_after_dot(&self) { // FIXME Could factor this out into non_fatal_unexpected or something. let actual = pprust::token_to_string(&self.token); self.struct_span_err(self.token.span, &format!("unexpected token: `{}`", actual)).emit(); } // We need an identifier or integer, but the next token is a float. // Break the float into components to extract the identifier or integer. // FIXME: With current `TokenCursor` it's hard to break tokens into more than 2 // parts unless those parts are processed immediately. `TokenCursor` should either // support pushing "future tokens" (would be also helpful to `break_and_eat`), or // we should break everything including floats into more basic proc-macro style // tokens in the lexer (probably preferable). fn parse_tuple_field_access_expr_float( &mut self, lo: Span, base: P<Expr>, float: Symbol, suffix: Option<Symbol>, ) -> P<Expr> { #[derive(Debug)] enum FloatComponent { IdentLike(String), Punct(char), } use FloatComponent::*; let float_str = float.as_str(); let mut components = Vec::new(); let mut ident_like = String::new(); for c in float_str.chars() { if c == '_' || c.is_ascii_alphanumeric() { ident_like.push(c); } else if matches!(c, '.' | '+' | '-') { if !ident_like.is_empty() { components.push(IdentLike(mem::take(&mut ident_like))); } components.push(Punct(c)); } else { panic!("unexpected character in a float token: {:?}", c) } } if !ident_like.is_empty() { components.push(IdentLike(ident_like)); } // With proc macros the span can refer to anything, the source may be too short, // or too long, or non-ASCII. It only makes sense to break our span into components // if its underlying text is identical to our float literal. let span = self.token.span; let can_take_span_apart = || self.span_to_snippet(span).as_deref() == Ok(float_str).as_deref(); match &*components { // 1e2 [IdentLike(i)] => { self.parse_tuple_field_access_expr(lo, base, Symbol::intern(&i), suffix, None) } // 1. 
[IdentLike(i), Punct('.')] => { let (ident_span, dot_span) = if can_take_span_apart() { let (span, ident_len) = (span.data(), BytePos::from_usize(i.len())); let ident_span = span.with_hi(span.lo + ident_len); let dot_span = span.with_lo(span.lo + ident_len); (ident_span, dot_span) } else { (span, span) }; assert!(suffix.is_none()); let symbol = Symbol::intern(&i); self.token = Token::new(token::Ident(symbol, false), ident_span); let next_token = (Token::new(token::Dot, dot_span), self.token_spacing); self.parse_tuple_field_access_expr(lo, base, symbol, None, Some(next_token)) } // 1.2 | 1.2e3 [IdentLike(i1), Punct('.'), IdentLike(i2)] => { let (ident1_span, dot_span, ident2_span) = if can_take_span_apart() { let (span, ident1_len) = (span.data(), BytePos::from_usize(i1.len())); let ident1_span = span.with_hi(span.lo + ident1_len); let dot_span = span .with_lo(span.lo + ident1_len) .with_hi(span.lo + ident1_len + BytePos(1)); let ident2_span = self.token.span.with_lo(span.lo + ident1_len + BytePos(1)); (ident1_span, dot_span, ident2_span) } else { (span, span, span) }; let symbol1 = Symbol::intern(&i1); self.token = Token::new(token::Ident(symbol1, false), ident1_span); // This needs to be `Spacing::Alone` to prevent regressions. // See issue #76399 and PR #76285 for more details let next_token1 = (Token::new(token::Dot, dot_span), Spacing::Alone); let base1 = self.parse_tuple_field_access_expr(lo, base, symbol1, None, Some(next_token1)); let symbol2 = Symbol::intern(&i2); let next_token2 = Token::new(token::Ident(symbol2, false), ident2_span); self.bump_with((next_token2, self.token_spacing)); // `.` self.parse_tuple_field_access_expr(lo, base1, symbol2, suffix, None) } // 1e+ | 1e- (recovered) [IdentLike(_), Punct('+' | '-')] | // 1e+2 | 1e-2 [IdentLike(_), Punct('+' | '-'), IdentLike(_)] | // 1.2e+ | 1.2e- [IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-')] | // 1.2e+3 | 1.2e-3 [IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-'), IdentLike(_)] => { // See the FIXME about `TokenCursor` above. self.error_unexpected_after_dot(); base } _ => panic!("unexpected components in a float token: {:?}", components), } } fn parse_tuple_field_access_expr( &mut self, lo: Span, base: P<Expr>, field: Symbol, suffix: Option<Symbol>, next_token: Option<(Token, Spacing)>, ) -> P<Expr> { match next_token { Some(next_token) => self.bump_with(next_token), None => self.bump(), } let span = self.prev_token.span; let field = ExprKind::Field(base, Ident::new(field, span)); self.expect_no_suffix(span, "a tuple index", suffix); self.mk_expr(lo.to(span), field, AttrVec::new()) } /// Parse a function call expression, `expr(...)`. fn parse_fn_call_expr(&mut self, lo: Span, fun: P<Expr>) -> P<Expr> { let snapshot = if self.token.kind == token::OpenDelim(token::Paren) && self.look_ahead_type_ascription_as_field() { Some((self.clone(), fun.kind.clone())) } else { None }; let open_paren = self.token.span; let mut seq = self.parse_paren_expr_seq().map(|args| { self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args), AttrVec::new()) }); if let Some(expr) = self.maybe_recover_struct_lit_bad_delims(lo, open_paren, &mut seq, snapshot) { return expr; } self.recover_seq_parse_error(token::Paren, lo, seq) } /// If we encounter a parser state that looks like the user has written a `struct` literal with /// parentheses instead of braces, recover the parser state and provide suggestions. 
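    /// For example, `Enum::Foo(a: 3, b: 4)` is recovered here, with
    /// suggestions for either `Enum::Foo { a: 3, b: 4 }` or `Enum::Foo(3, 4)`.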
    #[instrument(skip(self, seq, snapshot), level = "trace")]
    fn maybe_recover_struct_lit_bad_delims(
        &mut self,
        lo: Span,
        open_paren: Span,
        seq: &mut PResult<'a, P<Expr>>,
        snapshot: Option<(Self, ExprKind)>,
    ) -> Option<P<Expr>> {
        match (seq.as_mut(), snapshot) {
            (Err(ref mut err), Some((mut snapshot, ExprKind::Path(None, path)))) => {
                let name = pprust::path_to_string(&path);
                snapshot.bump(); // `(`
                match snapshot.parse_struct_fields(path, false, token::Paren) {
                    Ok((fields, ..)) if snapshot.eat(&token::CloseDelim(token::Paren)) => {
                        // We are certain we have `Enum::Foo(a: 3, b: 4)`, suggest
                        // `Enum::Foo { a: 3, b: 4 }` or `Enum::Foo(3, 4)`.
                        *self = snapshot;
                        let close_paren = self.prev_token.span;
                        let span = lo.to(self.prev_token.span);
                        err.cancel();
                        self.struct_span_err(
                            span,
                            "invalid `struct` delimiters or `fn` call arguments",
                        )
                        .multipart_suggestion(
                            &format!("if `{}` is a struct, use braces as delimiters", name),
                            vec![(open_paren, " { ".to_string()), (close_paren, " }".to_string())],
                            Applicability::MaybeIncorrect,
                        )
                        .multipart_suggestion(
                            &format!("if `{}` is a function, use the arguments directly", name),
                            fields
                                .into_iter()
                                .map(|field| (field.span.until(field.expr.span), String::new()))
                                .collect(),
                            Applicability::MaybeIncorrect,
                        )
                        .emit();
                        return Some(self.mk_expr_err(span));
                    }
                    Ok(_) => {}
                    Err(mut err) => err.emit(),
                }
            }
            _ => {}
        }
        None
    }

    /// Parse an indexing expression `expr[...]`.
    fn parse_index_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
        self.bump(); // `[`
        let index = self.parse_expr()?;
        self.expect(&token::CloseDelim(token::Bracket))?;
        Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index), AttrVec::new()))
    }

    /// Assuming we have just parsed `.`, continue parsing into an expression.
    fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
        if self.token.uninterpolated_span().rust_2018() && self.eat_keyword(kw::Await) {
            return Ok(self.mk_await_expr(self_arg, lo));
        }

        let fn_span_lo = self.token.span;
        let mut segment = self.parse_path_segment(PathStyle::Expr, None)?;
        self.check_trailing_angle_brackets(&segment, &[&token::OpenDelim(token::Paren)]);
        self.check_turbofish_missing_angle_brackets(&mut segment);

        if self.check(&token::OpenDelim(token::Paren)) {
            // Method call `expr.f()`
            let mut args = self.parse_paren_expr_seq()?;
            args.insert(0, self_arg);

            let fn_span = fn_span_lo.to(self.prev_token.span);
            let span = lo.to(self.prev_token.span);
            Ok(self.mk_expr(span, ExprKind::MethodCall(segment, args, fn_span), AttrVec::new()))
        } else {
            // Field access `expr.f`
            if let Some(args) = segment.args {
                self.struct_span_err(
                    args.span(),
                    "field expressions cannot have generic arguments",
                )
                .emit();
            }

            let span = lo.to(self.prev_token.span);
            Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), AttrVec::new()))
        }
    }

    /// At the bottom (top?) of the precedence hierarchy,
    /// parses things like parenthesized exprs, macros, `return`, etc.
    ///
    /// N.B., this does not parse outer attributes, and is private because it only works
    /// correctly if called from `parse_dot_or_call_expr()`.
    fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> {
        maybe_recover_from_interpolated_ty_qpath!(self, true);
        maybe_whole_expr!(self);

        // Outer attributes are already parsed and will be
        // added to the return value after the fact.
        //
        // Therefore, prevent sub-parser from parsing
        // attributes by giving them an empty "already-parsed" list.
let attrs = AttrVec::new(); // Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`. let lo = self.token.span; if let token::Literal(_) = self.token.kind { // This match arm is a special-case of the `_` match arm below and // could be removed without changing functionality, but it's faster // to have it here, especially for programs with large constants. self.parse_lit_expr(attrs) } else if self.check(&token::OpenDelim(token::Paren)) { self.parse_tuple_parens_expr(attrs) } else if self.check(&token::OpenDelim(token::Brace)) { self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs) } else if self.check(&token::BinOp(token::Or)) || self.check(&token::OrOr) { self.parse_closure_expr(attrs) } else if self.check(&token::OpenDelim(token::Bracket)) { self.parse_array_or_repeat_expr(attrs, token::Bracket) } else if self.check_path() { self.parse_path_start_expr(attrs) } else if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) { self.parse_closure_expr(attrs) } else if self.eat_keyword(kw::If) { self.parse_if_expr(attrs) } else if self.check_keyword(kw::For) { if self.choose_generics_over_qpath(1) { // NOTE(Centril, eddyb): DO NOT REMOVE! Beyond providing parser recovery, // this is an insurance policy in case we allow qpaths in (tuple-)struct patterns. // When `for <Foo as Bar>::Proj in $expr $block` is wanted, // you can disambiguate in favor of a pattern with `(...)`. self.recover_quantified_closure_expr(attrs) } else { assert!(self.eat_keyword(kw::For)); self.parse_for_expr(None, self.prev_token.span, attrs) } } else if self.eat_keyword(kw::While) { self.parse_while_expr(None, self.prev_token.span, attrs) } else if let Some(label) = self.eat_label() { self.parse_labeled_expr(label, attrs, true) } else if self.eat_keyword(kw::Loop) { self.parse_loop_expr(None, self.prev_token.span, attrs) } else if self.eat_keyword(kw::Continue) { let kind = ExprKind::Continue(self.eat_label()); Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs)) } else if self.eat_keyword(kw::Match) { let match_sp = self.prev_token.span; self.parse_match_expr(attrs).map_err(|mut err| { err.span_label(match_sp, "while parsing this match expression"); err }) } else if self.eat_keyword(kw::Unsafe) { self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs) } else if self.check_inline_const(0) { self.parse_const_block(lo.to(self.token.span), false) } else if self.is_do_catch_block() { self.recover_do_catch(attrs) } else if self.is_try_block() { self.expect_keyword(kw::Try)?; self.parse_try_block(lo, attrs) } else if self.eat_keyword(kw::Return) { self.parse_return_expr(attrs) } else if self.eat_keyword(kw::Break) { self.parse_break_expr(attrs) } else if self.eat_keyword(kw::Yield) { self.parse_yield_expr(attrs) } else if self.eat_keyword(kw::Let) { self.parse_let_expr(attrs) } else if self.eat_keyword(kw::Underscore) { self.sess.gated_spans.gate(sym::destructuring_assignment, self.prev_token.span); Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore, attrs)) } else if !self.unclosed_delims.is_empty() && self.check(&token::Semi) { // Don't complain about bare semicolons after unclosed braces // recovery in order to keep the error count down. Fixing the // delimiters will possibly also fix the bare semicolon found in // expression context. 
For example, silence the following error: // // error: expected expression, found `;` // --> file.rs:2:13 // | // 2 | foo(bar(; // | ^ expected expression self.bump(); Ok(self.mk_expr_err(self.token.span)) } else if self.token.uninterpolated_span().rust_2018() { // `Span::rust_2018()` is somewhat expensive; don't get it repeatedly. if self.check_keyword(kw::Async) { if self.is_async_block() { // Check for `async {` and `async move {`. self.parse_async_block(attrs) } else { self.parse_closure_expr(attrs) } } else if self.eat_keyword(kw::Await) { self.recover_incorrect_await_syntax(lo, self.prev_token.span, attrs) } else { self.parse_lit_expr(attrs) } } else { self.parse_lit_expr(attrs) } } fn parse_lit_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.token.span; match self.parse_opt_lit() { Some(literal) => { let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal), attrs); self.maybe_recover_from_bad_qpath(expr, true) } None => self.try_macro_suggestion(), } } fn parse_tuple_parens_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.token.span; self.expect(&token::OpenDelim(token::Paren))?; let (es, trailing_comma) = match self.parse_seq_to_end( &token::CloseDelim(token::Paren), SeqSep::trailing_allowed(token::Comma), |p| p.parse_expr_catch_underscore(), ) { Ok(x) => x, Err(err) => return Ok(self.recover_seq_parse_error(token::Paren, lo, Err(err))), }; let kind = if es.len() == 1 && !trailing_comma { // `(e)` is parenthesized `e`. ExprKind::Paren(es.into_iter().next().unwrap()) } else { // `(e,)` is a tuple with only one field, `e`. ExprKind::Tup(es) }; let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs); self.maybe_recover_from_bad_qpath(expr, true) } fn parse_array_or_repeat_expr( &mut self, attrs: AttrVec, close_delim: token::DelimToken, ) -> PResult<'a, P<Expr>> { let lo = self.token.span; self.bump(); // `[` or other open delim let close = &token::CloseDelim(close_delim); let kind = if self.eat(close) { // Empty vector ExprKind::Array(Vec::new()) } else { // Non-empty vector let first_expr = self.parse_expr()?; if self.eat(&token::Semi) { // Repeating array syntax: `[ 0; 512 ]` let count = self.parse_anon_const_expr()?; self.expect(close)?; ExprKind::Repeat(first_expr, count) } else if self.eat(&token::Comma) { // Vector with two or more elements. let sep = SeqSep::trailing_allowed(token::Comma); let (remaining_exprs, _) = self.parse_seq_to_end(close, sep, |p| p.parse_expr())?; let mut exprs = vec![first_expr]; exprs.extend(remaining_exprs); ExprKind::Array(exprs) } else { // Vector with one element self.expect(close)?; ExprKind::Array(vec![first_expr]) } }; let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs); self.maybe_recover_from_bad_qpath(expr, true) } fn parse_path_start_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let (qself, path) = if self.eat_lt() { let (qself, path) = self.parse_qpath(PathStyle::Expr)?; (Some(qself), path) } else { (None, self.parse_path(PathStyle::Expr)?) }; let lo = path.span; // `!`, as an operator, is prefix, so we know this isn't that. 
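        // (For illustration: in `vec![1, 2]` or `write!(out, "hi")`, the `!`
        // right after the path can only mean a macro invocation.)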
let (hi, kind) = if self.eat(&token::Not) { // MACRO INVOCATION expression if qself.is_some() { self.struct_span_err(path.span, "macros cannot use qualified paths").emit(); } let mac = MacCall { path, args: self.parse_mac_args()?, prior_type_ascription: self.last_type_ascription, }; (self.prev_token.span, ExprKind::MacCall(mac)) } else if self.check(&token::OpenDelim(token::Brace)) { if let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path, &attrs) { if qself.is_some() { self.sess.gated_spans.gate(sym::more_qualified_paths, path.span); } return expr; } else { (path.span, ExprKind::Path(qself, path)) } } else { (path.span, ExprKind::Path(qself, path)) }; let expr = self.mk_expr(lo.to(hi), kind, attrs); self.maybe_recover_from_bad_qpath(expr, true) } /// Parse `'label: $expr`. The label is already parsed. fn parse_labeled_expr( &mut self, label: Label, attrs: AttrVec, consume_colon: bool, ) -> PResult<'a, P<Expr>> { let lo = label.ident.span; let label = Some(label); let ate_colon = self.eat(&token::Colon); let expr = if self.eat_keyword(kw::While) { self.parse_while_expr(label, lo, attrs) } else if self.eat_keyword(kw::For) { self.parse_for_expr(label, lo, attrs) } else if self.eat_keyword(kw::Loop) { self.parse_loop_expr(label, lo, attrs) } else if self.check(&token::OpenDelim(token::Brace)) || self.token.is_whole_block() { self.parse_block_expr(label, lo, BlockCheckMode::Default, attrs) } else { let msg = "expected `while`, `for`, `loop` or `{` after a label"; self.struct_span_err(self.token.span, msg).span_label(self.token.span, msg).emit(); // Continue as an expression in an effort to recover on `'label: non_block_expr`. self.parse_expr() }?; if !ate_colon && consume_colon { self.error_labeled_expr_must_be_followed_by_colon(lo, expr.span); } Ok(expr) } fn error_labeled_expr_must_be_followed_by_colon(&self, lo: Span, span: Span) { self.struct_span_err(span, "labeled expression must be followed by `:`") .span_label(lo, "the label") .span_suggestion_short( lo.shrink_to_hi(), "add `:` after the label", ": ".to_string(), Applicability::MachineApplicable, ) .note("labels are used before loops and blocks, allowing e.g., `break 'label` to them") .emit(); } /// Recover on the syntax `do catch { ... }` suggesting `try { ... }` instead. fn recover_do_catch(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.token.span; self.bump(); // `do` self.bump(); // `catch` let span_dc = lo.to(self.prev_token.span); self.struct_span_err(span_dc, "found removed `do catch` syntax") .span_suggestion( span_dc, "replace with the new syntax", "try".to_string(), Applicability::MachineApplicable, ) .note("following RFC #2388, the new non-placeholder syntax is `try`") .emit(); self.parse_try_block(lo, attrs) } /// Parse an expression if the token can begin one. fn parse_expr_opt(&mut self) -> PResult<'a, Option<P<Expr>>> { Ok(if self.token.can_begin_expr() { Some(self.parse_expr()?) } else { None }) } /// Parse `"return" expr?`. fn parse_return_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.prev_token.span; let kind = ExprKind::Ret(self.parse_expr_opt()?); let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs); self.maybe_recover_from_bad_qpath(expr, true) } /// Parse `"break" (('label (:? expr)?) | expr?)` with `"break"` token already eaten. /// If the label is followed immediately by a `:` token, the label and `:` are /// parsed as part of the expression (i.e. a labeled loop). 
The language team has /// decided in #87026 to require parentheses as a visual aid to avoid confusion if /// the break expression of an unlabeled break is a labeled loop (as in /// `break 'lbl: loop {}`); a labeled break with an unlabeled loop as its value /// expression only gets a warning for compatibility reasons; and a labeled break /// with a labeled loop does not even get a warning because there is no ambiguity. fn parse_break_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.prev_token.span; let mut label = self.eat_label(); let kind = if label.is_some() && self.token == token::Colon { // The value expression can be a labeled loop, see issue #86948, e.g.: // `loop { break 'label: loop { break 'label 42; }; }` let lexpr = self.parse_labeled_expr(label.take().unwrap(), AttrVec::new(), true)?; self.struct_span_err( lexpr.span, "parentheses are required around this expression to avoid confusion with a labeled break expression", ) .multipart_suggestion( "wrap the expression in parentheses", vec![ (lexpr.span.shrink_to_lo(), "(".to_string()), (lexpr.span.shrink_to_hi(), ")".to_string()), ], Applicability::MachineApplicable, ) .emit(); Some(lexpr) } else if self.token != token::OpenDelim(token::Brace) || !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL) { let expr = self.parse_expr_opt()?; if let Some(ref expr) = expr { if label.is_some() && matches!( expr.kind, ExprKind::While(_, _, None) | ExprKind::ForLoop(_, _, _, None) | ExprKind::Loop(_, None) | ExprKind::Block(_, None) ) { self.sess.buffer_lint_with_diagnostic( BREAK_WITH_LABEL_AND_LOOP, lo.to(expr.span), ast::CRATE_NODE_ID, "this labeled break expression is easy to confuse with an unlabeled break with a labeled value expression", BuiltinLintDiagnostics::BreakWithLabelAndLoop(expr.span), ); } } expr } else { None }; let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind), attrs); self.maybe_recover_from_bad_qpath(expr, true) } /// Parse `"yield" expr?`. fn parse_yield_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.prev_token.span; let kind = ExprKind::Yield(self.parse_expr_opt()?); let span = lo.to(self.prev_token.span); self.sess.gated_spans.gate(sym::generators, span); let expr = self.mk_expr(span, kind, attrs); self.maybe_recover_from_bad_qpath(expr, true) } /// Returns a string literal if the next token is a string literal. /// In case of error returns `Some(lit)` if the next token is a literal with a wrong kind, /// and returns `None` if the next token is not literal at all. pub fn parse_str_lit(&mut self) -> Result<ast::StrLit, Option<Lit>> { match self.parse_opt_lit() { Some(lit) => match lit.kind { ast::LitKind::Str(symbol_unescaped, style) => Ok(ast::StrLit { style, symbol: lit.token.symbol, suffix: lit.token.suffix, span: lit.span, symbol_unescaped, }), _ => Err(Some(lit)), }, None => Err(None), } } pub(super) fn parse_lit(&mut self) -> PResult<'a, Lit> { self.parse_opt_lit().ok_or_else(|| { if let token::Interpolated(inner) = &self.token.kind { let expr = match inner.as_ref() { token::NtExpr(expr) => Some(expr), token::NtLiteral(expr) => Some(expr), _ => None, }; if let Some(expr) = expr { if matches!(expr.kind, ExprKind::Err) { self.diagnostic() .delay_span_bug(self.token.span, &"invalid interpolated expression"); return self.diagnostic().struct_dummy(); } } } let msg = format!("unexpected token: {}", super::token_descr(&self.token)); self.struct_span_err(self.token.span, &msg) }) } /// Matches `lit = true | false | token_lit`. 
/// Returns `None` if the next token is not a literal. pub(super) fn parse_opt_lit(&mut self) -> Option<Lit> { let mut recovered = None; if self.token == token::Dot { // Attempt to recover `.4` as `0.4`. We don't currently have any syntax where // dot would follow an optional literal, so we do this unconditionally. recovered = self.look_ahead(1, |next_token| { if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) = next_token.kind { if self.token.span.hi() == next_token.span.lo() { let s = String::from("0.") + &symbol.as_str(); let kind = TokenKind::lit(token::Float, Symbol::intern(&s), suffix); return Some(Token::new(kind, self.token.span.to(next_token.span))); } } None }); if let Some(token) = &recovered { self.bump(); self.error_float_lits_must_have_int_part(&token); } } let token = recovered.as_ref().unwrap_or(&self.token); match Lit::from_token(token) { Ok(lit) => { self.bump(); Some(lit) } Err(LitError::NotLiteral) => None, Err(err) => { let span = token.span; let lit = match token.kind { token::Literal(lit) => lit, _ => unreachable!(), }; self.bump(); self.report_lit_error(err, lit, span); // Pack possible quotes and prefixes from the original literal into // the error literal's symbol so they can be pretty-printed faithfully. let suffixless_lit = token::Lit::new(lit.kind, lit.symbol, None); let symbol = Symbol::intern(&suffixless_lit.to_string()); let lit = token::Lit::new(token::Err, symbol, lit.suffix); Some(Lit::from_lit_token(lit, span).unwrap_or_else(|_| unreachable!())) } } } fn error_float_lits_must_have_int_part(&self, token: &Token) { self.struct_span_err(token.span, "float literals must have an integer part") .span_suggestion( token.span, "must have an integer part", pprust::token_to_string(token).into(), Applicability::MachineApplicable, ) .emit(); } fn report_lit_error(&self, err: LitError, lit: token::Lit, span: Span) { // Checks if `s` looks like i32 or u1234 etc. fn looks_like_width_suffix(first_chars: &[char], s: &str) -> bool { s.len() > 1 && s.starts_with(first_chars) && s[1..].chars().all(|c| c.is_ascii_digit()) } let token::Lit { kind, suffix, .. } = lit; match err { // `NotLiteral` is not an error by itself, so we don't report // it and give the parser opportunity to try something else. LitError::NotLiteral => {} // `LexerError` *is* an error, but it was already reported // by lexer, so here we don't report it the second time. LitError::LexerError => {} LitError::InvalidSuffix => { self.expect_no_suffix( span, &format!("{} {} literal", kind.article(), kind.descr()), suffix, ); } LitError::InvalidIntSuffix => { let suf = suffix.expect("suffix error with no suffix").as_str(); if looks_like_width_suffix(&['i', 'u'], &suf) { // If it looks like a width, try to be helpful. let msg = format!("invalid width `{}` for integer literal", &suf[1..]); self.struct_span_err(span, &msg) .help("valid widths are 8, 16, 32, 64 and 128") .emit(); } else { let msg = format!("invalid suffix `{}` for number literal", suf); self.struct_span_err(span, &msg) .span_label(span, format!("invalid suffix `{}`", suf)) .help("the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)") .emit(); } } LitError::InvalidFloatSuffix => { let suf = suffix.expect("suffix error with no suffix").as_str(); if looks_like_width_suffix(&['f'], &suf) { // If it looks like a width, try to be helpful. 
let msg = format!("invalid width `{}` for float literal", &suf[1..]); self.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit(); } else { let msg = format!("invalid suffix `{}` for float literal", suf); self.struct_span_err(span, &msg) .span_label(span, format!("invalid suffix `{}`", suf)) .help("valid suffixes are `f32` and `f64`") .emit(); } } LitError::NonDecimalFloat(base) => { let descr = match base { 16 => "hexadecimal", 8 => "octal", 2 => "binary", _ => unreachable!(), }; self.struct_span_err(span, &format!("{} float literal is not supported", descr)) .span_label(span, "not supported") .emit(); } LitError::IntTooLarge => { self.struct_span_err(span, "integer literal is too large").emit(); } } } pub(super) fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<Symbol>) { if let Some(suf) = suffix { let mut err = if kind == "a tuple index" && [sym::i32, sym::u32, sym::isize, sym::usize].contains(&suf) { // #59553: warn instead of reject out of hand to allow the fix to percolate // through the ecosystem when people fix their macros let mut err = self .sess .span_diagnostic .struct_span_warn(sp, &format!("suffixes on {} are invalid", kind)); err.note(&format!( "`{}` is *temporarily* accepted on tuple index fields as it was \ incorrectly accepted on stable for a few releases", suf, )); err.help( "on proc macros, you'll want to use `syn::Index::from` or \ `proc_macro::Literal::*_unsuffixed` for code that will desugar \ to tuple field access", ); err.note( "see issue #60210 <https://github.com/rust-lang/rust/issues/60210> \ for more information", ); err } else { self.struct_span_err(sp, &format!("suffixes on {} are invalid", kind)) }; err.span_label(sp, format!("invalid suffix `{}`", suf)); err.emit(); } } /// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`). /// Keep this in sync with `Token::can_begin_literal_maybe_minus`. pub fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> { maybe_whole_expr!(self); let lo = self.token.span; let minus_present = self.eat(&token::BinOp(token::Minus)); let lit = self.parse_lit()?; let expr = self.mk_expr(lit.span, ExprKind::Lit(lit), AttrVec::new()); if minus_present { Ok(self.mk_expr( lo.to(self.prev_token.span), self.mk_unary(UnOp::Neg, expr), AttrVec::new(), )) } else { Ok(expr) } } fn is_array_like_block(&mut self) -> bool { self.look_ahead(1, |t| matches!(t.kind, TokenKind::Ident(..) | TokenKind::Literal(_))) && self.look_ahead(2, |t| t == &token::Comma) && self.look_ahead(3, |t| t.can_begin_expr()) } /// Emits a suggestion if it looks like the user meant an array but /// accidentally used braces, causing the code to be interpreted as a block /// expression. fn maybe_suggest_brackets_instead_of_braces( &mut self, lo: Span, attrs: AttrVec, ) -> Option<P<Expr>> { let mut snapshot = self.clone(); match snapshot.parse_array_or_repeat_expr(attrs, token::Brace) { Ok(arr) => { let hi = snapshot.prev_token.span; self.struct_span_err( arr.span, "this code is interpreted as a block expression, not an array", ) .multipart_suggestion( "try using [] instead of {}", vec![(lo, "[".to_owned()), (hi, "]".to_owned())], Applicability::MaybeIncorrect, ) .note("to define an array, one would use square brackets instead of curly braces") .emit(); *self = snapshot; Some(self.mk_expr_err(arr.span)) } Err(mut e) => { e.cancel(); None } } } /// Parses a block or unsafe block. 
pub(super) fn parse_block_expr( &mut self, opt_label: Option<Label>, lo: Span, blk_mode: BlockCheckMode, mut attrs: AttrVec, ) -> PResult<'a, P<Expr>> { if self.is_array_like_block() { if let Some(arr) = self.maybe_suggest_brackets_instead_of_braces(lo, attrs.clone()) { return Ok(arr); } } if let Some(label) = opt_label { self.sess.gated_spans.gate(sym::label_break_value, label.ident.span); } if self.token.is_whole_block() { self.struct_span_err(self.token.span, "cannot use a `block` macro fragment here") .span_label(lo.to(self.token.span), "the `block` fragment is within this context") .emit(); } let (inner_attrs, blk) = self.parse_block_common(lo, blk_mode)?; attrs.extend(inner_attrs); Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs)) } /// Recover on an explicitly quantified closure expression, e.g., `for<'a> |x: &'a u8| *x + 1`. fn recover_quantified_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.token.span; let _ = self.parse_late_bound_lifetime_defs()?; let span_for = lo.to(self.prev_token.span); let closure = self.parse_closure_expr(attrs)?; self.struct_span_err(span_for, "cannot introduce explicit parameters for a closure") .span_label(closure.span, "the parameters are attached to this closure") .span_suggestion( span_for, "remove the parameters", String::new(), Applicability::MachineApplicable, ) .emit(); Ok(self.mk_expr_err(lo.to(closure.span))) } /// Parses a closure expression (e.g., `move |args| expr`). fn parse_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.token.span; let movability = if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable }; let asyncness = if self.token.uninterpolated_span().rust_2018() { self.parse_asyncness() } else { Async::No }; let capture_clause = self.parse_capture_clause()?; let decl = self.parse_fn_block_decl()?; let decl_hi = self.prev_token.span; let mut body = match decl.output { FnRetTy::Default(_) => { let restrictions = self.restrictions - Restrictions::STMT_EXPR; self.parse_expr_res(restrictions, None)? } _ => { // If an explicit return type is given, require a block to appear (RFC 968). let body_lo = self.token.span; self.parse_block_expr(None, body_lo, BlockCheckMode::Default, AttrVec::new())? } }; if let Async::Yes { span, .. } = asyncness { // Feature-gate `async ||` closures. self.sess.gated_spans.gate(sym::async_closure, span); } if self.token.kind == TokenKind::Semi && self.token_cursor.frame.delim == DelimToken::Paren { // It is likely that the closure body is a block whose // braces have been removed. We will recover and eat the next // statements later in the parsing process. body = self.mk_expr_err(body.span); } let body_span = body.span; let closure = self.mk_expr( lo.to(body.span), ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)), attrs, ); // Disable recovery for closure body let spans = ClosureSpans { whole_closure: closure.span, closing_pipe: decl_hi, body: body_span }; self.current_closure = Some(spans); Ok(closure) } /// Parses an optional `move` prefix to a closure-like construct. 
fn parse_capture_clause(&mut self) -> PResult<'a, CaptureBy> { if self.eat_keyword(kw::Move) { // Check for `move async` and recover if self.check_keyword(kw::Async) { let move_async_span = self.token.span.with_lo(self.prev_token.span.data().lo); Err(self.incorrect_move_async_order_found(move_async_span)) } else { Ok(CaptureBy::Value) } } else { Ok(CaptureBy::Ref) } } /// Parses the `|arg, arg|` header of a closure. fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> { let inputs = if self.eat(&token::OrOr) { Vec::new() } else { self.expect(&token::BinOp(token::Or))?; let args = self .parse_seq_to_before_tokens( &[&token::BinOp(token::Or), &token::OrOr], SeqSep::trailing_allowed(token::Comma), TokenExpectType::NoExpect, |p| p.parse_fn_block_param(), )? .0; self.expect_or()?; args }; let output = self.parse_ret_ty(AllowPlus::Yes, RecoverQPath::Yes, RecoverReturnSign::Yes)?; Ok(P(FnDecl { inputs, output })) } /// Parses a parameter in a closure header (e.g., `|arg, arg|`). fn parse_fn_block_param(&mut self) -> PResult<'a, Param> { let lo = self.token.span; let attrs = self.parse_outer_attributes()?; self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { let pat = this.parse_pat_no_top_alt(PARAM_EXPECTED)?; let ty = if this.eat(&token::Colon) { this.parse_ty()? } else { this.mk_ty(this.prev_token.span, TyKind::Infer) }; Ok(( Param { attrs: attrs.into(), ty, pat, span: lo.to(this.token.span), id: DUMMY_NODE_ID, is_placeholder: false, }, TrailingToken::MaybeComma, )) }) } /// Parses an `if` expression (`if` token already eaten). fn parse_if_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.prev_token.span; let cond = self.parse_cond_expr()?; // Verify that the parsed `if` condition makes sense as a condition. If it is a block, then // verify that the last statement is either an implicit return (no `;`) or an explicit // return. This won't catch blocks with an explicit `return`, but that would be caught by // the dead code lint. let thn = if self.eat_keyword(kw::Else) || !cond.returns() { self.error_missing_if_cond(lo, cond.span) } else { let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery. let not_block = self.token != token::OpenDelim(token::Brace); let block = self.parse_block().map_err(|mut err| { if not_block { err.span_label(lo, "this `if` expression has a condition, but no block"); if let ExprKind::Binary(_, _, ref right) = cond.kind { if let ExprKind::Block(_, _) = right.kind { err.help("maybe you forgot the right operand of the condition?"); } } } err })?; self.error_on_if_block_attrs(lo, false, block.span, &attrs); block }; let els = if self.eat_keyword(kw::Else) { Some(self.parse_else_expr()?) } else { None }; Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::If(cond, thn, els), attrs)) } fn error_missing_if_cond(&self, lo: Span, span: Span) -> P<ast::Block> { let sp = self.sess.source_map().next_point(lo); self.struct_span_err(sp, "missing condition for `if` expression") .span_label(sp, "expected if condition here") .emit(); self.mk_block_err(span) } /// Parses the condition of an `if` or `while` expression. fn parse_cond_expr(&mut self) -> PResult<'a, P<Expr>> { let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; if let ExprKind::Let(..) = cond.kind { // Remove the last feature gating of a `let` expression since it's stable. self.sess.gated_spans.ungate_last(sym::let_chains, cond.span); } Ok(cond) } /// Parses a `let $pat = $expr` pseudo-expression. 
/// The `let` token has already been eaten. fn parse_let_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.prev_token.span; let pat = self.parse_pat_allow_top_alt(None, RecoverComma::Yes, RecoverColon::Yes)?; self.expect(&token::Eq)?; let expr = self.with_res(self.restrictions | Restrictions::NO_STRUCT_LITERAL, |this| { this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into()) })?; let span = lo.to(expr.span); self.sess.gated_spans.gate(sym::let_chains, span); Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span), attrs)) } /// Parses an `else { ... }` expression (`else` token already eaten). fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> { let ctx_span = self.prev_token.span; // `else` let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery. let expr = if self.eat_keyword(kw::If) { self.parse_if_expr(AttrVec::new())? } else { let blk = self.parse_block()?; self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new()) }; self.error_on_if_block_attrs(ctx_span, true, expr.span, &attrs); Ok(expr) } fn error_on_if_block_attrs( &self, ctx_span: Span, is_ctx_else: bool, branch_span: Span, attrs: &[ast::Attribute], ) { let (span, last) = match attrs { [] => return, [x0 @ xn] | [x0, .., xn] => (x0.span.to(xn.span), xn.span), }; let ctx = if is_ctx_else { "else" } else { "if" }; self.struct_span_err(last, "outer attributes are not allowed on `if` and `else` branches") .span_label(branch_span, "the attributes are attached to this branch") .span_label(ctx_span, format!("the branch belongs to this `{}`", ctx)) .span_suggestion( span, "remove the attributes", String::new(), Applicability::MachineApplicable, ) .emit(); } /// Parses `for <src_pat> in <src_expr> <src_loop_block>` (`for` token already eaten). fn parse_for_expr( &mut self, opt_label: Option<Label>, lo: Span, mut attrs: AttrVec, ) -> PResult<'a, P<Expr>> { // Record whether we are about to parse `for (`. // This is used below for recovery in case of `for ( $stuff ) $block` // in which case we will suggest `for $stuff $block`. let begin_paren = match self.token.kind { token::OpenDelim(token::Paren) => Some(self.token.span), _ => None, }; let pat = self.parse_pat_allow_top_alt(None, RecoverComma::Yes, RecoverColon::Yes)?; if !self.eat_keyword(kw::In) { self.error_missing_in_for_loop(); } self.check_for_for_in_in_typo(self.prev_token.span); let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; let pat = self.recover_parens_around_for_head(pat, begin_paren); let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let kind = ExprKind::ForLoop(pat, expr, loop_block, opt_label); Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs)) } fn error_missing_in_for_loop(&mut self) { let (span, msg, sugg) = if self.token.is_ident_named(sym::of) { // Possibly using JS syntax (#75311). let span = self.token.span; self.bump(); (span, "try using `in` here instead", "in") } else { (self.prev_token.span.between(self.token.span), "try adding `in` here", " in ") }; self.struct_span_err(span, "missing `in` in `for` loop") .span_suggestion_short( span, msg, sugg.into(), // Has been misleading, at least in the past (closed Issue #48492). Applicability::MaybeIncorrect, ) .emit(); } /// Parses a `while` or `while let` expression (`while` token already eaten). 
fn parse_while_expr( &mut self, opt_label: Option<Label>, lo: Span, mut attrs: AttrVec, ) -> PResult<'a, P<Expr>> { let cond = self.parse_cond_expr()?; let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::While(cond, body, opt_label), attrs)) } /// Parses `loop { ... }` (`loop` token already eaten). fn parse_loop_expr( &mut self, opt_label: Option<Label>, lo: Span, mut attrs: AttrVec, ) -> PResult<'a, P<Expr>> { let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::Loop(body, opt_label), attrs)) } fn eat_label(&mut self) -> Option<Label> { self.token.lifetime().map(|ident| { self.bump(); Label { ident } }) } /// Parses a `match ... { ... }` expression (`match` token already eaten). fn parse_match_expr(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> { let match_span = self.prev_token.span; let lo = self.prev_token.span; let scrutinee = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?; if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) { if self.token == token::Semi { e.span_suggestion_short( match_span, "try removing this `match`", String::new(), Applicability::MaybeIncorrect, // speculative ); } return Err(e); } attrs.extend(self.parse_inner_attributes()?); let mut arms: Vec<Arm> = Vec::new(); while self.token != token::CloseDelim(token::Brace) { match self.parse_arm() { Ok(arm) => arms.push(arm), Err(mut e) => { // Recover by skipping to the end of the block. e.emit(); self.recover_stmt(); let span = lo.to(self.token.span); if self.token == token::CloseDelim(token::Brace) { self.bump(); } return Ok(self.mk_expr(span, ExprKind::Match(scrutinee, arms), attrs)); } } } let hi = self.token.span; self.bump(); Ok(self.mk_expr(lo.to(hi), ExprKind::Match(scrutinee, arms), attrs)) } /// Attempt to recover from match arm body with statements and no surrounding braces. fn parse_arm_body_missing_braces( &mut self, first_expr: &P<Expr>, arrow_span: Span, ) -> Option<P<Expr>> { if self.token.kind != token::Semi { return None; } let start_snapshot = self.clone(); let semi_sp = self.token.span; self.bump(); // `;` let mut stmts = vec![self.mk_stmt(first_expr.span, ast::StmtKind::Expr(first_expr.clone()))]; let err = |this: &mut Parser<'_>, stmts: Vec<ast::Stmt>| { let span = stmts[0].span.to(stmts[stmts.len() - 1].span); let mut err = this.struct_span_err(span, "`match` arm body without braces"); let (these, s, are) = if stmts.len() > 1 { ("these", "s", "are") } else { ("this", "", "is") }; err.span_label( span, &format!( "{these} statement{s} {are} not surrounded by a body", these = these, s = s, are = are ), ); err.span_label(arrow_span, "while parsing the `match` arm starting here"); if stmts.len() > 1 { err.multipart_suggestion( &format!("surround the statement{} with a body", s), vec![ (span.shrink_to_lo(), "{ ".to_string()), (span.shrink_to_hi(), " }".to_string()), ], Applicability::MachineApplicable, ); } else { err.span_suggestion( semi_sp, "use a comma to end a `match` arm expression", ",".to_string(), Applicability::MachineApplicable, ); } err.emit(); this.mk_expr_err(span) }; // We might have either a `,` -> `;` typo, or a block without braces. We need // a more subtle parsing strategy. loop { if self.token.kind == token::CloseDelim(token::Brace) { // We have reached the closing brace of the `match` expression. 
return Some(err(self, stmts)); } if self.token.kind == token::Comma { *self = start_snapshot; return None; } let pre_pat_snapshot = self.clone(); match self.parse_pat_no_top_alt(None) { Ok(_pat) => { if self.token.kind == token::FatArrow { // Reached arm end. *self = pre_pat_snapshot; return Some(err(self, stmts)); } } Err(mut err) => { err.cancel(); } } *self = pre_pat_snapshot; match self.parse_stmt_without_recovery(true, ForceCollect::No) { // Consume statements for as long as possible. Ok(Some(stmt)) => { stmts.push(stmt); } Ok(None) => { *self = start_snapshot; break; } // We couldn't parse either yet another statement missing its // enclosing block or the next arm's pattern or closing brace. Err(mut stmt_err) => { stmt_err.cancel(); *self = start_snapshot; break; } } } None } pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> { let attrs = self.parse_outer_attributes()?; self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { let lo = this.token.span; let pat = this.parse_pat_allow_top_alt(None, RecoverComma::Yes, RecoverColon::Yes)?; let guard = if this.eat_keyword(kw::If) { let if_span = this.prev_token.span; let cond = this.parse_expr()?; if let ExprKind::Let(..) = cond.kind { // Remove the last feature gating of a `let` expression since it's stable. this.sess.gated_spans.ungate_last(sym::let_chains, cond.span); let span = if_span.to(cond.span); this.sess.gated_spans.gate(sym::if_let_guard, span); } Some(cond) } else { None }; let arrow_span = this.token.span; if let Err(mut err) = this.expect(&token::FatArrow) { // We might have a `=>` -> `=` or `->` typo (issue #89396). if TokenKind::FatArrow .similar_tokens() .map_or(false, |similar_tokens| similar_tokens.contains(&this.token.kind)) { err.span_suggestion( this.token.span, "try using a fat arrow here", "=>".to_string(), Applicability::MaybeIncorrect, ); err.emit(); this.bump(); } else { return Err(err); } } let arm_start_span = this.token.span; let expr = this.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| { err.span_label(arrow_span, "while parsing the `match` arm starting here"); err })?; let require_comma = classify::expr_requires_semi_to_be_stmt(&expr) && this.token != token::CloseDelim(token::Brace); let hi = this.prev_token.span; if require_comma { let sm = this.sess.source_map(); if let Some(body) = this.parse_arm_body_missing_braces(&expr, arrow_span) { let span = body.span; return Ok(( ast::Arm { attrs: attrs.into(), pat, guard, body, span, id: DUMMY_NODE_ID, is_placeholder: false, }, TrailingToken::None, )); } this.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]).map_err( |mut err| { match (sm.span_to_lines(expr.span), sm.span_to_lines(arm_start_span)) { (Ok(ref expr_lines), Ok(ref arm_start_lines)) if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col && expr_lines.lines.len() == 2 && this.token == token::FatArrow => { // We check whether there's any trailing code in the parse span, // if there isn't, we very likely have the following: // // X | &Y => "y" // | -- - missing comma // | | // | arrow_span // X | &X => "x" // | - ^^ self.token.span // | | // | parsed until here as `"y" & X` err.span_suggestion_short( arm_start_span.shrink_to_hi(), "missing a comma here to end this `match` arm", ",".to_owned(), Applicability::MachineApplicable, ); } _ => { err.span_label( arrow_span, "while parsing the `match` arm starting here", ); } } err }, )?; } else { this.eat(&token::Comma); } Ok(( ast::Arm { attrs: attrs.into(), pat, guard, body: expr, span: 
lo.to(hi), id: DUMMY_NODE_ID, is_placeholder: false, }, TrailingToken::None, )) }) } /// Parses a `try {...}` expression (`try` token already eaten). fn parse_try_block(&mut self, span_lo: Span, mut attrs: AttrVec) -> PResult<'a, P<Expr>> { let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); if self.eat_keyword(kw::Catch) { let mut error = self.struct_span_err( self.prev_token.span, "keyword `catch` cannot follow a `try` block", ); error.help("try using `match` on the result of the `try` block instead"); error.emit(); Err(error) } else { let span = span_lo.to(body.span); self.sess.gated_spans.gate(sym::try_blocks, span); Ok(self.mk_expr(span, ExprKind::TryBlock(body), attrs)) } } fn is_do_catch_block(&self) -> bool { self.token.is_keyword(kw::Do) && self.is_keyword_ahead(1, &[kw::Catch]) && self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) && !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL) } fn is_try_block(&self) -> bool { self.token.is_keyword(kw::Try) && self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) && self.token.uninterpolated_span().rust_2018() } /// Parses an `async move? {...}` expression. fn parse_async_block(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> { let lo = self.token.span; self.expect_keyword(kw::Async)?; let capture_clause = self.parse_capture_clause()?; let (iattrs, body) = self.parse_inner_attrs_and_block()?; attrs.extend(iattrs); let kind = ExprKind::Async(capture_clause, DUMMY_NODE_ID, body); Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs)) } fn is_async_block(&self) -> bool { self.token.is_keyword(kw::Async) && (( // `async move {` self.is_keyword_ahead(1, &[kw::Move]) && self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace)) ) || ( // `async {` self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace)) )) } fn is_certainly_not_a_block(&self) -> bool { self.look_ahead(1, |t| t.is_ident()) && ( // `{ ident, ` cannot start a block. self.look_ahead(2, |t| t == &token::Comma) || self.look_ahead(2, |t| t == &token::Colon) && ( // `{ ident: token, ` cannot start a block. self.look_ahead(4, |t| t == &token::Comma) || // `{ ident: ` cannot start a block unless it's a type ascription `ident: Type`. self.look_ahead(3, |t| !t.can_begin_type()) ) ) } fn maybe_parse_struct_expr( &mut self, qself: Option<&ast::QSelf>, path: &ast::Path, attrs: &AttrVec, ) -> Option<PResult<'a, P<Expr>>> { let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL); if struct_allowed || self.is_certainly_not_a_block() { if let Err(err) = self.expect(&token::OpenDelim(token::Brace)) { return Some(Err(err)); } let expr = self.parse_struct_expr(qself.cloned(), path.clone(), attrs.clone(), true); if let (Ok(expr), false) = (&expr, struct_allowed) { // This is a struct literal, but we can't accept them here. 
self.error_struct_lit_not_allowed_here(path.span, expr.span); } return Some(expr); } None } fn error_struct_lit_not_allowed_here(&self, lo: Span, sp: Span) { self.struct_span_err(sp, "struct literals are not allowed here") .multipart_suggestion( "surround the struct literal with parentheses", vec![(lo.shrink_to_lo(), "(".to_string()), (sp.shrink_to_hi(), ")".to_string())], Applicability::MachineApplicable, ) .emit(); } pub(super) fn parse_struct_fields( &mut self, pth: ast::Path, recover: bool, close_delim: token::DelimToken, ) -> PResult<'a, (Vec<ExprField>, ast::StructRest, bool)> { let mut fields = Vec::new(); let mut base = ast::StructRest::None; let mut recover_async = false; let mut async_block_err = |e: &mut DiagnosticBuilder<'_>, span: Span| { recover_async = true; e.span_label(span, "`async` blocks are only allowed in Rust 2018 or later"); e.help(&format!("set `edition = \"{}\"` in `Cargo.toml`", LATEST_STABLE_EDITION)); e.note("for more on editions, read https://doc.rust-lang.org/edition-guide"); }; while self.token != token::CloseDelim(close_delim) { if self.eat(&token::DotDot) { let exp_span = self.prev_token.span; // We permit `.. }` on the left-hand side of a destructuring assignment. if self.check(&token::CloseDelim(close_delim)) { self.sess.gated_spans.gate(sym::destructuring_assignment, self.prev_token.span); base = ast::StructRest::Rest(self.prev_token.span.shrink_to_hi()); break; } match self.parse_expr() { Ok(e) => base = ast::StructRest::Base(e), Err(mut e) if recover => { e.emit(); self.recover_stmt(); } Err(e) => return Err(e), } self.recover_struct_comma_after_dotdot(exp_span); break; } let recovery_field = self.find_struct_error_after_field_looking_code(); let parsed_field = match self.parse_expr_field() { Ok(f) => Some(f), Err(mut e) => { if pth == kw::Async { async_block_err(&mut e, pth.span); } else { e.span_label(pth.span, "while parsing this struct"); } e.emit(); // If the next token is a comma, then try to parse // what comes next as additional fields, rather than // bailing out until next `}`. if self.token != token::Comma { self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore); if self.token != token::Comma { break; } } None } }; match self.expect_one_of(&[token::Comma], &[token::CloseDelim(close_delim)]) { Ok(_) => { if let Some(f) = parsed_field.or(recovery_field) { // Only include the field if there's no parse error for the field name. fields.push(f); } } Err(mut e) => { if pth == kw::Async { async_block_err(&mut e, pth.span); } else { e.span_label(pth.span, "while parsing this struct"); if let Some(f) = recovery_field { fields.push(f); e.span_suggestion( self.prev_token.span.shrink_to_hi(), "try adding a comma", ",".into(), Applicability::MachineApplicable, ); } } if !recover { return Err(e); } e.emit(); self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore); self.eat(&token::Comma); } } } Ok((fields, base, recover_async)) } /// Precondition: already parsed the '{'. 
pub(super) fn parse_struct_expr( &mut self, qself: Option<ast::QSelf>, pth: ast::Path, attrs: AttrVec, recover: bool, ) -> PResult<'a, P<Expr>> { let lo = pth.span; let (fields, base, recover_async) = self.parse_struct_fields(pth.clone(), recover, token::Brace)?; let span = lo.to(self.token.span); self.expect(&token::CloseDelim(token::Brace))?; let expr = if recover_async { ExprKind::Err } else { ExprKind::Struct(P(ast::StructExpr { qself, path: pth, fields, rest: base })) }; Ok(self.mk_expr(span, expr, attrs)) } /// Use in case of error after field-looking code: `S { foo: () with a }`. fn find_struct_error_after_field_looking_code(&self) -> Option<ExprField> { match self.token.ident() { Some((ident, is_raw)) if (is_raw || !ident.is_reserved()) && self.look_ahead(1, |t| *t == token::Colon) => { Some(ast::ExprField { ident, span: self.token.span, expr: self.mk_expr_err(self.token.span), is_shorthand: false, attrs: AttrVec::new(), id: DUMMY_NODE_ID, is_placeholder: false, }) } _ => None, } } fn recover_struct_comma_after_dotdot(&mut self, span: Span) { if self.token != token::Comma { return; } self.struct_span_err( span.to(self.prev_token.span), "cannot use a comma after the base struct", ) .span_suggestion_short( self.token.span, "remove this comma", String::new(), Applicability::MachineApplicable, ) .note("the base struct must always be the last field") .emit(); self.recover_stmt(); } /// Parses `ident (COLON expr)?`. fn parse_expr_field(&mut self) -> PResult<'a, ExprField> { let attrs = self.parse_outer_attributes()?; self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { let lo = this.token.span; // Check if a colon exists one ahead. This means we're parsing a fieldname. let is_shorthand = !this.look_ahead(1, |t| t == &token::Colon || t == &token::Eq); let (ident, expr) = if is_shorthand { // Mimic `x: x` for the `x` field shorthand. let ident = this.parse_ident_common(false)?; let path = ast::Path::from_ident(ident); (ident, this.mk_expr(ident.span, ExprKind::Path(None, path), AttrVec::new())) } else { let ident = this.parse_field_name()?; this.error_on_eq_field_init(ident); this.bump(); // `:` (ident, this.parse_expr()?)<|fim▁hole|> Ok(( ast::ExprField { ident, span: lo.to(expr.span), expr, is_shorthand, attrs: attrs.into(), id: DUMMY_NODE_ID, is_placeholder: false, }, TrailingToken::MaybeComma, )) }) } /// Check for `=`. This means the source incorrectly attempts to /// initialize a field with an eq rather than a colon. 
fn error_on_eq_field_init(&self, field_name: Ident) { if self.token != token::Eq { return; } self.struct_span_err(self.token.span, "expected `:`, found `=`") .span_suggestion( field_name.span.shrink_to_hi().to(self.token.span), "replace equals symbol with a colon", ":".to_string(), Applicability::MachineApplicable, ) .emit(); } fn err_dotdotdot_syntax(&self, span: Span) { self.struct_span_err(span, "unexpected token: `...`") .span_suggestion( span, "use `..` for an exclusive range", "..".to_owned(), Applicability::MaybeIncorrect, ) .span_suggestion( span, "or `..=` for an inclusive range", "..=".to_owned(), Applicability::MaybeIncorrect, ) .emit(); } fn err_larrow_operator(&self, span: Span) { self.struct_span_err(span, "unexpected token: `<-`") .span_suggestion( span, "if you meant to write a comparison against a negative value, add a \ space in between `<` and `-`", "< -".to_string(), Applicability::MaybeIncorrect, ) .emit(); } fn mk_assign_op(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind { ExprKind::AssignOp(binop, lhs, rhs) } fn mk_range( &mut self, start: Option<P<Expr>>, end: Option<P<Expr>>, limits: RangeLimits, ) -> ExprKind { if end.is_none() && limits == RangeLimits::Closed { self.inclusive_range_with_incorrect_end(self.prev_token.span); ExprKind::Err } else { ExprKind::Range(start, end, limits) } } fn mk_unary(&self, unop: UnOp, expr: P<Expr>) -> ExprKind { ExprKind::Unary(unop, expr) } fn mk_binary(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind { ExprKind::Binary(binop, lhs, rhs) } fn mk_index(&self, expr: P<Expr>, idx: P<Expr>) -> ExprKind { ExprKind::Index(expr, idx) } fn mk_call(&self, f: P<Expr>, args: Vec<P<Expr>>) -> ExprKind { ExprKind::Call(f, args) } fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> P<Expr> { let span = lo.to(self.prev_token.span); let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), AttrVec::new()); self.recover_from_await_method_call(); await_expr } crate fn mk_expr(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> { P(Expr { kind, span, attrs, id: DUMMY_NODE_ID, tokens: None }) } pub(super) fn mk_expr_err(&self, span: Span) -> P<Expr> { self.mk_expr(span, ExprKind::Err, AttrVec::new()) } /// Create expression span ensuring the span of the parent node /// is larger than the span of lhs and rhs, including the attributes. fn mk_expr_sp(&self, lhs: &P<Expr>, lhs_span: Span, rhs_span: Span) -> Span { lhs.attrs .iter() .find(|a| a.style == AttrStyle::Outer) .map_or(lhs_span, |a| a.span) .to(rhs_span) } fn collect_tokens_for_expr( &mut self, attrs: AttrWrapper, f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, P<Expr>>, ) -> PResult<'a, P<Expr>> { self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| { let res = f(this, attrs)?; let trailing = if this.restrictions.contains(Restrictions::STMT_EXPR) && this.token.kind == token::Semi { TrailingToken::Semi } else { // FIXME - pass this through from the place where we know // we need a comma, rather than assuming that `#[attr] expr,` // always captures a trailing comma TrailingToken::MaybeComma }; Ok((res, trailing)) }) } }<|fim▁end|>
};
<|file_name|>wave_exact_sine.cc<|end_file_name|><|fim▁begin|>/* File produced by Kranc */ #define KRANC_C #include <assert.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "cctk.h" #include "cctk_Arguments.h" #include "cctk_Parameters.h" #include "GenericFD.h" #include "Differencing.h" #include "cctk_Loop.h" #include "loopcontrol.h" /* Define macros used in calculations */ #define INITVALUE (42) #define QAD(x) (SQR(SQR(x))) #define INV(x) ((1.0) / (x)) #define SQR(x) ((x) * (x)) #define CUB(x) ((x) * (x) * (x)) static void wave_exact_sine_Body(cGH const * restrict const cctkGH, int const dir, int const face, CCTK_REAL const normal[3], CCTK_REAL const tangentA[3], CCTK_REAL const tangentB[3], int const imin[3], int const imax[3], int const n_subblock_gfs, CCTK_REAL * restrict const subblock_gfs[]) { DECLARE_CCTK_ARGUMENTS; DECLARE_CCTK_PARAMETERS; /* Declare finite differencing variables */ /* Include user-supplied include files */ /* Initialise finite differencing variables */ ptrdiff_t const di = 1; ptrdiff_t const dj = CCTK_GFINDEX3D(cctkGH,0,1,0) - CCTK_GFINDEX3D(cctkGH,0,0,0); ptrdiff_t const dk = CCTK_GFINDEX3D(cctkGH,0,0,1) - CCTK_GFINDEX3D(cctkGH,0,0,0); ptrdiff_t const cdi = sizeof(CCTK_REAL) * di; ptrdiff_t const cdj = sizeof(CCTK_REAL) * dj; ptrdiff_t const cdk = sizeof(CCTK_REAL) * dk; CCTK_REAL const dx = ToReal(CCTK_DELTA_SPACE(0)); CCTK_REAL const dy = ToReal(CCTK_DELTA_SPACE(1)); CCTK_REAL const dz = ToReal(CCTK_DELTA_SPACE(2)); CCTK_REAL const dt = ToReal(CCTK_DELTA_TIME); CCTK_REAL const t = ToReal(cctk_time); CCTK_REAL const dxi = INV(dx); CCTK_REAL const dyi = INV(dy); CCTK_REAL const dzi = INV(dz); CCTK_REAL const khalf = 0.5; CCTK_REAL const kthird = 1/3.0; CCTK_REAL const ktwothird = 2.0/3.0; CCTK_REAL const kfourthird = 4.0/3.0; CCTK_REAL const keightthird = 8.0/3.0; CCTK_REAL const hdxi = 0.5 * dxi; CCTK_REAL const hdyi = 0.5 * dyi; CCTK_REAL const hdzi = 0.5 * dzi; /* Initialize predefined quantities */ CCTK_REAL const p1o1 = 1; CCTK_REAL const p1o12dx = 0.0833333333333333333333333333333*INV(dx); CCTK_REAL const p1o12dy = 0.0833333333333333333333333333333*INV(dy); CCTK_REAL const p1o12dz = 0.0833333333333333333333333333333*INV(dz); CCTK_REAL const p1o144dxdy = 0.00694444444444444444444444444444*INV(dx)*INV(dy); CCTK_REAL const p1o144dxdz = 0.00694444444444444444444444444444*INV(dx)*INV(dz); CCTK_REAL const p1o144dydz = 0.00694444444444444444444444444444*INV(dy)*INV(dz); CCTK_REAL const p1o2dx = 0.5*INV(dx); CCTK_REAL const p1o2dy = 0.5*INV(dy); CCTK_REAL const p1o2dz = 0.5*INV(dz); CCTK_REAL const p1o4dx2 = 0.25*INV(SQR(dx)); CCTK_REAL const p1o4dxdy = 0.25*INV(dx)*INV(dy); CCTK_REAL const p1o4dxdz = 0.25*INV(dx)*INV(dz); CCTK_REAL const p1o4dy2 = 0.25*INV(SQR(dy)); CCTK_REAL const p1o4dydz = 0.25*INV(dy)*INV(dz); CCTK_REAL const p1o4dz2 = 0.25*INV(SQR(dz)); CCTK_REAL const p1odx = INV(dx); CCTK_REAL const p1odx2 = INV(SQR(dx)); CCTK_REAL const p1odxdydz = INV(dx)*INV(dy)*INV(dz); CCTK_REAL const p1ody = INV(dy); CCTK_REAL const p1ody2 = INV(SQR(dy)); CCTK_REAL const p1odz = INV(dz); CCTK_REAL const p1odz2 = INV(SQR(dz)); CCTK_REAL const pm1o12dx2 = -0.0833333333333333333333333333333*INV(SQR(dx)); CCTK_REAL const pm1o12dy2 = -0.0833333333333333333333333333333*INV(SQR(dy)); CCTK_REAL const pm1o12dz2 = -0.0833333333333333333333333333333*INV(SQR(dz)); CCTK_REAL const pm1o2dx = -0.5*INV(dx); CCTK_REAL const pm1o2dy = -0.5*INV(dy); CCTK_REAL const pm1o2dz = -0.5*INV(dz); /* Assign local copies of arrays functions */ /* Calculate 
temporaries and arrays functions */ /* Copy local copies back to grid functions */ /* Loop over the grid points */ #pragma omp parallel CCTK_LOOP3 (wave_exact_sine, i,j,k, imin[0],imin[1],imin[2], imax[0],imax[1],imax[2], cctk_lsh[0],cctk_lsh[1],cctk_lsh[2]) { ptrdiff_t const index = di*i + dj*j + dk*k; /* Assign local copies of grid functions */ CCTK_REAL xL = x[index]; CCTK_REAL yL = y[index]; CCTK_REAL zL = z[index]; /* Include user supplied include files */ /* Precompute derivatives */ switch(fdOrder) { case 2: break; case 4: break; } /* Calculate temporaries and grid functions */ CCTK_REAL piconst = 3.1415926535897932385; CCTK_REAL phiExactL = Sin(2*piconst*INV(ToReal(periodicity))*(-(cctk_time*sqrt(SQR(ToReal(n1)) + SQR(ToReal(n2)) + SQR(ToReal(n3)))) + xL*ToReal(n1) + yL*ToReal(n2) + zL*ToReal(n3)))*ToReal(amplitude); CCTK_REAL piExactL = -2*piconst*Cos(2*piconst*INV(ToReal(periodicity))*(-(cctk_time*sqrt(SQR(ToReal(n1)) <|fim▁hole|> /* Copy local copies back to grid functions */ phiExact[index] = phiExactL; piExact[index] = piExactL; } CCTK_ENDLOOP3 (wave_exact_sine); } extern "C" void wave_exact_sine(CCTK_ARGUMENTS) { DECLARE_CCTK_ARGUMENTS; DECLARE_CCTK_PARAMETERS; if (verbose > 1) { CCTK_VInfo(CCTK_THORNSTRING,"Entering wave_exact_sine_Body"); } if (cctk_iteration % wave_exact_sine_calc_every != wave_exact_sine_calc_offset) { return; } const char *groups[] = {"Wave::exact","grid::coordinates"}; GenericFD_AssertGroupStorage(cctkGH, "wave_exact_sine", 2, groups); switch(fdOrder) { case 2: break; case 4: break; } GenericFD_LoopOverEverything(cctkGH, &wave_exact_sine_Body); if (verbose > 1) { CCTK_VInfo(CCTK_THORNSTRING,"Leaving wave_exact_sine_Body"); } }<|fim▁end|>
+ SQR(ToReal(n2)) + SQR(ToReal(n3)))) + xL*ToReal(n1) + yL*ToReal(n2) + zL*ToReal(n3)))*INV(ToReal(periodicity))*sqrt(SQR(ToReal(n1)) + SQR(ToReal(n2)) + SQR(ToReal(n3)))*ToReal(amplitude);
<|file_name|>gameCtrl.js<|end_file_name|><|fim▁begin|>define(function() { 'use strict'; /* @ngInject */ var gameCtrl = function($scope, commoditySrvc, citySrvc, accountSrvc, gameSrvc, tutorialSrvc, $modal) { //todo: find a better way to expose services to template this.gameSrvc = gameSrvc; this.commoditySrvc = commoditySrvc; this.citySrvc = citySrvc; this.accountSrvc = accountSrvc; this.tutorialOptions = tutorialSrvc.options; this.$modal = $modal; this.$scope = $scope; if (!citySrvc.currentCity) { commoditySrvc.setCitySpecialty(); citySrvc.getRandomCity(); commoditySrvc.updatePrices(); } //todo: figure out why this ctrl gets called twice on page load if (gameSrvc.initialLoad) { this.showModal('start'); gameSrvc.initialLoad = false; } }; gameCtrl.prototype.submitScore = function() { this.showModal('gameOver', this.accountSrvc.netWorth); this.gameSrvc.gameOver(); }; gameCtrl.prototype.goToCity = function(city) { this.citySrvc.setCurrentCity(city); this.commoditySrvc.updatePrices(); this.gameSrvc.reduceDaysLeft(); }; gameCtrl.prototype.buyItem = function(item, quantity) { this.commoditySrvc.buyCommodity(item, quantity); }; gameCtrl.prototype.sellItem = function(item) { this.commoditySrvc.sellCommodity(item); }; gameCtrl.prototype.setMarketHoverItem = function(item) { this.marketHoverItem = item.name; }; gameCtrl.prototype.resetMarketHoverItem = function() { this.marketHoverItem = ''; }; <|fim▁hole|> return city.name === this.citySrvc.currentCity.name; }; gameCtrl.prototype.getPotentialProfit = function(item) { var expectedProfit = 'unknown'; if (item.averageSellPrice) { expectedProfit = ((item.averageSellPrice - item.currentPrice) * item.maxQuantityPurchasable) / 100; } return expectedProfit; }; gameCtrl.prototype.openMenu = function() { this.showModal('gameMenu'); }; gameCtrl.prototype.showModal = function(type, score) { var templateUrl, self = this; switch (type) { case 'start': templateUrl = 'components/game/gameModalStart.tmpl.html'; break; case 'gameMenu': templateUrl = 'components/game/gameModalGameMenu.tmpl.html'; break; case 'gameOver': templateUrl = 'components/game/gameModalGameOver.tmpl.html'; break; } var modalInstance = this.$modal.open({ templateUrl: templateUrl, controller: 'gameModalInstanceCtrl', size: 'sm', resolve: { type: function() { return type; }, score: function() { return score; } } }); modalInstance.result.then(function(action) { switch (action) { case 'startTutorial': self.$scope.startTutorial(); break; case 'resetGame': self.gameSrvc.gameOver(true); break; } }, function() { }); }; return gameCtrl; });<|fim▁end|>
gameCtrl.prototype.isCurrentCity = function(city) {
<|file_name|>test_worker.py<|end_file_name|><|fim▁begin|># Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Test for distributed trial worker side.
"""

import os

from cStringIO import StringIO

from zope.interface.verify import verifyObject

from twisted.trial.reporter import TestResult
from twisted.trial.unittest import TestCase
from twisted.trial._dist.worker import (
    LocalWorker, LocalWorkerAMP, LocalWorkerTransport, WorkerProtocol)
from twisted.trial._dist import managercommands, workercommands
from twisted.scripts import trial
from twisted.test.proto_helpers import StringTransport
from twisted.internet.interfaces import ITransport, IAddress
from twisted.internet.defer import fail, succeed
from twisted.internet.main import CONNECTION_DONE
from twisted.internet.error import ConnectionDone
from twisted.python.failure import Failure
from twisted.protocols.amp import AMP


class FakeAMP(AMP):
    """
    A fake amp protocol.
    """


class WorkerProtocolTestCase(TestCase):
    """
    Tests for L{WorkerProtocol}.
    """

    def setUp(self):
        """
        Set up a transport, a result stream and a protocol instance.
        """
        self.serverTransport = StringTransport()
        self.clientTransport = StringTransport()
        self.server = WorkerProtocol()
        self.server.makeConnection(self.serverTransport)
        self.client = FakeAMP()
        self.client.makeConnection(self.clientTransport)

    def test_run(self):
        """
        Calling the L{workercommands.Run} command on the client returns a
        response with C{success} set to C{True}.
        """
        d = self.client.callRemote(workercommands.Run, testCase="doesntexist")

        def check(result):
            self.assertTrue(result['success'])

        d.addCallback(check)
        self.server.dataReceived(self.clientTransport.value())
        self.clientTransport.clear()
        self.client.dataReceived(self.serverTransport.value())
        self.serverTransport.clear()
        return d

    def test_start(self):
        """
        The C{start} command changes the current path.
        """
        curdir = os.path.realpath(os.path.curdir)
        self.addCleanup(os.chdir, curdir)
        self.server.start('..')
        self.assertNotEqual(os.path.realpath(os.path.curdir), curdir)


class LocalWorkerAMPTestCase(TestCase):
    """
    Test case for distributed trial's manager-side local worker AMP protocol
    """

    def setUp(self):
        self.managerTransport = StringTransport()
        self.managerAMP = LocalWorkerAMP()
        self.managerAMP.makeConnection(self.managerTransport)
        self.result = TestResult()
        self.workerTransport = StringTransport()
        self.worker = AMP()
        self.worker.makeConnection(self.workerTransport)

        config = trial.Options()
        self.testName = "twisted.doesnexist"
        config['tests'].append(self.testName)
        self.testCase = trial._getSuite(config)._tests.pop()

        self.managerAMP.run(self.testCase, self.result)
        self.managerTransport.clear()

    def pumpTransports(self):
        """
        Sends data from C{self.workerTransport} to C{self.managerAMP}, and
        then data from C{self.managerTransport} back to C{self.worker}.
        """
        self.managerAMP.dataReceived(self.workerTransport.value())
        self.workerTransport.clear()
        self.worker.dataReceived(self.managerTransport.value())

    def test_runSuccess(self):
        """
        Run a test, and succeed.
        """
        results = []

        d = self.worker.callRemote(managercommands.AddSuccess,
                                   testName=self.testName)
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertTrue(results)

    def test_runExpectedFailure(self):
        """
        Run a test, and fail expectedly.
        """
        results = []

        d = self.worker.callRemote(managercommands.AddExpectedFailure,
                                   testName=self.testName, error='error',
                                   todo='todoReason')
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertEqual(self.testCase, self.result.expectedFailures[0][0])
        self.assertTrue(results)

    def test_runError(self):
        """
        Run a test, and encounter an error.
        """
        results = []

        d = self.worker.callRemote(managercommands.AddError,
                                   testName=self.testName, error='error',
                                   errorClass='exceptions.ValueError',
                                   frames=[])
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertEqual(self.testCase, self.result.errors[0][0])
        self.assertTrue(results)

    def test_runErrorWithFrames(self):
        """
        L{LocalWorkerAMP._buildFailure} recreates the C{Failure.frames} from
        the C{frames} argument passed to C{AddError}.
        """
        results = []

        d = self.worker.callRemote(managercommands.AddError,
                                   testName=self.testName, error='error',
                                   errorClass='exceptions.ValueError',
                                   frames=["file.py", "invalid code", "3"])
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertEqual(self.testCase, self.result.errors[0][0])
        self.assertEqual(
            [('file.py', 'invalid code', 3, [], [])],
            self.result.errors[0][1].frames)
        self.assertTrue(results)

    def test_runFailure(self):
        """
        Run a test, and fail.
        """
        results = []

        d = self.worker.callRemote(managercommands.AddFailure,
                                   testName=self.testName, fail='fail',
                                   failClass='exceptions.RuntimeError',
                                   frames=[])
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertEqual(self.testCase, self.result.failures[0][0])
        self.assertTrue(results)

    def test_runSkip(self):
        """
        Run a test, but skip it.
        """
        results = []

        d = self.worker.callRemote(managercommands.AddSkip,
                                   testName=self.testName, reason='reason')
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertEqual(self.testCase, self.result.skips[0][0])
        self.assertTrue(results)

    def test_runUnexpectedSuccesses(self):
        """
        Run a test, and succeed unexpectedly.
        """
        results = []

        d = self.worker.callRemote(managercommands.AddUnexpectedSuccess,
                                   testName=self.testName, todo='todo')
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertEqual(self.testCase, self.result.unexpectedSuccesses[0][0])
        self.assertTrue(results)

    def test_testWrite(self):
        """
        L{LocalWorkerAMP.testWrite} writes the data received to its test
        stream.
        """
        results = []
        stream = StringIO()
        self.managerAMP.setTestStream(stream)

        d = self.worker.callRemote(managercommands.TestWrite,
                                   out="Some output")
        d.addCallback(lambda result: results.append(result['success']))
        self.pumpTransports()

        self.assertEqual("Some output\n", stream.getvalue())
        self.assertTrue(results)

    def test_stopAfterRun(self):
        """
        L{LocalWorkerAMP.run} calls C{stopTest} on its test result once the
        C{Run} command has succeeded.
        """
        result = object()
        stopped = []

        def fakeCallRemote(command, testCase):
            return succeed(result)

        self.managerAMP.callRemote = fakeCallRemote

        class StopTestResult(TestResult):

            def stopTest(self, test):
                stopped.append(test)

        d = self.managerAMP.run(self.testCase, StopTestResult())
        self.assertEqual([self.testCase], stopped)
        return d.addCallback(self.assertIdentical, result)


class FakeAMProtocol(AMP):
    """
    A fake implementation of L{AMP} for testing.
    """
    id = 0
    dataString = ""

    def dataReceived(self, data):
        self.dataString += data

    def setTestStream(self, stream):
        self.testStream = stream


class FakeTransport(object):
    """
    A fake process transport implementation for testing.
    """
    dataString = ""
    calls = 0

    def writeToChild(self, fd, data):
        self.dataString += data

    def loseConnection(self):
        self.calls += 1


class LocalWorkerTestCase(TestCase):
    """
    Tests for L{LocalWorker} and L{LocalWorkerTransport}.
    """

    def test_childDataReceived(self):
        """
        L{LocalWorker.childDataReceived} forwards the received data to the
        linked L{AMP} protocol if it comes from the right file descriptor,
        otherwise forwards to C{ProcessProtocol.childDataReceived}.
        """
        fakeTransport = FakeTransport()
        localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
        localWorker.makeConnection(fakeTransport)
        localWorker._outLog = StringIO()
        localWorker.childDataReceived(4, "foo")
        localWorker.childDataReceived(1, "bar")

        self.assertEqual("foo", localWorker._ampProtocol.dataString)
        self.assertEqual("bar", localWorker._outLog.getvalue())

    def test_outReceived(self):
        """
        L{LocalWorker.outReceived} logs the output into its C{_outLog} log
        file.
        """
        fakeTransport = FakeTransport()
        localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
        localWorker.makeConnection(fakeTransport)
        localWorker._outLog = StringIO()
        data = "The quick brown fox jumps over the lazy dog"
        localWorker.outReceived(data)

        self.assertEqual(data, localWorker._outLog.getvalue())

    def test_errReceived(self):
        """
        L{LocalWorker.errReceived} logs the errors into its C{_errLog} log
        file.
        """
        fakeTransport = FakeTransport()
        localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
        localWorker.makeConnection(fakeTransport)
        localWorker._errLog = StringIO()
        data = "The quick brown fox jumps over the lazy dog"
        localWorker.errReceived(data)

        self.assertEqual(data, localWorker._errLog.getvalue())

    def test_write(self):
        """
        L{LocalWorkerTransport.write} forwards the written data to the given
        transport.
        """
        transport = FakeTransport()
        localTransport = LocalWorkerTransport(transport)
        data = "The quick brown fox jumps over the lazy dog"
        localTransport.write(data)

        self.assertEqual(data, transport.dataString)

    def test_writeSequence(self):
        """
        L{LocalWorkerTransport.writeSequence} forwards the written data to the
        given transport.
        """
        transport = FakeTransport()
        localTransport = LocalWorkerTransport(transport)
        data = ("The quick ", "brown fox jumps ", "over the lazy dog")
        localTransport.writeSequence(data)

        self.assertEqual("".join(data), transport.dataString)

    def test_loseConnection(self):
        """
        L{LocalWorkerTransport.loseConnection} forwards the call to the given
        transport.
        """
        transport = FakeTransport()
        localTransport = LocalWorkerTransport(transport)
        localTransport.loseConnection()

        self.assertEqual(transport.calls, 1)

    def test_connectionLost(self):
        """
        L{LocalWorker.connectionLost} closes the log streams.
        """

        class FakeStream(object):
            callNumber = 0

            def close(self):
<|fim▁hole|>
        transport = FakeTransport()
        localWorker = LocalWorker(FakeAMProtocol(), '.', 'test.log')
        localWorker.makeConnection(transport)
        localWorker._outLog = FakeStream()
        localWorker._errLog = FakeStream()
        localWorker.connectionLost(None)

        self.assertEqual(localWorker._outLog.callNumber, 1)
        self.assertEqual(localWorker._errLog.callNumber, 1)

    def test_processEnded(self):
        """
        L{LocalWorker.processEnded} calls C{connectionLost} on itself and on
        the L{AMP} protocol.
        """

        class FakeStream(object):
            callNumber = 0

            def close(self):
                self.callNumber += 1

        transport = FakeTransport()
        protocol = FakeAMProtocol()
        localWorker = LocalWorker(protocol, '.', 'test.log')
        localWorker.makeConnection(transport)
        localWorker._outLog = FakeStream()
        localWorker.processEnded(Failure(CONNECTION_DONE))

        self.assertEqual(localWorker._outLog.callNumber, 1)
        self.assertIdentical(None, protocol.transport)
        return self.assertFailure(localWorker.endDeferred, ConnectionDone)

    def test_addresses(self):
        """
        L{LocalWorkerTransport.getPeer} and L{LocalWorkerTransport.getHost}
        return L{IAddress} objects.
        """
        localTransport = LocalWorkerTransport(None)
        self.assertTrue(verifyObject(IAddress, localTransport.getPeer()))
        self.assertTrue(verifyObject(IAddress, localTransport.getHost()))

    def test_transport(self):
        """
        L{LocalWorkerTransport} implements L{ITransport} to be able to be
        used by L{AMP}.
        """
        localTransport = LocalWorkerTransport(None)
        self.assertTrue(verifyObject(ITransport, localTransport))

    def test_startError(self):
        """
        L{LocalWorker} swallows the exceptions returned by the L{AMP}
        protocol start method, as it generates unnecessary errors.
        """

        def failCallRemote(command, directory):
            return fail(RuntimeError("oops"))

        transport = FakeTransport()
        protocol = FakeAMProtocol()
        protocol.callRemote = failCallRemote
        localWorker = LocalWorker(protocol, '.', 'test.log')
        localWorker.makeConnection(transport)

        self.assertEqual([], self.flushLoggedErrors(RuntimeError))<|fim▁end|>
self.callNumber += 1
<|file_name|>tabs.py<|end_file_name|><|fim▁begin|># Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.utils.translation import ugettext_lazy as _

from horizon import exceptions
from horizon import tabs

from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova

from openstack_dashboard.dashboards.admin.info import constants
from openstack_dashboard.dashboards.admin.info import tables


class ServicesTab(tabs.TableTab):
    table_classes = (tables.ServicesTable,)
    name = _("Services")
    slug = "services"<|fim▁hole|>
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME

    def get_services_data(self):
        request = self.tab_group.request
        services = []
        for i, service in enumerate(request.user.service_catalog):
            service['id'] = i
            services.append(
                keystone.Service(service, request.user.services_region))
        return services


class NovaServicesTab(tabs.TableTab):
    table_classes = (tables.NovaServicesTable,)
    name = _("Compute Services")
    slug = "nova_services"
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME
    permissions = ('openstack.services.compute',)

    def get_nova_services_data(self):
        try:
            services = nova.service_list(self.tab_group.request)
        except Exception:
            msg = _('Unable to get nova services list.')
            exceptions.check_message(["Connection", "refused"], msg)
            exceptions.handle(self.request, msg)
            services = []
        return services


class CinderServicesTab(tabs.TableTab):
    table_classes = (tables.CinderServicesTable,)
    name = _("Block Storage Services")
    slug = "cinder_services"
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME
    permissions = ('openstack.services.volume',)

    def get_cinder_services_data(self):
        try:
            services = cinder.service_list(self.tab_group.request)
        except Exception:
            msg = _('Unable to get cinder services list.')
            exceptions.check_message(["Connection", "refused"], msg)
            exceptions.handle(self.request, msg)
            services = []
        return services


class NetworkAgentsTab(tabs.TableTab):
    table_classes = (tables.NetworkAgentsTable,)
    name = _("Network Agents")
    slug = "network_agents"
    template_name = constants.INFO_DETAIL_TEMPLATE_NAME

    def allowed(self, request):
        try:
            return (base.is_service_enabled(request, 'network') and
                    neutron.is_extension_supported(request, 'agent'))
        except Exception:
            exceptions.handle(request, _('Unable to get network agents info.'))
            return False

    def get_network_agents_data(self):
        try:
            agents = neutron.agent_list(self.tab_group.request)
        except Exception:
            msg = _('Unable to get network agents list.')
            exceptions.check_message(["Connection", "refused"], msg)
            exceptions.handle(self.request, msg)
            agents = []
        return agents


class SystemInfoTabs(tabs.TabGroup):
    slug = "system_info"
    tabs = (ServicesTab, NovaServicesTab, CinderServicesTab,
            NetworkAgentsTab)
    sticky = True<|fim▁end|>
<|file_name|>cell_manhattan_inv.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Noise-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! An example of using cell range noise

extern crate noise;

use noise::{cell2_manhattan_inv, cell3_manhattan_inv, cell4_manhattan_inv, Seed, Point2};

mod debug;

fn main() {
    debug::render_png("cell2_manhattan_inv.png", &Seed::new(0), 1024, 1024, scaled_cell2_manhattan_inv);
    debug::render_png("cell3_manhattan_inv.png", &Seed::new(0), 1024, 1024, scaled_cell3_manhattan_inv);
    debug::render_png("cell4_manhattan_inv.png", &Seed::new(0), 1024, 1024, scaled_cell4_manhattan_inv);<|fim▁hole|>
    println!("\nGenerated cell2_manhattan_inv.png, cell3_manhattan_inv.png and cell4_manhattan_inv.png");
}

fn scaled_cell2_manhattan_inv(seed: &Seed, point: &Point2<f64>) -> f64 {
    cell2_manhattan_inv(seed, &[point[0] / 16.0, point[1] / 16.0]) * 2.0 - 1.0
}

fn scaled_cell3_manhattan_inv(seed: &Seed, point: &Point2<f64>) -> f64 {
    cell3_manhattan_inv(seed, &[point[0] / 16.0, point[1] / 16.0, point[0] / 16.0]) * 2.0 - 1.0
}

fn scaled_cell4_manhattan_inv(seed: &Seed, point: &Point2<f64>) -> f64 {
    cell4_manhattan_inv(seed, &[point[0] / 16.0, point[1] / 16.0, point[0] / 16.0, point[1] / 16.0]) * 2.0 - 1.0
}<|fim▁end|>
<|file_name|>UpdateBodyClassSaga.ts<|end_file_name|><|fim▁begin|>import * as C from "../../actions/ActionCreators";
import * as Routing from "react-router-redux";

import { Effect } from "redux-saga/effects";
import { takeLatest } from "redux-saga";

/**
 * Saga handler to load the strings<|fim▁hole|>
        let suffix: string = (action.payload as any).pathname.split("/").pop();
        let className: string = "body--" + suffix;
        document.body.className = className;
    }
}

/**
 * Saga watcher to watch every latest LANGUAGE_CHANGED action
 */
function* updateBodyClassSaga(): IterableIterator<any> {
    yield takeLatest(Routing.LOCATION_CHANGE, updateBodyClass as any);
}

export default updateBodyClassSaga;<|fim▁end|>
 */
function* updateBodyClass(action: Routing.RouterAction): IterableIterator<Effect | any> {
    if (document && document.body) {
<|file_name|>ActionGrabFrame.cc<|end_file_name|><|fim▁begin|>/*
	Copyright (C) 2013 Edwin Velds

	This file is part of Polka 2.

	Polka 2 is free software: you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation, either version 3 of the License, or
	(at your option) any later version.

	Polka 2 is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with Polka 2. If not, see <http://www.gnu.org/licenses/>.
*/

#include "ActionGrabFrame.h"
#include "AccelManager.h"

namespace Polka {

ActionGrabFrame::ActionGrabFrame()
{
	set_can_focus();
	add_events(Gdk::ENTER_NOTIFY_MASK | Gdk::BUTTON_PRESS_MASK | Gdk::KEY_PRESS_MASK);
}

ActionGrabFrame::~ActionGrabFrame()
{
}

IntIntSignal ActionGrabFrame::signalButtonGrabbed()
{
	return m_SignalButtonGrabbed;
}

IntIntSignal ActionGrabFrame::signalKeyGrabbed()
{<|fim▁hole|>
	return m_SignalKeyGrabbed;
}

bool ActionGrabFrame::on_draw( const Cairo::RefPtr<Cairo::Context>& cr )
{
	Gtk::DrawingArea::on_draw(cr);

	// draw the frame
	const Gtk::Allocation& a = get_allocation();

	auto sc = get_style_context();
	sc->context_save();
	// draw frame or focus
	if( has_focus() ) {
		sc->render_focus( cr, 0, 0, a.get_width(), a.get_height() );
	} else {
		sc->add_class( GTK_STYLE_CLASS_FRAME );
		sc->render_frame( cr, 0, 0, a.get_width(), a.get_height() );
	}
	sc->context_restore();

	return true;
}

bool ActionGrabFrame::on_enter_notify_event(GdkEventCrossing* event)
{
	grab_focus();
	return true;
}

bool ActionGrabFrame::on_button_press_event(GdkEventButton *event)
{
	if( event->button < DBL_CLICK ) {
		int b = event->button + (event->type == GDK_2BUTTON_PRESS ? DBL_CLICK:0);
		m_SignalButtonGrabbed.emit( b, event->state & MOD_ALL );
	}
	return true;
}

bool ActionGrabFrame::on_key_press_event (GdkEventKey* event)
{
	if( !event->is_modifier ) {
		int k = gdk_keyval_to_upper(event->keyval);
		m_SignalKeyGrabbed.emit( k, event->state & MOD_ALL );
	}
	return true;
}

} // namespace Polka<|fim▁end|>
//~^ ERROR type parameter `T` must be used as the type parameter for some local type }
<|file_name|>cred-username.d.ts<|end_file_name|><|fim▁begin|>import { Cred } from './cred'; export class CredUsername { /** * * * @type {Cred} * @memberof CredUsername */ parent: Cred; /** * * * @type {string} * @memberof CredUsername */ username: string;<|fim▁hole|>}<|fim▁end|>
<|file_name|>filters-test.js<|end_file_name|><|fim▁begin|>import filterReducer from "../filters"; describe("store/reducers/ui/filters", () => { it("should return the initial state", () => { const state = filterReducer(undefined, { type: "SOME_ACTION" }); expect(state).toEqual({ project: {} }); }); it("should set the project filter correctly", () => { const action = {<|fim▁hole|> type: "SET_PROJECT_FILTERS", payload: { published: true } }; const state = filterReducer({}, action); expect(state).toEqual({ project: { published: true } }); }); });<|fim▁end|>
<|file_name|>export_dynpro.ts<|end_file_name|><|fim▁begin|>import {Statement} from "./_statement"; import {verNot, str, seq, IStatementRunnable} from "../combi"; import {Source} from "../expressions"; import {Version} from "../../version"; export class ExportDynpro extends Statement {<|fim▁hole|> const ret = seq(str("EXPORT DYNPRO"), new Source(), new Source(), new Source(), new Source(), str("ID"), new Source()); return verNot(Version.Cloud, ret); } }<|fim▁end|>
public getMatcher(): IStatementRunnable {
<|file_name|>bfe_sciencewise.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- ## ## This file is part of Invenio. ## Copyright (C) 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """BibFormat element - ScienceWise.info This elements displays a linking icon to ScienceWise.info for arXiv records. """ import cgi import re from invenio.config import CFG_SITE_URL, CFG_SITE_LANG from invenio.messages import gettext_set_language _RE_MODERN_ARXIV = re.compile('(arxiv:)?(?P<number>\d{4}.\d{4}(v\d+)?)') _RE_OLD_ARXIV = re.compile('(arxiv:)?(?P<number>\w+-\w+/\d{7}(v\d+)?)') _RE_BAD_OLD_ARXIV = re.compile('(arxiv:)?(?P<archive>\w+-\w+)-(?P<number>\d{7}(v\d+)?)') def format_element(bfo): """ If the record has an ArXiv reportnumber, displays a ScienceWise icon to bookmark it. """ _ = gettext_set_language(bfo.lang) for tag in ('037__a', '088__a'): for reportnumber in bfo.fields(tag): icon = create_sciencewise_icon(reportnumber) if icon:<|fim▁hole|> return icon return "" def get_arxiv_reportnumber(bfo): """ Return an ArXiv reportnumber (if any) from the corresponding record. Return empty string otherwise. """ for tag in ('037__a', '088__a'): for reportnumber in bfo.fields(tag): reportnumber = reportnumber.lower() for regexp in (_RE_MODERN_ARXIV, _RE_OLD_ARXIV): g = regexp.match(reportnumber) if g: return g.group('number') return "" def escape_values(bfo): """ Called by BibFormat in order to check if output of this element should be escaped. """ return 0 def create_sciencewise_url(reportnumber): """ If the reportnumber is a valid arXiv reportnumber return a ScienceWise.info URL. """ reportnumber = reportnumber.lower() g = _RE_BAD_OLD_ARXIV.match(reportnumber) if g: reportnumber = '%s/%s' % (g.group('archive'), g.group('number')) for regexp in (_RE_MODERN_ARXIV, _RE_OLD_ARXIV): g = regexp.match(reportnumber) if g: return "http://sciencewise.info/bookmarks/%s/add" % g.group('number') return "" def create_sciencewise_icon(reportnumber, lang=CFG_SITE_LANG): """ If the reportnumber is a valid arXiv reportnumber return a ScienceWise.info icon. """ _ = gettext_set_language(lang) reportnumber = reportnumber.lower() g = _RE_BAD_OLD_ARXIV.match(reportnumber) if g: reportnumber = '%s/%s' % (g.group('archive'), g.group('number')) for regexp in (_RE_MODERN_ARXIV, _RE_OLD_ARXIV): g = regexp.match(reportnumber) if g: return """\ <a href="http://sciencewise.info/bookmarks/%(id)s/add" target="_blank" title="%(title)s"><img src="%(siteurl)s/img/sciencewise.png" width="23" height="16" alt="ScienceWise.info icon" /></a>""" % { 'id': cgi.escape(g.group('number'), True), 'title': cgi.escape(_("Add this article to your ScienceWise.info bookmarks"), True), 'siteurl': cgi.escape(CFG_SITE_URL, True) } return ""<|fim▁end|>
<|file_name|>test_pyqt4.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- #----------------------------------------------------------------------------- # Copyright (c) 2013, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License with exception # for distributing bootloader. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- import sys from PyQt4 import Qt from PyQt4 import QtCore from PyQt4 import QtGui class MyDialog(QtGui.QDialog): def __init__(self): super(MyDialog, self).__init__() self.label = Qt.QLabel( u"Press <ESC> to exit. Some non-ascii chars: řčšěíáŘ", self) self.setWindowTitle("Hello World from PyQt4") #self.resize(500, 300) self.show() def sizeHint(self): return self.label.sizeHint() def keyPressEvent(self, event): if event.key() == QtCore.Qt.Key_Escape: self.close() def main(): app = Qt.QApplication(sys.argv) read_formats = ', '.join([unicode(format).lower() \<|fim▁hole|> print("Qt4 plugin paths: " + unicode(list(app.libraryPaths()))) print("Qt4 image read support: " + read_formats) print('Qt4 Libraries path: ' + unicode(QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.LibrariesPath))) ex = MyDialog() app.exec_() if __name__ == "__main__": main()<|fim▁end|>
for format in QtGui.QImageReader.supportedImageFormats()])
<|file_name|>connectors-bezier.js<|end_file_name|><|fim▁begin|>/* * jsPlumb * * Title:jsPlumb 2.0.2 * * Provides a way to visually connect elements on an HTML page, using SVG. * * This file contains the code for the Bezier connector type. * * Copyright (c) 2010 - 2015 jsPlumb ([email protected]) * * http://jsplumbtoolkit.com * http://github.com/sporritt/jsplumb * * Dual licensed under the MIT and GPL2 licenses. */ ; (function () { "use strict"; var root = this, _jp = root.jsPlumb, _ju = root.jsPlumbUtil; var Bezier = function (params) { params = params || {}; var _super = _jp.Connectors.AbstractConnector.apply(this, arguments), majorAnchor = params.curviness || 150, minorAnchor = 10; this.type = "Bezier"; this.getCurviness = function () { return majorAnchor; }; this._findControlPoint = function (point, sourceAnchorPosition, targetAnchorPosition, sourceEndpoint, targetEndpoint, soo, too) { // determine if the two anchors are perpendicular to each other in their orientation. we swap the control // points around if so (code could be tightened up) var perpendicular = soo[0] != too[0] || soo[1] == too[1], p = []; if (!perpendicular) { if (soo[0] === 0) // X p.push(sourceAnchorPosition[0] < targetAnchorPosition[0] ? point[0] + minorAnchor : point[0] - minorAnchor); else p.push(point[0] - (majorAnchor * soo[0])); if (soo[1] === 0) // Y p.push(sourceAnchorPosition[1] < targetAnchorPosition[1] ? point[1] + minorAnchor : point[1] - minorAnchor); else p.push(point[1] + (majorAnchor * too[1])); } else { if (too[0] === 0) // X p.push(targetAnchorPosition[0] < sourceAnchorPosition[0] ? point[0] + minorAnchor : point[0] - minorAnchor); else p.push(point[0] + (majorAnchor * too[0])); if (too[1] === 0) // Y p.push(targetAnchorPosition[1] < sourceAnchorPosition[1] ? point[1] + minorAnchor : point[1] - minorAnchor); else p.push(point[1] + (majorAnchor * soo[1])); } return p; }; this._compute = function (paintInfo, p) {<|fim▁hole|> _sx = sp[0] < tp[0] ? _w : 0, _sy = sp[1] < tp[1] ? _h : 0, _tx = sp[0] < tp[0] ? 0 : _w, _ty = sp[1] < tp[1] ? 0 : _h, _CP = this._findControlPoint([_sx, _sy], sp, tp, p.sourceEndpoint, p.targetEndpoint, paintInfo.so, paintInfo.to), _CP2 = this._findControlPoint([_tx, _ty], tp, sp, p.targetEndpoint, p.sourceEndpoint, paintInfo.to, paintInfo.so); _super.addSegment(this, "Bezier", { x1: _sx, y1: _sy, x2: _tx, y2: _ty, cp1x: _CP[0], cp1y: _CP[1], cp2x: _CP2[0], cp2y: _CP2[1] }); }; }; _ju.extend(Bezier, _jp.Connectors.AbstractConnector); _jp.registerConnectorType(Bezier, "Bezier"); }).call(this);<|fim▁end|>
var sp = p.sourcePos, tp = p.targetPos, _w = Math.abs(sp[0] - tp[0]), _h = Math.abs(sp[1] - tp[1]),
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>""" Qizx Python API bindings :copyright: (c) 2015 by Michael Paddon<|fim▁hole|> QizxNotFoundError, QizxAccessControlError, QizxXMLDataError, QizxCompilationError, QizxEvaluationError, QizxTimeoutError, QizxImportError, UnexpectedResponseError, TransactionError ) __title__ = 'qizx' __version__ = '1.0.2' __author__ = "Michael Paddon" __license__ = 'MIT' __copyright__ = "Copyright 2015 Michael Paddon"<|fim▁end|>
:license: MIT, see LICENSE for more details. """ from .qizx import ( Client, QizxError, QizxBadRequestError, QizxServerError,
<|file_name|>datatable.ts<|end_file_name|><|fim▁begin|>import { NgModule, Component, ElementRef, AfterContentInit, AfterViewInit, AfterViewChecked, OnInit, OnDestroy, DoCheck, Input, ViewContainerRef, ViewChild, Output, SimpleChange, EventEmitter, ContentChild, ContentChildren, Renderer, IterableDiffers, QueryList, TemplateRef, ChangeDetectorRef, Inject, forwardRef, NgZone } from '@angular/core'; import { CommonModule } from '@angular/common'; import { FormsModule } from '@angular/forms' import { SharedModule } from '../common/shared'; import { PaginatorModule } from '../paginator/paginator'; import { Column, Header, Footer, HeaderColumnGroup, FooterColumnGroup, PrimeTemplate } from '../common/shared'; import { LazyLoadEvent, FilterMetadata, SortMeta } from '../common/api'; import { DomHandler } from '../dom/domhandler'; import { ObjectUtils } from '../utils/ObjectUtils'; import { Subscription } from 'rxjs/Subscription'; import { BlockableUI } from '../common/api'; import { GlobalEventsManager } from '../common/globalevent'; import * as FileSaver from 'file-saver'; import * as XLSX from 'xlsx-style'; @Component({ selector: 'p-dtRadioButton', template: ` <div class="ui-radiobutton ui-widget"> <div class="ui-helper-hidden-accessible"> <input type="radio" [checked]="checked"> </div> <div class="ui-radiobutton-box ui-widget ui-radiobutton-relative ui-state-default" (click)="handleClick($event)" (mouseenter)="hover=true" (mouseleave)="hover=false" [ngClass]="{'ui-state-hover':hover,'ui-state-active':checked}"> <span class="ui-radiobutton-icon" [ngClass]="{'fa fa-circle':checked}"></span> </div> </div> ` }) export class DTRadioButton { @Input() checked: boolean; @Output() onClick: EventEmitter<any> = new EventEmitter(); public hover: boolean; handleClick(event) { this.onClick.emit(event); } } @Component({ selector: 'p-dtCheckbox', template: ` <div class="ui-chkbox ui-widget"> <div class="ui-helper-hidden-accessible"> <input type="checkbox" [checked]="checked"> </div> <div class="ui-chkbox-box ui-widget ui-corner-all ui-state-default" (click)="handleClick($event)" (mouseover)="hover=true" (mouseout)="hover=false" [ngClass]="{'ui-state-hover':hover&&!disabled,'ui-state-active':checked&&!disabled,'ui-state-disabled':disabled}"> <span class="ui-chkbox-icon ui-c" [ngClass]="{'fa fa-check':checked}"></span> </div> </div> ` }) export class DTCheckbox { @Input() checked: boolean; @Input() disabled: boolean; @Output() onChange: EventEmitter<any> = new EventEmitter(); public hover: boolean; handleClick(event) { if (!this.disabled) { this.onChange.emit({ originalEvent: event, checked: !this.checked }); } } } @Component({ selector: 'p-rowExpansionLoader', template: `` }) export class RowExpansionLoader { @Input() template: TemplateRef<any>; @Input() rowData: any; constructor(public viewContainer: ViewContainerRef) { } ngOnInit() { let view = this.viewContainer.createEmbeddedView(this.template, { '\$implicit': this.rowData }); } } @Component({ selector: '[pColumnHeaders]', template: ` <ng-template ngFor let-col [ngForOf]="columns" let-lastCol="last"> <th #headerCell [ngStyle]="col.style" [class]="col.styleClass" [style.display]="col.hidden ? 
'none' : 'table-cell'" (click)="dt.sort($event,col)" [attr.colspan]="col.colspan" [attr.rowspan]="col.rowspan" [ngClass]="{'ui-state-default ui-unselectable-text':true, 'ui-sortable-column': col.sortable, 'ui-state-active': dt.isSorted(col), 'ui-resizable-column': dt.resizableColumns, 'ui-selection-column':col.selectionMode}" (dragstart)="dt.onColumnDragStart($event)" (dragover)="dt.onColumnDragover($event)" (dragleave)="dt.onColumnDragleave($event)" (drop)="dt.onColumnDrop($event)" (mousedown)="dt.onHeaderMousedown($event,headerCell)" [attr.tabindex]="col.sortable ? tabindex : null" (keydown)="dt.onHeaderKeydown($event,col)"> <span class="ui-column-resizer" *ngIf="dt.resizableColumns && ((dt.columnResizeMode == 'fit' && !lastCol) || dt.columnResizeMode == 'expand')" (mousedown)="dt.initColumnResize($event)"></span> <span class="ui-column-title" *ngIf="!col.selectionMode&&!col.headerTemplate">{{col.header}}</span> <span class="ui-column-title" *ngIf="col.headerTemplate"> <p-columnHeaderTemplateLoader [column]="col"></p-columnHeaderTemplateLoader> </span> <span class="ui-sortable-column-icon fa fa-fw fa-sort" *ngIf="col.sortable" [ngClass]="{'fa-sort-desc': (dt.getSortOrder(col) == -1),'fa-sort-asc': (dt.getSortOrder(col) == 1)}"></span> <input type="text" class="ui-column-filter ui-inputtext ui-widget ui-state-default ui-corner-all" [attr.placeholder]="col.filterPlaceholder" *ngIf="col.filter&&!col.filterTemplate" [value]="dt.filters[col.field] ? dt.filters[col.field].value : ''" (click)="dt.onFilterInputClick($event)" (blur)="dt.onFilterKeyup($event,$event.target.value, col.field, col.filterMatchMode)" (keyup)="dt.onFilterKeyup($event,$event.target.value, col.field, col.filterMatchMode)"/> <p-columnFilterTemplateLoader [column]="col" *ngIf="col.filterTemplate"></p-columnFilterTemplateLoader> <p-dtCheckbox *ngIf="col.selectionMode=='multiple'" (onChange)="dt.toggleRowsWithCheckbox($event)" [checked]="dt.allSelected" [disabled]="dt.isEmpty()"></p-dtCheckbox> </th> </ng-template> ` }) export class ColumnHeaders { constructor( @Inject(forwardRef(() => DataTable)) public dt: DataTable) { } @Input("pColumnHeaders") columns: Column[]; } @Component({ selector: '[pColumnFooters]', template: ` <td *ngFor="let col of columns" [ngStyle]="col.style" [class]="col.styleClass" [attr.colspan]="col.colspan" [attr.rowspan]="col.rowspan" [ngClass]="{'ui-state-default':true}" [style.display]="col.hidden ? 'none' : 'table-cell'"> <span class="ui-column-footer" *ngIf="!col.footerTemplate">{{col.footer}}</span> <span class="ui-column-footer" *ngIf="col.footerTemplate"> <p-columnFooterTemplateLoader [column]="col"></p-columnFooterTemplateLoader> </span> </td> ` }) export class ColumnFooters { constructor( @Inject(forwardRef(() => DataTable)) public dt: DataTable) { } @Input("pColumnFooters") columns: Column[]; } @Component({ selector: '[pTableBody]', template: ` <ng-template ngFor let-rowData [ngForOf]="dt.dataToRender" let-even="even" let-odd="odd" let-rowIndex="index"> <tr #rowGroupElement class="ui-widget-header ui-rowgroup-header" *ngIf="dt.rowGroupMode=='subheader' && (rowIndex === 0||(dt.resolveFieldData(rowData,dt.groupField) !== dt.resolveFieldData(dt.dataToRender[rowIndex - 1], dt.groupField)))" (click)="dt.onRowGroupClick($event)" [ngStyle]="{'cursor': dt.sortableRowGroup ? 
'pointer' : 'auto'}"> <td [attr.colspan]="columns.length"> <a href="#" *ngIf="dt.expandableRowGroups" (click)="dt.toggleRowGroup($event,rowData)"> <span class="fa fa-fw" [ngClass]="{'fa-chevron-circle-down':dt.isRowGroupExpanded(rowData), 'fa-chevron-circle-right': !dt.isRowGroupExpanded(rowData)}"></span> </a> <span class="ui-rowgroup-header-name"> <p-templateLoader [template]="dt.rowGroupHeaderTemplate" [data]="rowData"></p-templateLoader> </span> </td> </tr> <tr #rowElement *ngIf="!dt.expandableRowGroups||dt.isRowGroupExpanded(rowData)" [class]="dt.getRowStyleClass(rowData,rowIndex)" (click)="dt.handleRowClick($event, rowData)" (dblclick)="dt.rowDblclick($event,rowData)" (contextmenu)="dt.onRowRightClick($event,rowData)" (touchend)="dt.handleRowTouchEnd($event)" [ngClass]="{'ui-datatable-even':even&&dt.rowGroupMode!='rowspan','ui-datatable-odd':odd&&dt.rowGroupMode!='rowspan','ui-state-highlight': dt.isSelected(rowData)}"> <ng-template ngFor let-col [ngForOf]="columns" let-colIndex="index"> <td #cell *ngIf="!dt.rowGroupMode || (dt.rowGroupMode == 'subheader') || (dt.rowGroupMode=='rowspan' && ((dt.sortField==col.field && dt.rowGroupMetadata[dt.resolveFieldData(rowData,dt.sortField)].index == rowIndex) || (dt.sortField!=col.field)))" [ngStyle]="col.style" [class]="col.styleClass" [style.display]="col.hidden ? 'none' : 'table-cell'" [ngClass]="{'ui-editable-column':col.editable,'ui-selection-column':col.selectionMode}" (click)="dt.switchCellToEditMode(cell,col,rowData)" [attr.rowspan]="(dt.rowGroupMode=='rowspan' && dt.sortField == col.field && dt.rowGroupMetadata[dt.resolveFieldData(rowData,dt.sortField)].index == rowIndex) ? dt.rowGroupMetadata[dt.resolveFieldData(rowData,dt.sortField)].size : null"> <span class="ui-column-title" *ngIf="dt.responsive">{{col.header}}</span> <span class="ui-cell-data" *ngIf="!col.bodyTemplate && !col.expander && !col.selectionMode">{{dt.resolveFieldData(rowData,col.field)}}</span> <span class="ui-cell-data" *ngIf="col.bodyTemplate"> <p-columnBodyTemplateLoader [column]="col" [rowData]="rowData" [rowIndex]="rowIndex + dt.first"></p-columnBodyTemplateLoader> </span> <div class="ui-cell-editor" *ngIf="col.editable"> <input *ngIf="!col.editorTemplate" type="text" [(ngModel)]="rowData[col.field]" required="true" (keydown)="dt.onCellEditorKeydown($event, col, rowData, rowIndex)" class="ui-inputtext ui-widget ui-state-default ui-corner-all"/> <a *ngIf="col.editorTemplate" class="ui-cell-editor-proxy-focus" href="#" (focus)="dt.onCustomEditorFocusPrev($event, colIndex)"></a> <p-columnEditorTemplateLoader *ngIf="col.editorTemplate" [column]="col" [rowData]="rowData" [rowIndex]="rowIndex"></p-columnEditorTemplateLoader> <a *ngIf="col.editorTemplate" class="ui-cell-editor-proxy-focus" href="#" (focus)="dt.onCustomEditorFocusNext($event, colIndex)"></a> </div> <a href="#" *ngIf="col.expander" (click)="dt.toggleRow(rowData,$event)"> <span class="ui-row-toggler fa fa-fw ui-c" [ngClass]="{'fa-chevron-circle-down':dt.isRowExpanded(rowData), 'fa-chevron-circle-right': !dt.isRowExpanded(rowData)}"></span> </a> <p-dtRadioButton *ngIf="col.selectionMode=='single'" (onClick)="dt.selectRowWithRadio($event, rowData)" [checked]="dt.isSelected(rowData)"></p-dtRadioButton> <p-dtCheckbox *ngIf="col.selectionMode=='multiple'" (onChange)="dt.toggleRowWithCheckbox($event,rowData)" [checked]="dt.isSelected(rowData)"></p-dtCheckbox> </td> </ng-template> </tr> <tr class="ui-widget-header" *ngIf="dt.rowGroupFooterTemplate && dt.rowGroupMode=='subheader' && ((rowIndex === 
dt.dataToRender.length - 1)||(dt.resolveFieldData(rowData,dt.groupField) !== dt.resolveFieldData(dt.dataToRender[rowIndex + 1],dt.groupField))) && (!dt.expandableRowGroups || dt.isRowGroupExpanded(rowData))"> <p-templateLoader class="ui-helper-hidden" [data]="rowData" [template]="dt.rowGroupFooterTemplate"></p-templateLoader> </tr> <tr *ngIf="dt.expandableRows && dt.isRowExpanded(rowData)"> <td [attr.colspan]="dt.visibleColumns().length"> <p-rowExpansionLoader [rowData]="rowData" [template]="dt.rowExpansionTemplate"></p-rowExpansionLoader> </td> </tr> </ng-template> <tr *ngIf="dt.isEmpty()" class="ui-widget-content"> <td [attr.colspan]="dt.visibleColumns().length" class="ui-datatable-emptymessage">{{dt.emptyMessage}}</td> </tr> ` }) export class TableBody { constructor( @Inject(forwardRef(() => DataTable)) public dt: DataTable) { } @Input("pTableBody") columns: Column[]; visibleColumns() { return this.columns ? this.columns.filter(c => !c.hidden) : []; } } @Component({ selector: '[pScrollableView]', template: ` <div #scrollHeader class="ui-widget-header ui-datatable-scrollable-header" [ngStyle]="{'width': width}" > <div #scrollHeaderBox class="ui-datatable-scrollable-header-box"> <table [class]="dt.tableStyleClass" [ngStyle]="dt.tableStyle"> <thead class="ui-datatable-thead"> <tr *ngIf="!dt.headerColumnGroup" class="ui-state-default" [pColumnHeaders]="columns"></tr> <ng-template [ngIf]="dt.headerColumnGroup"> <tr *ngFor="let headerRow of dt.headerColumnGroup.rows" class="ui-state-default" [pColumnHeaders]="initpcolumnHeader(headerRow.columns)"></tr> </ng-template> </thead> </table> </div> </div> <div #scrollBody class="ui-datatable-scrollable-body" [ngStyle]="{'width': width,'max-height':dt.scrollHeight}"> <div #scrollTableWrapper style="position:relative" [ngStyle]="{'height':virtualTableHeight}"> <table #scrollTable [class]="dt.tableStyleClass" [ngStyle]="dt.tableStyle" [ngClass]="{'ui-datatable-virtual-table':virtualScroll}" style="top:0px"> <!--<colgroup class="ui-datatable-scrollable-colgroup"> <col *ngFor="let col of dt.visibleColumns()" /> </colgroup>--> <tbody [ngClass]="{'ui-datatable-data ui-widget-content': true, 'ui-datatable-hoverable-rows': (dt.rowHover||dt.selectionMode)}" [pTableBody]="columns"></tbody> </table> </div> </div> <div #scrollFooter class="ui-widget-header ui-datatable-scrollable-footer" [ngStyle]="{'width': width}" *ngIf="dt.hasFooter()"> <div #scrollFooterBox class="ui-datatable-scrollable-footer-box"> <table [class]="dt.tableStyleClass" [ngStyle]="dt.tableStyle"> <tfoot class="ui-datatable-tfoot"> <tr *ngIf="!footerColumnGroup" [pColumnFooters]="columns" class="ui-state-default"></tr> <ng-template [ngIf]="footerColumnGroup"> <tr *ngFor="let footerRow of footerColumnGroup.rows" [pColumnFooters]="footerRow.columns"></tr> </ng-template> </tfoot> </table> </div> </div> ` }) export class ScrollableView implements AfterViewInit, AfterViewChecked, OnDestroy { constructor( @Inject(forwardRef(() => DataTable)) public dt: DataTable, public domHandler: DomHandler, public el: ElementRef, public renderer: Renderer) { } @Input("pScrollableView") columns: Column[]; @ViewChild('scrollHeader') scrollHeaderViewChild: ElementRef; @ViewChild('scrollHeaderBox') scrollHeaderBoxViewChild: ElementRef; @ViewChild('scrollBody') scrollBodyViewChild: ElementRef; @ViewChild('scrollTable') scrollTableViewChild: ElementRef; @ViewChild('scrollTableWrapper') scrollTableWrapperViewChild: ElementRef; @ViewChild('scrollFooter') scrollFooterViewChild: ElementRef; @ViewChild('scrollFooterBox') 
scrollFooterBoxViewChild: ElementRef; @Input() frozen: boolean; @Input() width: string; @Input() virtualScroll: boolean; @Output() onVirtualScroll: EventEmitter<any> = new EventEmitter(); @Input() loading: boolean; public scrollBody: HTMLDivElement; public scrollHeader: HTMLDivElement; public scrollHeaderBox: HTMLDivElement; public scrollTable: HTMLDivElement; public scrollTableWrapper: HTMLDivElement; public scrollFooter: HTMLDivElement; public scrollFooterBox: HTMLDivElement; public bodyScrollListener: Function; public headerScrollListener: Function; public scrollBodyMouseWheelListener: Function; public scrollFunction: Function; public rowHeight: number; public scrollTimeout: any; ngAfterViewInit() { this.initScrolling(); } initpcolumnHeader(columns: Column[]): Column[] { if (!this.frozen) { columns = columns.filter(f => !f.frozen); } else { columns = columns.filter(f => f.frozen); } return columns; } ngAfterViewChecked() { if (this.virtualScroll && !this.rowHeight) { let row = this.domHandler.findSingle(this.scrollTable, 'tr.ui-widget-content'); if (row) { this.rowHeight = this.domHandler.getOuterHeight(row); } } } initScrolling() { this.scrollHeader = <HTMLDivElement>this.scrollHeaderViewChild.nativeElement; this.scrollHeaderBox = <HTMLDivElement>this.scrollHeaderBoxViewChild.nativeElement; this.scrollBody = <HTMLDivElement>this.scrollBodyViewChild.nativeElement; this.scrollTable = <HTMLDivElement>this.scrollTableViewChild.nativeElement; this.scrollTableWrapper = <HTMLDivElement>this.scrollTableWrapperViewChild.nativeElement; this.scrollFooter = this.scrollFooterViewChild ? <HTMLDivElement>this.scrollFooterViewChild.nativeElement : null; this.scrollFooterBox = this.scrollFooterBoxViewChild ? <HTMLDivElement>this.scrollFooterBoxViewChild.nativeElement : null; if (!this.frozen) { let frozenView = this.el.nativeElement.previousElementSibling; if (frozenView) { var frozenScrollBody = this.domHandler.findSingle(frozenView, '.ui-datatable-scrollable-body'); } this.bodyScrollListener = this.renderer.listen(this.scrollBody, 'scroll', (event) => { GlobalEventsManager.onDatatableScrollEvent.emit(true); // var dropdownFilterContainer=document.getElementsByClassName('ui-dropdown-panel'); // // for(let i=0;i<dropdownFilterContainer.length;i++){ // dropdownFilterContainer[i].style.display='none'; // } this.scrollHeaderBox.style.marginLeft = -1 * this.scrollBody.scrollLeft + 'px'; if (this.scrollFooterBox) { this.scrollFooterBox.style.marginLeft = -1 * this.scrollBody.scrollLeft + 'px'; } if (frozenScrollBody) { frozenScrollBody.scrollTop = this.scrollBody.scrollTop; } if (this.virtualScroll) { clearTimeout(this.scrollTimeout); this.scrollTimeout = setTimeout(() => { let viewport = this.domHandler.getOuterHeight(this.scrollBody); let tableHeight = this.domHandler.getOuterHeight(this.scrollTable); let pageHeight = this.rowHeight * this.dt.rows; let virtualTableHeight = parseFloat(this.virtualTableHeight); let pageCount = (virtualTableHeight / pageHeight) || 1; if (this.scrollBody.scrollTop + viewport > parseFloat(this.scrollTable.style.top) + tableHeight || this.scrollBody.scrollTop < parseFloat(this.scrollTable.style.top)) { let page = Math.floor((this.scrollBody.scrollTop * pageCount) / (this.scrollBody.scrollHeight)) + 1; this.onVirtualScroll.emit({ page: page }); this.scrollTable.style.top = ((page - 1) * pageHeight) + 'px'; } }, 200); } }); //to trigger change detection this.scrollBodyMouseWheelListener = this.renderer.listen(this.scrollBody, 'mousewheel', (event) => { }); 
this.headerScrollListener = this.renderer.listen(this.scrollHeader, 'scroll', () => { this.scrollHeader.scrollLeft = 0; }); } let scrollBarWidth = this.domHandler.calculateScrollbarWidth(); if (!this.frozen) { this.scrollHeaderBox.style.marginRight = scrollBarWidth + 'px'; if (this.scrollFooterBox) { this.scrollFooterBox.style.marginRight = scrollBarWidth + 'px'; } } else { this.scrollBody.style.paddingBottom = scrollBarWidth + 'px'; } } get virtualTableHeight(): string { let totalRecords = this.dt.lazy ? this.dt.totalRecords : (this.dt.value ? this.dt.value.length : 0); return (totalRecords * this.rowHeight) + 'px'; } ngOnDestroy() { if (this.bodyScrollListener) { this.bodyScrollListener(); } if (this.scrollBodyMouseWheelListener) { this.scrollBodyMouseWheelListener(); } if (this.headerScrollListener) { this.headerScrollListener(); } } } @Component({ selector: 'p-dataTable', template: ` <div [ngStyle]="style" [class]="styleClass" [style.width]="containerWidth" [ngClass]="{'ui-datatable ui-widget':true,'ui-datatable-reflow':responsive,'ui-datatable-stacked':stacked,'ui-datatable-resizable':resizableColumns,'ui-datatable-scrollable':scrollable}"> <div class="ui-datatable-loading ui-widget-overlay" *ngIf="loading"></div> <div class="ui-datatable-loading-content" *ngIf="loading"> <i class="fa fa-circle-o-notch fa-spin fa-2x"></i> </div> <div class="ui-datatable-header ui-widget-header" *ngIf="header"> <ng-content select="p-header"></ng-content> </div> <p-paginator [rows]="rows" [first]="first" [totalRecords]="totalRecords" [pageLinkSize]="pageLinks" styleClass="ui-paginator-bottom" (onPageChange)="paginate($event)" [rowsPerPageOptions]="rowsPerPageOptions" *ngIf="paginator && paginatorPosition!='bottom' || paginatorPosition =='both'"></p-paginator> <div class="ui-datatable-tablewrapper" *ngIf="!scrollable"> <table [class]="tableStyleClass" [ngStyle]="tableStyle"> <thead class="ui-datatable-thead"> <tr *ngIf="!headerColumnGroup" class="ui-state-default" [pColumnHeaders]="columns"></tr> <ng-template [ngIf]="headerColumnGroup"> <tr *ngFor="let headerRow of headerColumnGroup.rows" class="ui-state-default" [pColumnHeaders]="headerRow.columns"></tr> </ng-template> </thead> <tfoot *ngIf="hasFooter()" class="ui-datatable-tfoot"> <tr *ngIf="!footerColumnGroup" class="ui-state-default" [pColumnFooters]="columns"></tr> <ng-template [ngIf]="footerColumnGroup"> <tr *ngFor="let footerRow of footerColumnGroup.rows" class="ui-state-default" [pColumnFooters]="footerRow.columns"></tr> </ng-template> </tfoot> <tbody [ngClass]="{'ui-datatable-data ui-widget-content': true, 'ui-datatable-hoverable-rows': (rowHover||selectionMode)}" [pTableBody]="columns"></tbody> </table> </div> <ng-template [ngIf]="scrollable"> <div class="ui-datatable-scrollable-wrapper ui-helper-clearfix" [ngClass]="{'max-height':scrollHeight}"> <div *ngIf="frozenColumns" [pScrollableView]="frozenColumns" frozen="true" [ngStyle]="{'width':this.frozenWidth}" class="ui-datatable-scrollable-view ui-datatable-frozen-view"></div> <div [pScrollableView]="scrollableColumns" [ngStyle]="{'width':this.unfrozenWidth, 'left': this.frozenWidth}" class="ui-datatable-scrollable-view" [virtualScroll]="virtualScroll" (onVirtualScroll)="onVirtualScroll($event)" [ngClass]="{'ui-datatable-unfrozen-view': frozenColumns}"></div> </div> </ng-template> <p-paginator [rows]="rows" [first]="first" [totalRecords]="totalRecords" [pageLinkSize]="pageLinks" styleClass="ui-paginator-bottom" (onPageChange)="paginate($event)" [rowsPerPageOptions]="rowsPerPageOptions" 
*ngIf="paginator && paginatorPosition!='top' || paginatorPosition =='both'"></p-paginator> <div class="ui-datatable-footer ui-widget-header" *ngIf="footer"> <ng-content select="p-footer"></ng-content> </div> <div class="ui-column-resizer-helper ui-state-highlight" style="display:none"></div> <span class="fa fa-arrow-down ui-datatable-reorder-indicator-up" style="position: absolute; display: none;"></span> <span class="fa fa-arrow-up ui-datatable-reorder-indicator-down" style="position: absolute; display: none;"></span> </div> `, providers: [DomHandler, ObjectUtils] }) export class DataTable implements AfterViewChecked, AfterViewInit, AfterContentInit, OnInit, DoCheck, OnDestroy, BlockableUI { @Input() paginator: boolean; @Input() rows: number; @Input() totalRecords: number; @Input() pageLinks: number = 5; @Input() rowsPerPageOptions: number[]; @Input() responsive: boolean; @Input() stacked: boolean; @Input() selectionMode: string; @Input() selection: any; @Output() selectionChange: EventEmitter<any> = new EventEmitter(); @Input() editable: boolean; @Output() onRowClick: EventEmitter<any> = new EventEmitter(); @Output() onRowSelect: EventEmitter<any> = new EventEmitter(); @Output() onRowUnselect: EventEmitter<any> = new EventEmitter(); @Output() onRowDblclick: EventEmitter<any> = new EventEmitter(); @Output() onHeaderCheckboxToggle: EventEmitter<any> = new EventEmitter(); @Output() onContextMenuSelect: EventEmitter<any> = new EventEmitter(); @Input() filterDelay: number = 300; @Input() lazy: boolean; @Output() onLazyLoad: EventEmitter<any> = new EventEmitter(); @Input() resizableColumns: boolean; @Input() columnResizeMode: string = 'fit'; @Output() onColResize: EventEmitter<any> = new EventEmitter(); @Input() reorderableColumns: boolean; @Output() onColReorder: EventEmitter<any> = new EventEmitter(); @Input() scrollable: boolean; @Input() virtualScroll: boolean; @Input() scrollHeight: any; @Input() scrollWidth: any; @Input() frozenWidth: any; @Input() unfrozenWidth: any; @Input() style: any; @Input() styleClass: string; @Input() tableStyle: any; @Input() tableStyleClass: string; @Input() globalFilter: any; @Input() sortMode: string = 'single'; @Input() sortField: string; @Input() sortOrder: number = -1; @Input() groupField: string; @Input() multiSortMeta: SortMeta[]; @Input() contextMenu: any; @Input() csvSeparator: string = ','; @Input() exportFilename: string = 'download'; @Input() emptyMessage: string = 'No records found'; @Input() paginatorPosition: string = 'bottom'; @Input() metaKeySelection: boolean = true; @Input() immutable: boolean; @Output() onEditInit: EventEmitter<any> = new EventEmitter(); @Output() onEditComplete: EventEmitter<any> = new EventEmitter(); @Output() onEdit: EventEmitter<any> = new EventEmitter(); @Output() onEditCancel: EventEmitter<any> = new EventEmitter(); @Output() onPage: EventEmitter<any> = new EventEmitter(); @Output() onSort: EventEmitter<any> = new EventEmitter(); @Output() onFilter: EventEmitter<any> = new EventEmitter(); @ContentChild(Header) header; @ContentChild(Footer) footer; @Input() expandableRows: boolean; @Input() expandedRows: any[]; @Input() expandableRowGroups: boolean; @Input() rowExpandMode: string = 'multiple'; @Input() public expandedRowsGroups: any[]; @Input() tabindex: number = 1; @Input() rowStyleClass: Function; @Input() rowGroupMode: string; @Input() sortableRowGroup: boolean = true; @Input() sortFile: string; @Input() rowHover: boolean; @Input() first: number = 0; @Input() public filters: { [s: string]: FilterMetadata; } = {}; 
@Input() dataKey: string; @Input() loading: boolean; @Input() displaysum: boolean; @Output() onRowExpand: EventEmitter<any> = new EventEmitter(); @Output() onRowCollapse: EventEmitter<any> = new EventEmitter(); @Output() onRowGroupExpand: EventEmitter<any> = new EventEmitter(); @Output() onRowGroupCollapse: EventEmitter<any> = new EventEmitter(); @ContentChildren(PrimeTemplate) templates: QueryList<PrimeTemplate>; @ContentChildren(Column) cols: QueryList<Column>; @ContentChild(HeaderColumnGroup) headerColumnGroup: HeaderColumnGroup; @ContentChild(FooterColumnGroup) footerColumnGroup: FooterColumnGroup; public _value: any[]; public dataToRender: any[]; public page: number = 0; public filterTimeout: any; public filteredValue: any[]; public columns: Column[]; public frozenColumns: Column[]; public scrollableColumns: Column[]; public columnsChanged: boolean = false; public dataChanged: boolean = false; public stopSortPropagation: boolean; public sortColumn: Column; public columnResizing: boolean; public lastResizerHelperX: number; public documentClickListener: Function; public documentColumnResizeListener: Function; public documentColumnResizeEndListener: Function; public resizerHelper: any; public resizeColumn: any; public reorderIndicatorUp: any; public reorderIndicatorDown: any; public draggedColumn: any; public dropPosition: number; public tbody: any; public rowTouched: boolean; public rowGroupToggleClick: boolean; public editingCell: any; public stopFilterPropagation: boolean; public rowGroupMetadata: any; public rowGroupHeaderTemplate: TemplateRef<any>; public rowGroupFooterTemplate: TemplateRef<any>; public rowExpansionTemplate: TemplateRef<any>; public scrollBarWidth: number; public editorClick: boolean; differ: any; globalFilterFunction: any; columnsSubscription: Subscription; resizeTimeout: any; datatableHeaderWidth: number; rowGroupScrollableWidthFix: boolean; // public emptyMessageAlignmentTimeout:any; constructor(public el: ElementRef, public domHandler: DomHandler, public differs: IterableDiffers, public renderer: Renderer, public changeDetector: ChangeDetectorRef, public objectUtils: ObjectUtils, private ngZone: NgZone) { window.onresize = (e) => { ngZone.run(() => { this.calculateUnforzenWidth(); }) } } ngOnInit() { if (this.lazy) { this.onLazyLoad.emit(this.createLazyLoadMetadata()); } if (!this.immutable) { this.differ = this.differs.find([]).create(null); } this.calculateUnforzenWidth(); } ngAfterContentInit() { this.initColumns(); this.columnsSubscription = this.cols.changes.subscribe(_ => { this.initColumns(); this.changeDetector.markForCheck(); }); this.templates.forEach((item) => { switch (item.getType()) { case 'rowexpansion': this.rowExpansionTemplate = item.template; break; case 'rowgroupheader': this.rowGroupHeaderTemplate = item.template; break; case 'rowgroupfooter': this.rowGroupFooterTemplate = item.template; break; } }); } ngAfterViewChecked() { if (this.columnsChanged && this.el.nativeElement.offsetParent) { if (this.resizableColumns) { this.initResizableColumns(); } if (this.reorderableColumns) { this.initColumnReordering(); } this.columnsChanged = false; } if (this.dataChanged) { this.dataChanged = false; } if (this.calculateRowHeight && this.dataToRender && this.dataToRender.length > 0) { //resize row height based on unfrozen columns this.initFrozenRows(); } this.scrollableBodytableAlignment(); } ngAfterViewInit() { if (this.globalFilter) { this.globalFilterFunction = this.renderer.listen(this.globalFilter, 'keyup', () => { this.filterTimeout = setTimeout(() 
=> { this._filter(); this.filterTimeout = null; }, this.filterDelay); }); } if (this.editable) { this.documentClickListener = this.renderer.listenGlobal('body', 'click', (event) => { if (!this.editorClick) { this.closeCell(); } this.editorClick = false; }); } } ngDoCheck() { if (!this.immutable) { let changes = this.differ.diff(this.value); if (changes) { this.handleDataChange(); } } } @Input() get value(): any[] { return this._value; } set value(val: any[]) { this._value = val; this.handleDataChange(); } currentscrollY: number; calculateRowHeight: boolean = true; initFrozenRows() { if (this.unfrozenWidth) { //getting scroll height // let rowcount=this.rows; // let renderedRowCount=document.querySelectorAll('.ui-datatable-unfrozen-view .ui-datatable-scrollable-body table tr').length; // if(rowcount==renderedRowCount){ this.calculateRowHeight = false; // } let unfrozenRows = document.querySelectorAll('.ui-datatable-unfrozen-view .ui-datatable-scrollable-body table tr'); let frozenRows = document.querySelectorAll('.ui-datatable-frozen-view .ui-datatable-scrollable-body table tr'); //reset rows height to there actuals for (var i = 0; i < unfrozenRows.length; i++) { let frozenRowsHeight = frozenRows[i]['style']['height'] = 'auto'; let unfrozenRowHeight = unfrozenRows[i]['style']['height'] = 'auto'; } for (var i = 0; i < unfrozenRows.length; i++) { let frozenRowsHeight = frozenRows[i]['offsetHeight']; let unfrozenRowHeight = unfrozenRows[i]['offsetHeight']; if (frozenRowsHeight > unfrozenRowHeight) { unfrozenRows[i]['style']['height'] = frozenRowsHeight + 'px'; } else { frozenRows[i]['style']['height'] = unfrozenRowHeight + 'px'; } } if (this.headerColumnGroup) { let frozenHeaderRows = document.querySelectorAll('.ui-datatable-frozen-view .ui-datatable-thead .ui-column-title')[0].parentElement; let unfrozenHeaderRows = document.querySelectorAll('.ui-datatable-unfrozen-view .ui-datatable-thead')[0]; frozenHeaderRows['style']['height'] = unfrozenHeaderRows['offsetHeight'] + 'px'; } //set scroll position to original -- work around for firfox //when we set rows height scroll bar position get lost in case of firfox if (this.currentscrollY) { window.scrollTo(0, this.currentscrollY); } } } handleDataChange() { this.dataChanged = true; if (this.paginator) { this.updatePaginator(); } if (this.hasFilter()) { if (this.lazy) { //prevent loop if (this.stopFilterPropagation) this.stopFilterPropagation = false; else this._filter(); } else { this._filter(); } } if (this.stopSortPropagation) { this.stopSortPropagation = false; } else if (!this.lazy && (this.sortField || this.multiSortMeta)) { if (!this.sortColumn && this.columns) { this.sortColumn = this.columns.find(col => col.field === this.sortField && col.sortable === 'custom'); } if (this.sortMode == 'single') this.sortSingle(); else if (this.sortMode == 'multiple') this.sortMultiple(); } this.updateDataToRender(this.filteredValue || this.value); } initColumns(): void { this.columns = this.cols.toArray(); if (this.scrollable) { this.scrollableColumns = []; this.cols.forEach((col) => { if (col.frozen) { this.frozenColumns = this.frozenColumns || []; this.frozenColumns.push(col); } else { this.scrollableColumns.push(col); } }); //Rohit Sindhu Customization for Global Filter on scrollable grid if (this.hasFilter()) { this._filter(); } } this.columnsChanged = true; } resolveFieldData(data: any, field: string): any { if (data && field) { if (field.indexOf('.') == -1) { if (data[field] == null) { return ' '; } return data[field]; } else { let fields: string[] = 
field.split('.'); let value = data; for (var i = 0, len = fields.length; i < len; ++i) { if (value == null) { return ' '; } value = value[fields[i]]; } return value; } } else { return ' '; } } updateRowGroupMetadata() { this.rowGroupMetadata = {}; if (this.dataToRender) { for (let i = 0; i < this.dataToRender.length; i++) { let rowData = this.dataToRender[i]; let group = this.resolveFieldData(rowData, this.sortField); if (i == 0) { this.rowGroupMetadata[group] = { index: 0, size: 1 }; } else { let previousRowData = this.dataToRender[i - 1]; let previousRowGroup = this.resolveFieldData(previousRowData, this.sortField); if (group === previousRowGroup) { this.rowGroupMetadata[group].size++; } else { this.rowGroupMetadata[group] = { index: i, size: 1 }; } } } } } updatePaginator() { //total records this.totalRecords = this.lazy ? this.totalRecords : (this.value ? this.value.length : 0); //first if (this.totalRecords && this.first >= this.totalRecords) { let numberOfPages = Math.ceil(this.totalRecords / this.rows); this.first = Math.max((numberOfPages - 1) * this.rows, 0); } } paginate(event) { this.first = event.first; this.rows = event.rows; if (this.lazy) { this.stopFilterPropagation = true; this.onLazyLoad.emit(this.createLazyLoadMetadata()); } else { this.updateDataToRender(this.filteredValue || this.value); } this.onPage.emit({ first: this.first, rows: this.rows }); } scrollableBodytableAlignment() { let unfrozenclass = ""; let frozenWidth: number = 0; if (this.unfrozenWidth && this.frozenWidth && parseInt(this.frozenWidth) > 0) { unfrozenclass = ".ui-datatable-unfrozen-view"; if (document.querySelectorAll('.ui-datatable-unfrozen-view table tr th.col-group-header')[0]) frozenWidth = parseInt(this.frozenWidth) - (parseInt(this.frozenWidth) - 150) } if (this.isEmpty() || (!this.rowGroupScrollableWidthFix && this.rowGroupMode)) { let ele = this.el.nativeElement.querySelectorAll(unfrozenclass + ' .ui-datatable-scrollable-header-box > table'); if (ele.length > 0) { if (this.datatableHeaderWidth != ele[0].clientWidth) { this.datatableHeaderWidth = ele[0].clientWidth; let emptyContentWidth = this.el.nativeElement.querySelectorAll(unfrozenclass + " .ui-datatable-scrollable-body table")[0].clientWidth; if (this.datatableHeaderWidth > 0 && emptyContentWidth > 0 && emptyContentWidth != this.datatableHeaderWidth) { this.setScrollableBodyTableWidth(unfrozenclass, (this.datatableHeaderWidth - frozenWidth)); } if ((!this.rowGroupScrollableWidthFix && this.rowGroupMode)) { this.rowGroupScrollableWidthFix = true; } } } } if (!this.isEmpty() && this.datatableHeaderWidth && !this.rowGroupScrollableWidthFix) { this.datatableHeaderWidth = undefined; this.setScrollableBodyTableWidth(unfrozenclass, this.datatableHeaderWidth); } } setScrollableBodyTableWidth(classname: string, width: number) { let msgcontainer = this.el.nativeElement.querySelectorAll(classname + " .ui-datatable-scrollable-body table")[0]; if (msgcontainer) { msgcontainer.style.width = width ? width + 'px' : ''; } } updateDataToRender(datasource) { //setting current scroll position before rendering data this.currentscrollY = window.scrollY; if ((this.paginator || this.virtualScroll) && datasource) { this.dataToRender = []; let startIndex: number = this.lazy ? 0 : this.first; let endIndex: number = this.virtualScroll ? 
this.first + this.rows * 2 : startIndex + this.rows; for (let i = startIndex; i < endIndex; i++) { if (i >= datasource.length) { break; } this.dataToRender.push(datasource[i]); } } else { this.dataToRender = datasource; } if (this.rowGroupMode) { this.updateRowGroupMetadata(); } //setting flag to true so row height is being calculated for new data //as well on ngAfterViewChecked this.calculateRowHeight = true; } onVirtualScroll(event) { this.first = (event.page - 1) * this.rows; if (this.lazy) { this.stopFilterPropagation = true; this.onLazyLoad.emit(this.createLazyLoadMetadata()); } else { this.updateDataToRender(this.filteredValue || this.value); } } onHeaderKeydown(event, column: Column) { if (event.keyCode == 13) { this.sort(event, column); event.preventDefault(); } } onHeaderMousedown(event, header: any) { if (this.reorderableColumns) { if (event.target.nodeName !== 'INPUT') { header.draggable = true; } else if (event.target.nodeName === 'INPUT') { header.draggable = false; } } } sort(event, column: Column) { if (!column.sortable) { return; } let targetNode = event.target.nodeName; if (targetNode == 'TH' || (targetNode == 'SPAN' && !this.domHandler.hasClass(event.target, 'ui-c'))) { let columnSortField = column.sortField || column.field; this.sortOrder = (this.sortField === columnSortField) ? this.sortOrder * -1 : -1; this.sortField = columnSortField; this.sortColumn = column; let metaKey = event.metaKey || event.ctrlKey; if (this.sortMode == 'multiple') { if (!this.multiSortMeta || !metaKey) { this.multiSortMeta = []; } this.addSortMeta({ field: this.sortField, order: this.sortOrder }); } if (this.lazy) { this.first = 0; this.stopFilterPropagation = true; this.onLazyLoad.emit(this.createLazyLoadMetadata()); } else { if (this.sortMode == 'multiple') this.sortMultiple(); else this.sortSingle(); } this.onSort.emit({ field: this.sortField, order: this.sortOrder, multisortmeta: this.multiSortMeta }); } } sortSingle() { if (this.value) { if (this.sortColumn && this.sortColumn.sortable === 'custom') { this.sortColumn.sortFunction.emit({ field: this.sortField, order: this.sortOrder }); } else { this.value.sort((data1, data2) => { let value1 = this.resolveFieldData(data1, this.sortField); let value2 = this.resolveFieldData(data2, this.sortField); let result = null; if (value1 == null && value2 != null) result = -1; else if (value1 != null && value2 == null) result = 1; else if (value1 == null && value2 == null) result = 0; else if (typeof value1 === 'string' && typeof value2 === 'string') result = value1.localeCompare(value2); else result = (value1 < value2) ? -1 : (value1 > value2) ? 1 : 0; return (this.sortOrder * result); }); } this.first = 0; if (this.hasFilter()) { this._filter(); } } //prevent resort at ngDoCheck this.stopSortPropagation = true; } sortMultiple() { if (this.value) { this.value.sort((data1, data2) => { return this.multisortField(data1, data2, this.multiSortMeta, 0); }); if (this.hasFilter()) { this._filter(); } } //prevent resort at ngDoCheck this.stopSortPropagation = true; } multisortField(data1, data2, multiSortMeta, index) { let value1 = this.resolveFieldData(data1, multiSortMeta[index].field); let value2 = this.resolveFieldData(data2, multiSortMeta[index].field); let result = null; if (typeof value1 == 'string' || value1 instanceof String) { if (value1.localeCompare && (value1 != value2)) { return (multiSortMeta[index].order * value1.localeCompare(value2)); } } else { result = (value1 < value2) ? 
-1 : 1; } if (value1 == value2) { return (multiSortMeta.length - 1) > (index) ? (this.multisortField(data1, data2, multiSortMeta, index + 1)) : 0; } return (multiSortMeta[index].order * result); } addSortMeta(meta) { var index = -1; for (var i = 0; i < this.multiSortMeta.length; i++) { if (this.multiSortMeta[i].field === meta.field) { index = i; break; } } if (index >= 0) this.multiSortMeta[index] = meta; else this.multiSortMeta.push(meta); } isSorted(column: Column) { if (!column.sortable) { return false; } let columnSortField = column.sortField || column.field; if (this.sortMode === 'single') { return (this.sortField && columnSortField === this.sortField); } else if (this.sortMode === 'multiple') { let sorted = false; if (this.multiSortMeta) { for (let i = 0; i < this.multiSortMeta.length; i++) { if (this.multiSortMeta[i].field == columnSortField) { sorted = true; break; } } } return sorted; } } getSortOrder(column: Column) { let order = 0; let columnSortField = column.sortField || column.field; if (this.sortMode === 'single') { if (this.sortField && columnSortField === this.sortField) { order = this.sortOrder; } } else if (this.sortMode === 'multiple') { if (this.multiSortMeta) { for (let i = 0; i < this.multiSortMeta.length; i++) { if (this.multiSortMeta[i].field == columnSortField) { order = this.multiSortMeta[i].order; break; } } } } return order; } onRowGroupClick(event) { if (this.rowGroupToggleClick) { this.rowGroupToggleClick = false; return; } if (this.sortableRowGroup) { let targetNode = event.target.nodeName; if ((targetNode == 'TD' || (targetNode == 'SPAN' && !this.domHandler.hasClass(event.target, 'ui-c')))) { if (this.sortField != this.groupField) { this.sortField = this.groupField; this.sortSingle(); } else { this.sortOrder = -1 * this.sortOrder; this.sortSingle(); } } } } handleRowClick(event, rowData) { let targetNode = event.target.nodeName; if (targetNode == 'TD' || (targetNode == 'SPAN' && !this.domHandler.hasClass(event.target, 'ui-c'))) { this.onRowClick.next({ originalEvent: event, data: rowData }); if (!this.selectionMode) { return; } let selected = this.isSelected(rowData); let metaSelection = this.rowTouched ? 
false : this.metaKeySelection; if (metaSelection) { let metaKey = event.metaKey || event.ctrlKey; if (selected && metaKey) { if (this.isSingleSelectionMode()) { this.selection = null; this.selectionChange.emit(null); } else { let selectionIndex = this.findIndexInSelection(rowData); this.selection = this.selection.filter((val, i) => i != selectionIndex); this.selectionChange.emit(this.selection); } this.onRowUnselect.emit({ originalEvent: event, data: rowData, type: 'row' }); } else { if (this.isSingleSelectionMode()) { this.selection = rowData; this.selectionChange.emit(rowData); } else if (this.isMultipleSelectionMode()) { if (metaKey) this.selection = this.selection || []; else this.selection = []; this.selection = [...this.selection, rowData]; this.selectionChange.emit(this.selection); } this.onRowSelect.emit({ originalEvent: event, data: rowData, type: 'row' }); } } else { if (this.isSingleSelectionMode()) { if (selected) { this.selection = null; this.onRowUnselect.emit({ originalEvent: event, data: rowData, type: 'row' }); } else { this.selection = rowData; this.onRowSelect.emit({ originalEvent: event, data: rowData, type: 'row' }); } } else { if (selected) { let selectionIndex = this.findIndexInSelection(rowData); this.selection = this.selection.filter((val, i) => i != selectionIndex); this.onRowUnselect.emit({ originalEvent: event, data: rowData, type: 'row' }); } else { this.selection = [...this.selection || [], rowData]; this.onRowSelect.emit({ originalEvent: event, data: rowData, type: 'row' }); } } this.selectionChange.emit(this.selection); } } this.rowTouched = false; } handleRowTouchEnd(event) { this.rowTouched = true; } selectRowWithRadio(event, rowData: any) { if (this.selection != rowData) { this.selection = rowData; this.selectionChange.emit(this.selection); this.onRowSelect.emit({ originalEvent: event, data: rowData, type: 'radiobutton' }); } } toggleRowWithCheckbox(event, rowData) { let selectionIndex = this.findIndexInSelection(rowData); this.selection = this.selection || []; if (selectionIndex != -1) { this.selection = this.selection.filter((val, i) => i != selectionIndex); this.onRowUnselect.emit({ originalEvent: event, data: rowData, type: 'checkbox' }); } else { this.selection = [...this.selection, rowData]; this.onRowSelect.emit({ originalEvent: event, data: rowData, type: 'checkbox' }); } this.selectionChange.emit(this.selection); } toggleRowsWithCheckbox(event) { if (event.checked) this.selection = this.dataToRender.slice(0); else this.selection = []; this.selectionChange.emit(this.selection); this.onHeaderCheckboxToggle.emit({ originalEvent: event, checked: event.checked }); } onRowRightClick(event, rowData) { if (this.contextMenu) { let selectionIndex = this.findIndexInSelection(rowData); let selected = selectionIndex != -1; if (!selected) { if (this.isSingleSelectionMode()) { this.selection = rowData; this.selectionChange.emit(rowData); } else if (this.isMultipleSelectionMode()) { this.selection = [rowData]; this.selectionChange.emit(this.selection);<|fim▁hole|> } } this.contextMenu.show(event); this.onContextMenuSelect.emit({ originalEvent: event, data: rowData }); } } rowDblclick(event, rowData) { this.onRowDblclick.emit({ originalEvent: event, data: rowData }); } isSingleSelectionMode() { return this.selectionMode === 'single'; } isMultipleSelectionMode() { return this.selectionMode === 'multiple'; } findIndexInSelection(rowData: any) { let index: number = -1; if (this.selection) { for (let i = 0; i < this.selection.length; i++) { if 
(this.objectUtils.equals(rowData, this.selection[i], this.dataKey)) { index = i; break; } } } return index; } isSelected(rowData) { return ((rowData && this.objectUtils.equals(rowData, this.selection, this.dataKey)) || this.findIndexInSelection(rowData) != -1); } get allSelected() { let val = true; if (this.dataToRender && this.selection && (this.dataToRender.length <= this.selection.length)) { for (let data of this.dataToRender) { if (!this.isSelected(data)) { val = false; break; } } } else { val = false; } return val; } onFilterKeyup(event, value, field, matchMode) { if (event.keyCode === 13 || (event.type === 'blur')) { if (this.filterTimeout) { clearTimeout(this.filterTimeout); } this.filterTimeout = setTimeout(() => { this.filter(value, field, matchMode); this.filterTimeout = null; }, this.filterDelay); } } filter(value, field, matchMode) { if (!this.isFilterBlank(value)) { this.filters[field] = { value: value, matchMode: matchMode }; } else if (this.filters[field]) { delete this.filters[field]; } this._filter(); } isFilterBlank(filter: any): boolean { if (filter !== null && filter !== undefined) { if ((typeof filter === 'string' && filter.trim().length == 0) || (filter instanceof Array && filter.length == 0)) return true; else return false; } return true; } _filter() { this.first = 0; if (this.lazy) { this.stopFilterPropagation = true; this.onLazyLoad.emit(this.createLazyLoadMetadata()); } else { this.filteredValue = []; for (let i = 0; i < this.value.length; i++) { let localMatch = true; let globalMatch = false; if (this.columns) { for (let j = 0; j < this.columns.length; j++) { let col = this.columns[j], filterMeta = this.filters[col.field]; //local if (filterMeta) { let filterValue = filterMeta.value, filterField = col.field, filterMatchMode = filterMeta.matchMode || 'startsWith', dataFieldValue = this.resolveFieldData(this.value[i], filterField); let filterConstraint = this.filterConstraints[filterMatchMode]; if (!filterConstraint(dataFieldValue, filterValue.toString())) { localMatch = false; } if (!localMatch) { break; } } //global if (this.globalFilter && !globalMatch) { globalMatch = this.filterConstraints['contains'](this.resolveFieldData(this.value[i], col.field), this.globalFilter.value); } } } let matches = localMatch; if (this.globalFilter) { matches = localMatch && globalMatch; } if (matches) { this.filteredValue.push(this.value[i]); } } if (this.filteredValue.length === this.value.length) { this.filteredValue = null; } if (this.paginator) { this.totalRecords = this.filteredValue ? this.filteredValue.length : this.value ? 
this.value.length : 0; } this.updateDataToRender(this.filteredValue || this.value); } this.onFilter.emit({ filters: this.filters }); } hasFilter() { let empty = true; for (let prop in this.filters) { if (this.filters.hasOwnProperty(prop)) { empty = false; break; } } return !empty || (this.globalFilter && this.globalFilter.value && this.globalFilter.value.trim().length); } onFilterInputClick(event) { event.stopPropagation(); } filterConstraints = { startsWith(value, filter): boolean { if (filter === undefined || filter === null || filter.trim() === '') { return true; } if (value === undefined || value === null) { return false; } let filterValue = filter.toLowerCase(); return value.toString().toLowerCase().slice(0, filterValue.length) === filterValue; }, contains(value, filter): boolean { if (filter === undefined || filter === null || (typeof filter === 'string' && filter.trim() === '')) { return true; } if (value === undefined || value === null) { return false; } return value.toString().toLowerCase().indexOf(filter.toLowerCase()) !== -1; }, endsWith(value, filter): boolean { if (filter === undefined || filter === null || filter.trim() === '') { return true; } if (value === undefined || value === null) { return false; } let filterValue = filter.toString().toLowerCase(); return value.toString().toLowerCase().indexOf(filterValue, value.toString().length - filterValue.length) !== -1; }, equals(value, filter): boolean { if (filter === undefined || filter === null || (typeof filter === 'string' && filter.trim() === '')) { return true; } if (value === undefined || value === null) { return false; } return value.toString().toLowerCase() == filter.toString().toLowerCase(); }, in(value, filter: any[]): boolean { if (filter === undefined || filter === null || filter.length === 0) { return true; } if (value === undefined || value === null) { return false; } for (let i = 0; i < filter.length; i++) { if (filter[i] === value) return true; } return false; } } switchCellToEditMode(cell: any, column: Column, rowData: any) { if (!this.selectionMode && this.editable && column.editable) { this.editorClick = true; if (cell != this.editingCell) { if (this.editingCell && this.domHandler.find(this.editingCell, '.ng-invalid.ng-dirty').length == 0) { this.domHandler.removeClass(this.editingCell, 'ui-cell-editing'); } this.editingCell = cell; this.onEditInit.emit({ column: column, data: rowData }); this.domHandler.addClass(cell, 'ui-cell-editing'); let focusable = this.domHandler.findSingle(cell, '.ui-cell-editor input'); if (focusable) { setTimeout(() => this.renderer.invokeElementMethod(focusable, 'focus'), 50); } } } } switchCellToViewMode(element: any) { this.editingCell = null; let cell = this.findCell(element); this.domHandler.removeClass(cell, 'ui-cell-editing'); } closeCell() { if (this.editingCell) { this.domHandler.removeClass(this.editingCell, 'ui-cell-editing'); this.editingCell = null; } } onCellEditorKeydown(event, column: Column, rowData: any, rowIndex: number) { if (this.editable) { this.onEdit.emit({ originalEvent: event, column: column, data: rowData, index: rowIndex }); //enter if (event.keyCode == 13) { this.onEditComplete.emit({ column: column, data: rowData, index: rowIndex }); this.renderer.invokeElementMethod(event.target, 'blur'); this.switchCellToViewMode(event.target); event.preventDefault(); } //escape else if (event.keyCode == 27) { this.onEditCancel.emit({ column: column, data: rowData, index: rowIndex }); this.renderer.invokeElementMethod(event.target, 'blur'); 
this.switchCellToViewMode(event.target); event.preventDefault(); } //tab else if (event.keyCode == 9) { if (event.shiftKey) this.moveToPreviousCell(event); else this.moveToNextCell(event); } } } moveToPreviousCell(event: KeyboardEvent) { let currentCell = this.findCell(event.target); let row = currentCell.parentElement; let targetCell = this.findPreviousEditableColumn(currentCell); if (targetCell) { this.renderer.invokeElementMethod(targetCell, 'click'); event.preventDefault(); } } moveToNextCell(event: KeyboardEvent) { let currentCell = this.findCell(event.target); let row = currentCell.parentElement; let targetCell = this.findNextEditableColumn(currentCell); if (targetCell) { this.renderer.invokeElementMethod(targetCell, 'click'); event.preventDefault(); } } findPreviousEditableColumn(cell: Element) { let prevCell = cell.previousElementSibling; if (!prevCell) { let previousRow = cell.parentElement.previousElementSibling; if (previousRow) { prevCell = previousRow.lastElementChild; } } if (this.domHandler.hasClass(prevCell, 'ui-editable-column')) return prevCell; else return this.findPreviousEditableColumn(prevCell); } findNextEditableColumn(cell: Element) { let nextCell = cell.nextElementSibling; if (!nextCell) { let nextRow = cell.parentElement.nextElementSibling; if (nextRow) { nextCell = nextRow.firstElementChild; } } if (this.domHandler.hasClass(nextCell, 'ui-editable-column')) return nextCell; else return this.findNextEditableColumn(nextCell); } onCustomEditorFocusPrev(event: KeyboardEvent) { this.moveToPreviousCell(event); } onCustomEditorFocusNext(event: KeyboardEvent) { this.moveToNextCell(event); } findCell(element) { let cell = element; while (cell.tagName != 'TD') { cell = cell.parentElement; } return cell; } initResizableColumns() { this.tbody = this.domHandler.findSingle(this.el.nativeElement, 'tbody.ui-datatable-data'); this.resizerHelper = this.domHandler.findSingle(this.el.nativeElement, 'div.ui-column-resizer-helper'); this.fixColumnWidths(); this.documentColumnResizeListener = this.renderer.listenGlobal('body', 'mousemove', (event) => { if (this.columnResizing) { this.onColumnResize(event); } }); this.documentColumnResizeEndListener = this.renderer.listenGlobal('body', 'mouseup', (event) => { if (this.columnResizing) { this.columnResizing = false; this.onColumnResizeEnd(event); } }); } initColumnResize(event) { let container = this.el.nativeElement.children[0]; let containerLeft = this.domHandler.getOffset(container).left; this.resizeColumn = event.target.parentElement; this.columnResizing = true; this.lastResizerHelperX = (event.pageX - containerLeft); } onColumnResize(event) { let container = this.el.nativeElement.children[0]; let containerLeft = this.domHandler.getOffset(container).left; this.domHandler.addClass(container, 'ui-unselectable-text'); this.resizerHelper.style.height = container.offsetHeight + 'px'; this.resizerHelper.style.top = 0 + 'px'; if (event.pageX > containerLeft && event.pageX < (containerLeft + container.offsetWidth)) { this.resizerHelper.style.left = (event.pageX - containerLeft) + 'px'; } this.resizerHelper.style.display = 'block'; } onColumnResizeEnd(event) { let delta = this.resizerHelper.offsetLeft - this.lastResizerHelperX; let columnWidth = this.resizeColumn.offsetWidth; let newColumnWidth = columnWidth + delta; let minWidth = this.resizeColumn.style.minWidth || 15; if (columnWidth + delta > parseInt(minWidth)) { if (this.columnResizeMode === 'fit') { let nextColumn = this.resizeColumn.nextElementSibling; let nextColumnWidth = 
nextColumn.offsetWidth - delta; if (newColumnWidth > 15 && nextColumnWidth > 15) { this.resizeColumn.style.width = newColumnWidth + 'px'; if (nextColumn) { nextColumn.style.width = nextColumnWidth + 'px'; } if (this.scrollable) { let colGroup = this.domHandler.findSingle(this.el.nativeElement, 'colgroup.ui-datatable-scrollable-colgroup'); let resizeColumnIndex = this.domHandler.index(this.resizeColumn); colGroup.children[resizeColumnIndex].style.width = newColumnWidth + 'px'; if (nextColumn) { colGroup.children[resizeColumnIndex + 1].style.width = nextColumnWidth + 'px'; } } } } else if (this.columnResizeMode === 'expand') { this.tbody.parentElement.style.width = this.tbody.parentElement.offsetWidth + delta + 'px'; this.resizeColumn.style.width = newColumnWidth + 'px'; let containerWidth = this.tbody.parentElement.style.width; if (this.scrollable) { this.scrollBarWidth = this.scrollBarWidth || this.domHandler.calculateScrollbarWidth(); this.el.nativeElement.children[0].style.width = parseFloat(containerWidth) + this.scrollBarWidth + 'px'; let colGroup = this.domHandler.findSingle(this.el.nativeElement, 'colgroup.ui-datatable-scrollable-colgroup'); let resizeColumnIndex = this.domHandler.index(this.resizeColumn); colGroup.children[resizeColumnIndex].style.width = newColumnWidth + 'px'; } else { this.el.nativeElement.children[0].style.width = containerWidth; } } this.onColResize.emit({ element: this.resizeColumn, delta: delta }); } this.resizerHelper.style.display = 'none'; this.resizeColumn = null; this.domHandler.removeClass(this.el.nativeElement.children[0], 'ui-unselectable-text'); } fixColumnWidths() { let columns = this.domHandler.find(this.el.nativeElement, 'th.ui-resizable-column'); for (let col of columns) { col.style.width = col.offsetWidth + 'px'; } } onColumnDragStart(event) { if (this.columnResizing) { event.preventDefault(); return; } this.draggedColumn = this.findParentHeader(event.target); event.dataTransfer.setData('text', 'b'); // Firefox requires this to make dragging possible } onColumnDragover(event) { if (this.reorderableColumns && this.draggedColumn) { event.preventDefault(); let iconWidth = this.domHandler.getHiddenElementOuterWidth(this.reorderIndicatorUp); let iconHeight = this.domHandler.getHiddenElementOuterHeight(this.reorderIndicatorUp); let dropHeader = this.findParentHeader(event.target); let container = this.el.nativeElement.children[0]; let containerOffset = this.domHandler.getOffset(container); let dropHeaderOffset = this.domHandler.getOffset(dropHeader); if (this.draggedColumn != dropHeader) { let targetLeft = dropHeaderOffset.left - containerOffset.left; let targetTop = containerOffset.top - dropHeaderOffset.top; let columnCenter = dropHeaderOffset.left + dropHeader.offsetWidth / 2; this.reorderIndicatorUp.style.top = dropHeaderOffset.top - containerOffset.top - (iconHeight - 1) + 'px'; this.reorderIndicatorDown.style.top = dropHeaderOffset.top - containerOffset.top + dropHeader.offsetHeight + 'px'; if (event.pageX > columnCenter) { this.reorderIndicatorUp.style.left = (targetLeft + dropHeader.offsetWidth - Math.ceil(iconWidth / 2)) + 'px'; this.reorderIndicatorDown.style.left = (targetLeft + dropHeader.offsetWidth - Math.ceil(iconWidth / 2)) + 'px'; this.dropPosition = 1; } else { this.reorderIndicatorUp.style.left = (targetLeft - Math.ceil(iconWidth / 2)) + 'px'; this.reorderIndicatorDown.style.left = (targetLeft - Math.ceil(iconWidth / 2)) + 'px'; this.dropPosition = -1; } this.reorderIndicatorUp.style.display = 'block'; 
this.reorderIndicatorDown.style.display = 'block'; } else { event.dataTransfer.dropEffect = 'none'; } } } onColumnDragleave(event) { if (this.reorderableColumns && this.draggedColumn) { event.preventDefault(); this.reorderIndicatorUp.style.display = 'none'; this.reorderIndicatorDown.style.display = 'none'; } } onColumnDrop(event) { event.preventDefault(); if (this.draggedColumn) { let dragIndex = this.domHandler.index(this.draggedColumn); let dropIndex = this.domHandler.index(this.findParentHeader(event.target)); let allowDrop = (dragIndex != dropIndex); if (allowDrop && ((dropIndex - dragIndex == 1 && this.dropPosition === -1) || (dragIndex - dropIndex == 1 && this.dropPosition === 1))) { allowDrop = false; } if (allowDrop) { this.columns.splice(dropIndex, 0, this.columns.splice(dragIndex, 1)[0]); this.onColReorder.emit({ dragIndex: dragIndex, dropIndex: dropIndex, columns: this.columns }); } this.reorderIndicatorUp.style.display = 'none'; this.reorderIndicatorDown.style.display = 'none'; this.draggedColumn.draggable = false; this.draggedColumn = null; this.dropPosition = null; } } initColumnReordering() { this.reorderIndicatorUp = this.domHandler.findSingle(this.el.nativeElement.children[0], 'span.ui-datatable-reorder-indicator-up'); this.reorderIndicatorDown = this.domHandler.findSingle(this.el.nativeElement.children[0], 'span.ui-datatable-reorder-indicator-down'); } findParentHeader(element) { if (element.nodeName == 'TH') { return element; } else { let parent = element.parentElement; while (parent.nodeName != 'TH') { parent = parent.parentElement; } return parent; } } hasFooter() { if (this.footerColumnGroup) { return true; } else { if (this.columns) { for (let i = 0; i < this.columns.length; i++) { if (this.columns[i].footer) { return true; } } } } return false; } isEmpty() { return !this.dataToRender || (this.dataToRender.length == 0); } createLazyLoadMetadata(): LazyLoadEvent { return { first: this.first, rows: this.virtualScroll ? this.rows * 2 : this.rows, sortField: this.sortField, sortOrder: this.sortOrder, filters: this.filters, globalFilter: this.globalFilter ? 
this.globalFilter.value : null, multiSortMeta: this.multiSortMeta }; } toggleRow(row: any, event?: Event) { if (!this.expandedRows) { this.expandedRows = []; } let expandedRowIndex = this.findExpandedRowIndex(row); if (expandedRowIndex != -1) { this.expandedRows.splice(expandedRowIndex, 1); this.onRowCollapse.emit({ originalEvent: event, data: row }); } else { if (this.rowExpandMode === 'single') { this.expandedRows = []; } this.expandedRows.push(row); this.onRowExpand.emit({ originalEvent: event, data: row }); } if (event) { event.preventDefault(); } } findExpandedRowIndex(row: any): number { let index = -1 if (this.expandedRows) { for (let i = 0; i < this.expandedRows.length; i++) { if (this.expandedRows[i] == row) { index = i; break; } } } return index; } isRowExpanded(row: any): boolean { return this.findExpandedRowIndex(row) != -1; } findExpandedRowGroupIndex(row: any): number { let index = -1; if (this.expandedRowsGroups && this.expandedRowsGroups.length) { for (let i = 0; i < this.expandedRowsGroups.length; i++) { let group = this.expandedRowsGroups[i]; let rowGroupField = this.resolveFieldData(row, this.groupField); if (rowGroupField === group) { index = i; break; } } } return index; } isRowGroupExpanded(row: any): boolean { return this.findExpandedRowGroupIndex(row) != -1; } toggleAllRowGroup(event: Event, expand: boolean): void { if (this.rowGroupMetadata) { let groups = Object.keys(this.rowGroupMetadata); this.expandedRowsGroups = []; if (expand) { groups.forEach(row => { this.expandedRowsGroups.push(row); }); } } } toggleRowGroup(event: Event, row: any): void { this.rowGroupToggleClick = true; let index = this.findExpandedRowGroupIndex(row); let rowGroupField = this.resolveFieldData(row, this.groupField); if (index >= 0) { this.expandedRowsGroups.splice(index, 1); //this.onRowGroupCollapse.emit({ // originalEvent: event, // group: rowGroupField //}); } else { this.expandedRowsGroups = this.expandedRowsGroups || [], this.expandedRowsGroups.push(rowGroupField); // this.onRowGroupExpand.emit({ // originalEvent: event, // group: rowGroupField // }); } event.preventDefault(); } public reset() { this.sortField = null; this.sortOrder = 1; this.filteredValue = null; this.filters = {}; if (this.paginator) { this.paginate({ first: 0, rows: this.rows }); } else { this.updateDataToRender(this.value); } } public exportCSV(filter?: any) { let data = ((typeof (this.filteredValue) != "undefined" && this.filteredValue != null) && filter == true) ? 
this.filteredValue : this.value; let csv = '\ufeff'; let exportColumns = this.columns.filter(c => c.exportColumn != false); //headers for (let i = 0; i < exportColumns.length; i++) { if (exportColumns[i].field) { csv += exportColumns[i].header || exportColumns[i].field; if (i < (exportColumns.length - 1)) { csv += this.csvSeparator; } } } //body data.forEach((record, i) => { csv += '\n'; for (let i = 0; i < exportColumns.length; i++) { if (exportColumns[i].field) { csv += '"' + this.resolveFieldData(record, exportColumns[i].field) + '"'; if (i < (exportColumns.length - 1)) { csv += this.csvSeparator; } } } }); let blob = new Blob([csv], { type: 'text/csv;charset=utf-8;' }); if (window.navigator.msSaveOrOpenBlob) { navigator.msSaveOrOpenBlob(blob, this.exportFilename + '.csv'); } else { let link = document.createElement("a"); link.style.display = 'none'; document.body.appendChild(link); if (link.download !== undefined) { link.setAttribute('href', URL.createObjectURL(blob)); link.setAttribute('download', this.exportFilename + '.csv'); document.body.appendChild(link); link.click(); } else { csv = 'data:text/csv;charset=utf-8,' + csv; window.open(encodeURI(csv)); } document.body.removeChild(link); } } getBlockableElement(): HTMLElement { return this.el.nativeElement.children[0]; } getRowStyleClass(rowData: any, rowIndex: number) { let styleClass = 'ui-widget-content'; if (this.rowStyleClass) { let rowClass = this.rowStyleClass.call(this, rowData, rowIndex); if (rowClass) { styleClass += ' ' + rowClass; } } return styleClass; } // Excel Export transformData(filter?: any): Array<any> { let data = ((typeof (this.filteredValue) != "undefined" && this.filteredValue != null) && filter == true) ? this.filteredValue : this.value; let exportColumns = this.columns.filter(c => c.exportColumn != false); let data_array: any[] = []; let result_array: any[] = []; //headers for (let i = 0; i < exportColumns.length; i++) { if (exportColumns[i].field) { result_array.push(exportColumns[i].header || exportColumns[i].field); if (i == (exportColumns.length - 1)) { data_array.push(result_array); result_array = []; } } } //body data.forEach((record, j) => { let result_arr: any[] = []; for (let i = 0; i < exportColumns.length; i++) { if (exportColumns[i].field) { result_arr.push(this.resolveFieldData(record, exportColumns[i].field)); if (i == (exportColumns.length - 1)) { data_array.push(result_arr); } } } }); let res_columnsum: any = []; for (let j = 0; j < exportColumns.length; j++) { if (exportColumns[j].displaysum) { if (!this.displaysum) { this.displaysum = true; } let hours: number = 0; let minutes: number = 0; let seconds: number = 0; // console.log('sum column'); let columnsum: number = 0; let isNumber: boolean; for (let i = 1; i < data_array.length; i++) { let value = data_array[i][j]; if (!isNaN(Number(value))) { if (!isNumber) { isNumber = true; } if (!columnsum) { columnsum = 0; } columnsum = columnsum + parseFloat(value); } else if (this.durationvalue(value)) { var duration: any = value.split(":"); hours = hours + parseInt(duration[0]); minutes = minutes + parseInt(duration[1]); seconds = seconds + parseInt(duration[2]); // Convert each 60 minutes to an hour if (minutes >= 60) { hours++; minutes -= 60; } // Convert each 60 seconds to a minute if (seconds >= 60) { minutes++; seconds -= 60; } } } if (isNumber) { if (columnsum % 1 != 0) { res_columnsum.push(columnsum.toFixed(2)); } else { res_columnsum.push(columnsum); } } else if (hours || minutes || seconds) { res_columnsum.push(hours + ":" + minutes + ":" 
+ seconds); } else { res_columnsum.push(0); } } else { res_columnsum.push(""); } if (j == (exportColumns.length - 1) && this.displaysum) { let indexOfFirstFilledcolumn = res_columnsum.findIndex(c => c || c === 0); if (indexOfFirstFilledcolumn > 0 && res_columnsum[indexOfFirstFilledcolumn - 1].length == 0) { res_columnsum[indexOfFirstFilledcolumn - 1] = 'Total'; } data_array.push(res_columnsum); } } // let headerData: Array<any> = []; // let dataNew: Array<any> = []; // var keys_arr: any = []; // let excel_data: any; // if (data instanceof DataTable) { // excel_data = data.value; // } // else { // excel_data = data; // } //_.forEach(this.data.headerColumnGroup.rows._results, function (value: any, index: any) { // console.log(value.columns._results); // value.columns._results.forEach((c:any) => { // headerData.push({ colspan: c['colspan'], header: c['header'], rowspan: c['rowspan']}); // }) //}); //console.log(headerData); // debugger; // _.forEach(data, function(json) { // var arr = _.filter(json, function(value: any, index: any) { // if (typeof value !== "object") { // keys_arr.push(_.startCase(index)); // return value; // } // }); // dataNew.push(arr); // }); // dataNew.unshift(_.uniq(keys_arr)); return data_array; } sheet_from_array_of_arrays(data: any) { var ws = {}; var endCell = { c: 10000000, r: 10000000 }; var startCell = { c: 0, r: 0 }; var range = { s: endCell, e: startCell }; var wscols = []; var sumormaulcellRange: {} for (var R = 0; R != data.length; ++R) { for (var C = 0; C != data[R].length; ++C) { wscols.push({ wch: 20 }); if (range.s.r > R) range.s.r = R; if (range.s.c > C) range.s.c = C; if (range.e.r < R) range.e.r = R; if (range.e.c < C) range.e.c = C; var cell = { v: data[R][C], t: 's', s: {} }; if (R === 0) { cell.s = { fill: { fgColor: { rgb: "00BFFF" } }, font: { bold: true, sz: '11' }, border: { bottom: { style: 'thin', color: { rgb: "000000" } }, top: { style: 'thin', color: { rgb: "000000" } }, left: { style: 'thin', color: { rgb: "000000" } }, right: { style: 'thin', color: { rgb: "000000" } } } }; } else if (this.displaysum && R === (data.length - 1)) { cell.t = 'n'; cell.s = { font: { bold: true, sz: '11' }, border: { bottom: { style: 'thin', color: { rgb: "000000" } }, top: { style: 'thin', color: { rgb: "000000" } }, left: { style: 'thin', color: { rgb: "000000" } }, right: { style: 'thin', color: { rgb: "000000" } } } }; } else if ((R === (data.length - 1) && !this.displaysum) || (R === (data.length - 2) && this.displaysum)) { cell.s = { border: { bottom: { style: 'thin', color: { rgb: "000000" } }, top: { style: 'thin', color: { rgb: "000000" } }, left: { style: 'thin', color: { rgb: "000000" } }, right: { style: 'thin', color: { rgb: "000000" } } } }; } else if (cell.v && cell.v.toString().toLowerCase().indexOf('\n') !== -1) { cell.s = { alignment: { wrapText: true }, border: { bottom: { style: 'thin', color: { rgb: "000000" } }, top: { style: 'thin', color: { rgb: "000000" } }, left: { style: 'thin', color: { rgb: "000000" } }, right: { style: 'thin', color: { rgb: "000000" } } } }; } else { cell.s = { border: { bottom: { style: 'thin', color: { rgb: "000000" } }, top: { style: 'thin', color: { rgb: "000000" } }, left: { style: 'thin', color: { rgb: "000000" } }, right: { style: 'thin', color: { rgb: "000000" } } } }; } //convert null value to empty string if (cell.v == null) cell.v = " "; var cell_ref = XLSX.utils.encode_cell({ c: C, r: R }); if (typeof cell.v === 'number') cell.t = 'n'; else if (typeof cell.v === 'boolean') cell.t = 'b'; else if (cell.t 
=== 'n' && !isNaN(Number(cell.v))) cell.t = 'n'; else cell.t = 's'; ws[cell_ref] = cell; } } // ws['!cols'] = wscols; // console.log("Worksheet goes here", ws); if (range.s.c < 10000000) { ws['!ref'] = XLSX.utils.encode_range(endCell, startCell); } return ws; } datenum(v: any, date1904?: any): any { if (date1904) v += 1462; var epoch = Date.parse(v); var dt: any = new Date(Date.UTC(1899, 11, 30)); return (epoch - dt) / (24 * 60 * 60 * 1000); } durationvalue(v: any) { var patt = new RegExp("\\d+:\\d{2}:\\d{2}$"); return patt.test(v); } generateExcelFile(filter?: any): any { var dstyle = { font: { name: 'arial', sz: '10' } }; let sheetName: string = this.exportFilename; let workbook: { Sheets: {}, SheetNames: any[], Props: {} } = { Sheets: {}, SheetNames: [], Props: {} }; let ws: any; let wbout: any; ws = this.sheet_from_array_of_arrays(this.transformData(filter)); workbook.SheetNames.push(sheetName); workbook.Sheets[sheetName] = ws; wbout = XLSX.write(workbook, { bookType: 'xlsx', type: 'binary', showGridLines: false, defaultCellStyle: dstyle }); return wbout; } s2ab(s: any): ArrayBuffer { var buf = new ArrayBuffer(s.length); var view = new Uint8Array(buf); for (var i = 0; i != s.length; ++i) view[i] = s.charCodeAt(i) & 0xFF; return buf; } exportExcel(filter?: any): void { FileSaver.saveAs(new Blob([this.s2ab(this.generateExcelFile(filter))], { type: "application/octet-stream" }), this.exportFilename + ".xlsx"); } visibleColumns() { return this.columns ? this.columns.filter(c => !c.hidden) : []; } initialPerUnfrozenWidth: number; calculateUnforzenWidth() { if (this.resizeTimeout) { clearTimeout(this.resizeTimeout); } this.resizeTimeout = setTimeout(() => { if (this.unfrozenWidth && this.isWidthinPercentage(this.unfrozenWidth)) { this.initialPerUnfrozenWidth = this.initialPerUnfrozenWidth > 0 ? this.initialPerUnfrozenWidth : parseFloat(this.unfrozenWidth); let actualWidth = this.calculatePerWidth(this.initialPerUnfrozenWidth, true); let widthDifference = (parseFloat(this.frozenWidth) / parseFloat(actualWidth)) * 100; let calWidth = (this.initialPerUnfrozenWidth - widthDifference); this.unfrozenWidth = calWidth + '%'; } }, 200); } // get containerWidth() { // // if(this.scrollable) { // if(this.scrollWidth) { // // /*Custom Code Added to calculate width in %*/ // var width=this.scrollWidth; // var hasPx = width.indexOf('px') >= 0; // var hasPct = width.indexOf('%') >= 0; // if(hasPct){ // var gridwidth= document.getElementsByClassName("ui-datatable-scrollable-header-box")[0].clientWidth; // if(gridwidth) // { // width=width.replace('%',''); // width=Math.ceil(gridwidth*width/100); // width=width+'px'; // } // } // return width; // } // else if(this.frozenWidth && this.unfrozenWidth) { // debugger; // return parseFloat(this.frozenWidth) + parseFloat(this.unfrozenWidth) + 'px'; // } // } // else { // return this.style ? 
// this.style.width : null; // } // } // isforzenWidthInit:boolean; // get containerWidth() { // if (this.scrollable) { // if (this.scrollWidth) { // return this.scrollWidth; // } // else if (this.frozenWidth && this.unfrozenWidth) { // if(!this.isforzenWidthInit){ // debugger // this.calculateUnforzenWidth(); // } // // let actualWidth=this.calculatePerWidth(this.unfrozenWidth,true); // // this.unfrozenWidth=(this.frozenWidth/actualWidth)*100; // // // if (this.isWidthinPercentage(this.unfrozenWidth)) { // // // this.unfrozenWidth = this.calculatePerWidth(this.unfrozenWidth, true);// - parseFloat(this.frozenWidth); // // return parseFloat(this.unfrozenWidth); // // } // // return parseFloat(this.frozenWidth) + parseFloat(actualWidth) + 'px'; // } // } // else { // return this.style ? this.style.width : null; // } // } isWidthinPercentage(width: any) { return typeof width == 'string' ? width.indexOf('%') >= 0 : true; } calculatePerWidth(width: any, frozen: boolean) { if (this.isWidthinPercentage(width)) { let element: any; let gridwidth = this.el.nativeElement.firstElementChild.clientWidth; if (gridwidth) { width = typeof width == 'string' ? width.replace('%', '') : width; width = Math.ceil(gridwidth * width / 100); width = (width - 19) + 'px'; } } return width; } ngOnDestroy() { //remove event listener if (this.globalFilterFunction) { this.globalFilterFunction(); } if (this.resizableColumns && this.documentColumnResizeListener && this.documentColumnResizeEndListener) { this.documentColumnResizeListener(); this.documentColumnResizeEndListener(); } if (this.documentClickListener) { this.documentClickListener(); } if (this.columnsSubscription) { this.columnsSubscription.unsubscribe(); } } } @NgModule({ imports: [CommonModule, SharedModule, PaginatorModule, FormsModule], exports: [DataTable, SharedModule], declarations: [DataTable, DTRadioButton, DTCheckbox, ColumnHeaders, ColumnFooters, TableBody, ScrollableView, RowExpansionLoader] }) export class DataTableModule { }<|fim▁end|>
<|file_name|>AbstractAdmin.java<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.artemis.common; import javax.naming.Context; import javax.naming.InitialContext; import javax.naming.NamingException; import java.io.BufferedReader; import java.io.InputStreamReader; import java.io.OutputStreamWriter; import org.apache.activemq.artemis.api.config.ActiveMQDefaultConfiguration; import org.apache.activemq.artemis.api.core.TransportConfiguration; import org.apache.activemq.artemis.api.core.client.ActiveMQClient; import org.apache.activemq.artemis.api.core.client.ClientMessage; import org.apache.activemq.artemis.api.core.client.ClientRequestor; import org.apache.activemq.artemis.api.core.client.ClientSession; import org.apache.activemq.artemis.api.core.client.ClientSessionFactory; import org.apache.activemq.artemis.api.core.client.ServerLocator; import org.apache.activemq.artemis.api.core.management.ManagementHelper; import org.apache.activemq.artemis.api.core.management.ResourceNames; import org.apache.activemq.artemis.core.remoting.impl.netty.NettyConnectorFactory; import org.apache.activemq.artemis.tests.util.SpawnedVMSupport; import org.junit.Assert; import org.objectweb.jtests.jms.admin.Admin; /** * AbstractAdmin. */ public class AbstractAdmin implements Admin {<|fim▁hole|> protected ClientRequestor requestor; protected boolean serverLifeCycleActive; protected Process serverProcess; protected ServerLocator serverLocator; protected ClientSessionFactory sf; // this is a constant to control if we should use a separate VM for the server. public static final boolean spawnServer = false; /** * Determines whether to act or 'no-op' on serverStart() and * serverStop(). This is used when testing combinations of client and * servers with different versions. 
*/ private static final String SERVER_LIVE_CYCLE_PROPERTY = "org.apache.activemq.artemis.jms.ActiveMQAMQPAdmin.serverLifeCycle"; public AbstractAdmin() { serverLifeCycleActive = Boolean.valueOf(System.getProperty(SERVER_LIVE_CYCLE_PROPERTY, "true")); } @Override public String getName() { return getClass().getName(); } @Override public void start() throws Exception { serverLocator = ActiveMQClient.createServerLocatorWithoutHA(new TransportConfiguration(NettyConnectorFactory.class.getName())); sf = serverLocator.createSessionFactory(); clientSession = sf.createSession(ActiveMQDefaultConfiguration.getDefaultClusterUser(), ActiveMQDefaultConfiguration.getDefaultClusterPassword(), false, true, true, false, 1); requestor = new ClientRequestor(clientSession, ActiveMQDefaultConfiguration.getDefaultManagementAddress()); clientSession.start(); } @Override public void stop() throws Exception { requestor.close(); if (sf != null) { sf.close(); } if (serverLocator != null) { serverLocator.close(); } sf = null; serverLocator = null; } @Override public Context createContext() throws NamingException { return new InitialContext(); } @Override public void createConnectionFactory(final String name) { throw new RuntimeException("FIXME NYI createConnectionFactory"); } @Override public void deleteConnectionFactory(final String name) { throw new RuntimeException("FIXME NYI deleteConnectionFactory"); } @Override public void createQueue(final String name) { Boolean result; try { result = (Boolean) invokeSyncOperation(ResourceNames.JMS_SERVER, "createQueue", name, name); Assert.assertEquals(true, result.booleanValue()); } catch (Exception e) { throw new IllegalStateException(e); } } @Override public void deleteQueue(final String name) { Boolean result; try { result = (Boolean) invokeSyncOperation(ResourceNames.JMS_SERVER, "destroyQueue", name); Assert.assertEquals(true, result.booleanValue()); } catch (Exception e) { throw new IllegalStateException(e); } } @Override public void createQueueConnectionFactory(final String name) { createConnectionFactory(name); } @Override public void deleteQueueConnectionFactory(final String name) { deleteConnectionFactory(name); } @Override public void createTopic(final String name) { Boolean result; try { result = (Boolean) invokeSyncOperation(ResourceNames.JMS_SERVER, "createTopic", name, name); Assert.assertEquals(true, result.booleanValue()); } catch (Exception e) { throw new IllegalStateException(e); } } @Override public void deleteTopic(final String name) { Boolean result; try { result = (Boolean) invokeSyncOperation(ResourceNames.JMS_SERVER, "destroyTopic", name); Assert.assertEquals(true, result.booleanValue()); } catch (Exception e) { throw new IllegalStateException(e); } } @Override public void createTopicConnectionFactory(final String name) { createConnectionFactory(name); } @Override public void deleteTopicConnectionFactory(final String name) { deleteConnectionFactory(name); } @Override public void startServer() throws Exception { if (!serverLifeCycleActive) { return; } if (spawnServer) { String[] vmArgs = new String[]{}; serverProcess = SpawnedVMSupport.spawnVM(SpawnedJMSServer.class.getName(), vmArgs, false); InputStreamReader isr = new InputStreamReader(serverProcess.getInputStream()); final BufferedReader br = new BufferedReader(isr); String line = null; while ((line = br.readLine()) != null) { System.out.println("SERVER: " + line); if ("OK".equals(line.trim())) { new Thread() { @Override public void run() { try { String line1 = null; while ((line1 = br.readLine()) != 
null) { System.out.println("SERVER: " + line1); } } catch (Exception e) { e.printStackTrace(); } } }.start(); return; } else if ("KO".equals(line.trim())) { // something went wrong with the server, destroy it: serverProcess.destroy(); throw new IllegalStateException("Unable to start the spawned server :" + line); } } } else { SpawnedJMSServer.startServer(); } } @Override public void stopServer() throws Exception { if (!serverLifeCycleActive) { return; } if (spawnServer) { OutputStreamWriter osw = new OutputStreamWriter(serverProcess.getOutputStream()); osw.write("STOP\n"); osw.flush(); int exitValue = serverProcess.waitFor(); if (exitValue != 0) { serverProcess.destroy(); } } else { SpawnedJMSServer.stopServer(); } } protected Object invokeSyncOperation(final String resourceName, final String operationName, final Object... parameters) throws Exception { ClientMessage message = clientSession.createMessage(false); ManagementHelper.putOperationInvocation(message, resourceName, operationName, parameters); ClientMessage reply; try { reply = requestor.request(message, 3000); } catch (Exception e) { throw new IllegalStateException("Exception while invoking " + operationName + " on " + resourceName, e); } if (reply == null) { throw new IllegalStateException("no reply received when invoking " + operationName + " on " + resourceName); } if (!ManagementHelper.hasOperationSucceeded(reply)) { throw new IllegalStateException("operation failed when invoking " + operationName + " on " + resourceName + ": " + ManagementHelper.getResult(reply)); } return ManagementHelper.getResult(reply); } }<|fim▁end|>
protected ClientSession clientSession;
<|file_name|>gen_travis.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 from itertools import combinations, chain from enum import Enum, auto LINUX = 'linux' OSX = 'osx' WINDOWS = 'windows' AMD64 = 'amd64' ARM64 = 'arm64' PPC64LE = 'ppc64le' TRAVIS_TEMPLATE = """\ # This config file is generated by ./scripts/gen_travis.py. # Do not edit by hand. # We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also # the software provided by 'generic' is simply not needed for our tests. # Differences are explained here: # https://docs.travis-ci.com/user/languages/minimal-and-generic/ language: minimal dist: focal jobs: include: {jobs} before_install: - |- if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then source ./scripts/$TRAVIS_OS_NAME/before_install.sh fi before_script: - |- if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then source ./scripts/$TRAVIS_OS_NAME/before_script.sh else scripts/gen_travis.py > travis_script && diff .travis.yml travis_script autoconf # If COMPILER_FLAGS are not empty, add them to CC and CXX ./configure ${{COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" \ CXX="$CXX $COMPILER_FLAGS"}} $CONFIGURE_FLAGS make -j3 make -j3 tests fi script: - |- if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then source ./scripts/$TRAVIS_OS_NAME/script.sh else make check fi """ class Option(object): class Type: COMPILER = auto() COMPILER_FLAG = auto() CONFIGURE_FLAG = auto() MALLOC_CONF = auto() FEATURE = auto() def __init__(self, type, value): self.type = type self.value = value @staticmethod def as_compiler(value): return Option(Option.Type.COMPILER, value) @staticmethod def as_compiler_flag(value): return Option(Option.Type.COMPILER_FLAG, value) @staticmethod def as_configure_flag(value): return Option(Option.Type.CONFIGURE_FLAG, value) @staticmethod def as_malloc_conf(value): return Option(Option.Type.MALLOC_CONF, value) @staticmethod def as_feature(value): return Option(Option.Type.FEATURE, value) def __eq__(self, obj): return (isinstance(obj, Option) and obj.type == self.type and obj.value == self.value) # The 'default' configuration is gcc, on linux, with no compiler or configure # flags. We also test with clang, -m32, --enable-debug, --enable-prof, # --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing # travis though, we don't test all 2**7 = 128 possible combinations of these; # instead, we only test combinations of up to 2 'unusual' settings, under the # hope that bugs involving interactions of such settings are rare. MAX_UNUSUAL_OPTIONS = 2 GCC = Option.as_compiler('CC=gcc CXX=g++') CLANG = Option.as_compiler('CC=clang CXX=clang++') CL = Option.as_compiler('CC=cl.exe CXX=cl.exe') compilers_unusual = [CLANG,] CROSS_COMPILE_32BIT = Option.as_feature('CROSS_COMPILE_32BIT') feature_unusuals = [CROSS_COMPILE_32BIT] configure_flag_unusuals = [Option.as_configure_flag(opt) for opt in ( '--enable-debug', '--enable-prof', '--disable-stats', '--disable-libdl', '--enable-opt-safety-checks', '--with-lg-page=16', )] malloc_conf_unusuals = [Option.as_malloc_conf(opt) for opt in ( 'tcache:false', 'dss:primary', 'percpu_arena:percpu', 'background_thread:true', )] all_unusuals = (compilers_unusual + feature_unusuals + configure_flag_unusuals + malloc_conf_unusuals) def get_extra_cflags(os, compiler): if os == WINDOWS: # For non-CL compilers under Windows (for now it's only MinGW-GCC), # -fcommon needs to be specified to correctly handle multiple # 'malloc_conf' symbols and such, which are declared weak under Linux. 
# Weak symbols don't work with MinGW-GCC. if compiler != CL.value: return ['-fcommon'] else: return [] # We get some spurious errors when -Warray-bounds is enabled. extra_cflags = ['-Werror', '-Wno-array-bounds'] if compiler == CLANG.value or os == OSX: extra_cflags += [ '-Wno-unknown-warning-option', '-Wno-ignored-attributes' ] if os == OSX:<|fim▁hole|> extra_cflags += [ '-Wno-deprecated-declarations', ] return extra_cflags # Formats a job from a combination of flags def format_job(os, arch, combination): compilers = [x.value for x in combination if x.type == Option.Type.COMPILER] assert(len(compilers) <= 1) compiler_flags = [x.value for x in combination if x.type == Option.Type.COMPILER_FLAG] configure_flags = [x.value for x in combination if x.type == Option.Type.CONFIGURE_FLAG] malloc_conf = [x.value for x in combination if x.type == Option.Type.MALLOC_CONF] features = [x.value for x in combination if x.type == Option.Type.FEATURE] if len(malloc_conf) > 0: configure_flags.append('--with-malloc-conf=' + ','.join(malloc_conf)) if not compilers: compiler = GCC.value else: compiler = compilers[0] extra_environment_vars = '' cross_compile = CROSS_COMPILE_32BIT.value in features if os == LINUX and cross_compile: compiler_flags.append('-m32') features_str = ' '.join([' {}=yes'.format(feature) for feature in features]) stringify = lambda arr, name: ' {}="{}"'.format(name, ' '.join(arr)) if arr else '' env_string = '{}{}{}{}{}{}'.format( compiler, features_str, stringify(compiler_flags, 'COMPILER_FLAGS'), stringify(configure_flags, 'CONFIGURE_FLAGS'), stringify(get_extra_cflags(os, compiler), 'EXTRA_CFLAGS'), extra_environment_vars) job = ' - os: {}\n'.format(os) job += ' arch: {}\n'.format(arch) job += ' env: {}'.format(env_string) return job def generate_unusual_combinations(unusuals, max_unusual_opts): """ Generates different combinations of non-standard compilers, compiler flags, configure flags and malloc_conf settings. @param max_unusual_opts: Limit of unusual options per combination. """ return chain.from_iterable( [combinations(unusuals, i) for i in range(max_unusual_opts + 1)]) def included(combination, exclude): """ Checks if the combination of options should be included in the Travis testing matrix. @param exclude: A list of options to be avoided. 
""" return not any(excluded in combination for excluded in exclude) def generate_jobs(os, arch, exclude, max_unusual_opts, unusuals=all_unusuals): jobs = [] for combination in generate_unusual_combinations(unusuals, max_unusual_opts): if included(combination, exclude): jobs.append(format_job(os, arch, combination)) return '\n'.join(jobs) def generate_linux(arch): os = LINUX # Only generate 2 unusual options for AMD64 to reduce matrix size max_unusual_opts = MAX_UNUSUAL_OPTIONS if arch == AMD64 else 1 exclude = [] if arch == PPC64LE: # Avoid 32 bit builds and clang on PowerPC exclude = (CROSS_COMPILE_32BIT, CLANG,) return generate_jobs(os, arch, exclude, max_unusual_opts) def generate_macos(arch): os = OSX max_unusual_opts = 1 exclude = ([Option.as_malloc_conf(opt) for opt in ( 'dss:primary', 'percpu_arena:percpu', 'background_thread:true')] + [Option.as_configure_flag('--enable-prof')] + [CLANG,]) return generate_jobs(os, arch, exclude, max_unusual_opts) def generate_windows(arch): os = WINDOWS max_unusual_opts = 3 unusuals = ( Option.as_configure_flag('--enable-debug'), CL, CROSS_COMPILE_32BIT, ) return generate_jobs(os, arch, (), max_unusual_opts, unusuals) def get_manual_jobs(): return """\ # Development build - os: linux env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \ --disable-cache-oblivious --enable-stats --enable-log --enable-prof" \ EXTRA_CFLAGS="-Werror -Wno-array-bounds" # --enable-expermental-smallocx: - os: linux env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \ --enable-experimental-smallocx --enable-stats --enable-prof" \ EXTRA_CFLAGS="-Werror -Wno-array-bounds" """ def main(): jobs = '\n'.join(( generate_linux(AMD64), generate_linux(PPC64LE), generate_macos(AMD64), #generate_windows(AMD64), get_manual_jobs() )) print(TRAVIS_TEMPLATE.format(jobs=jobs)) if __name__ == '__main__': main()<|fim▁end|>
<|file_name|>TaskIDChangeMap.java<|end_file_name|><|fim▁begin|>package org.yawlfoundation.yawl.worklet.client;

import org.yawlfoundation.yawl.editor.ui.specification.SpecificationModel;
import org.yawlfoundation.yawl.engine.YSpecificationID;

import java.io.IOException;
import java.util.Map;

<|fim▁hole|>
 * @author Michael Adams
 * @date 18/02/2016
 */
public class TaskIDChangeMap {

    private Map<String, String> _changedIdentifiers;

    public TaskIDChangeMap(Map<String, String> changeMap) {
        _changedIdentifiers = changeMap;
    }

    public String getID(String oldID) {
        String newID = _changedIdentifiers.get(oldID);
        return newID != null ? newID : oldID;
    }

    public String getOldID(String newID) {
        for (String oldID : _changedIdentifiers.keySet()) {
            if (_changedIdentifiers.get(oldID).equals(newID)) {
                return oldID;
            }
        }
        return newID;
    }

    // called when a user changes a taskID
    public void add(String oldID, String newID) {
        // need to handle the case where this id has been updated
        // more than once between saves
        _changedIdentifiers.put(getOldID(oldID), newID);
    }

    public void saveChanges() {
        if (! _changedIdentifiers.isEmpty()) {
            YSpecificationID specID = SpecificationModel.getHandler().
                    getSpecification().getSpecificationID();
            try {
                if (WorkletClient.getInstance().updateRdrSetTaskIDs(specID, _changedIdentifiers)) {
                    _changedIdentifiers.clear();
                }
            }
            catch (IOException ignore) {
                //
            }
        }
    }
}<|fim▁end|>
/**
<|file_name|>xc.rs<|end_file_name|><|fim▁begin|>pub struct Something { pub x: isize }

pub trait A {
    fn f(&self) -> isize;
    fn g(&self) -> isize { 10 }
    fn h(&self) -> isize { 11 }
    fn lurr(x: &Self, y: &Self) -> isize { x.g() + y.h() }
}

impl A for isize {
    fn f(&self) -> isize { 10 }
}

impl A for Something {
    fn f(&self) -> isize { 10 }
}

pub trait B<T> {
    fn thing<U>(&self, x: T, y: U) -> (T, U) { (x, y) }
    fn staticthing<U>(_z: &Self, x: T, y: U) -> (T, U) { (x, y) }
}

impl<T> B<T> for isize { }
impl B<f64> for bool { }

pub trait TestEquality {
    fn test_eq(&self, rhs: &Self) -> bool;
<|fim▁hole|>
        !self.test_eq(rhs)
    }
}

impl TestEquality for isize {
    fn test_eq(&self, rhs: &isize) -> bool {
        *self == *rhs
    }
}<|fim▁end|>
fn test_neq(&self, rhs: &Self) -> bool {
<|file_name|>pwd_go15_plan9.go<|end_file_name|><|fim▁begin|>// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.5
// +build go1.5
<|fim▁hole|>
package plan9

import "syscall"

func fixwd() {
	syscall.Fixwd()
}

func Getwd() (wd string, err error) {
	return syscall.Getwd()
}

func Chdir(path string) error {
	return syscall.Chdir(path)
}<|fim▁end|>
<|file_name|>mainHandler.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- # @author victor li [email protected] # @date 2015/10/07 import baseHandler class MainHandler(baseHandler.RequestHandler): <|fim▁hole|> self.redirect('/posts/last')<|fim▁end|>
def get(self):
<|file_name|>ReportRowProvider.js<|end_file_name|><|fim▁begin|>function ReportRowProvider()
{
    var that = this;
    var mXHR = new XHR();
    var mFetchListeners = [];
    var mReportId = "";
    var mCriteriaText = "";

    this.addFetchListener = function(address) { mFetchListeners.push(address); };

    this.getReportId = function() { return mReportId; };
    this.setReportId = function(value) { mReportId = value; };

    this.getCriteriaText = function() { return mCriteriaText; };
    this.setCriteriaText = function(value) { mCriteriaText = value; };

    function getAddress() {<|fim▁hole|>
        return result;
    };

    this.startFetchingRows = function() {
        mXHR.beginSend("get", getAddress(), null, null, 60000, fetchHandler);
    };

    function fetchHandler(status, responseText) {
        var response = null;
        if (status !== 200) response = { errorText: "ADS-B Radar Server returned status " + status };
        else {
            responseText = replaceDateConstructors(responseText);
            response = eval('(' + responseText + ')');
        }

        for(var i = 0;i < mFetchListeners.length;++i) mFetchListeners[i](response);
    };
}<|fim▁end|>
var result = "ReportRows.json?rep=" + mReportId; // time parameter no longer required, server now sends cache-control header
if(mCriteriaText !== null && mCriteriaText.length > 0) result += mCriteriaText;
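Taken together, the JavaScript row above is a small fetch-and-notify (observer)
helper. A rough Python analogue for orientation — the URL shape and the error
text come from the snippet, everything else (including the use of urllib) is an
assumption:

import json
import urllib.request, urllib.error

class ReportRowProvider:
    def __init__(self, report_id, criteria_text=""):
        self.report_id = report_id
        self.criteria_text = criteria_text
        self._listeners = []

    def add_fetch_listener(self, fn):
        self._listeners.append(fn)

    def start_fetching_rows(self):
        url = "ReportRows.json?rep=" + self.report_id + self.criteria_text
        try:
            with urllib.request.urlopen(url, timeout=60) as resp:
                response = json.load(resp)   # safer than the eval() in the original
        except urllib.error.HTTPError as err:
            response = {"errorText": "ADS-B Radar Server returned status %d" % err.code}
        for listener in self._listeners:
            listener(response)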
<|file_name|>Utilisateur.py<|end_file_name|><|fim▁begin|>### Utilisateur (user) type
# username : string
# password : string
#

import EnsUtilisateurs
import EnsAdmins
import EnsReservation
import EnsEmprunt

class Utilisateur:

    def __init__(self,user_id=None,username="",password="",abonnementValide=False,empruntEnCours = False,reservationEnCours = False,nbRetard = 0):
        self.user_id = user_id
        self.username = username
        self.password = password
        self.abonnementValide = abonnementValide
        self.empruntEnCours = empruntEnCours
        self.reservationEnCours = reservationEnCours
        self.nbRetard = nbRetard

##### GETTERS ########

    def get_user_id(self):
        return self.user_id

    def get_username(self):
        return self.username

    def get_password(self):
        return self.password

    def get_abonnementValide(self):
        return self.abonnementValide

    def get_empruntEnCours(self):
        return self.empruntEnCours

    def get_reservationEnCours(self):
        return self.reservationEnCours

    def get_nbRetard(self):
        return self.nbRetard

##### SETTERS #####

    def set_username(self, username):
        self.username = username
        self.save()

    def set_password(self, password):
        self.password = password
        self.save()

    def set_abonnementValide(self, Nbool):
        self.abonnementValide = Nbool
        self.save()

    def set_empruntEnCours(self,empruntEnCours):
        self.empruntEnCours = empruntEnCours
        self.save()

    def set_reservationEnCours(self, reservationEnCours):
        self.reservationEnCours = reservationEnCours
        self.save()

    def set_nbRetard(self, nbRetard):
        self.nbRetard = nbRetard
        self.save()

    def ajout_Retard(self,nbRetard):
        self.nbRetard = self.nbRetard + nbRetard
        self.save()

<|fim▁hole|>
##### HELPER FUNCTIONS #####

    def est_admin(self):
        return EnsAdmins.est_admin(self)

    def make_admin(self):
        if (self.user_id != 0):
            EnsAdmins.insert(self)
        else:
            print ("No user selected!")

    def remove_admin(self):
        if (self.user_id != 0 and self.est_admin()):
            EnsAdmins.delete_admin(self)

    def delete_user(self):
        """Removes a user from the EnsUtilisateurs set
        (#) If this user is an admin => also removes the user from the admins set
        """
        if self.est_admin():
            EnsAdmins.delete_admin(self)
        EnsUtilisateurs.delete_user(self)

    def peut_emprunter(self):
        return (not(EnsEmprunt.a_un_emprunt_en_cours(self)) and self.abonnementValide)

    def peut_reserver(self):
        return (not(EnsReservation.Reservation_EnCours(self)) and self.abonnementValide)

    def save(self):
        if self.user_id == None:
            EnsUtilisateurs.insert(self)
        else:
            EnsUtilisateurs.update(self)
    # SAVE A USER IN DATABASE<|fim▁end|>
<|file_name|>Yahoo.java<|end_file_name|><|fim▁begin|>package interviews;

import java.util.ArrayList;

public class Yahoo {

}

// Run Length Encoding for byte array
// Input byte array [10, 10, 10, 255, 255, 0, 10] ==> output byte array [3, 10, 2, 255, 1, 0, 1, 10]

class ByteArrayEncodeDecode {

    public byte[] encodeByteArray(byte[] input) {

        int n = input.length;
        if (n == 0)
            return new byte[0];

        ArrayList<Byte> out = new ArrayList<>();

        byte prevByte = input[0];
        byte prevCount = 1;

        for (int i = 1; i < n; i++) {

            // compare the count unsigned: Java bytes are signed, so 255 is stored as -1
            if (prevByte == input[i] && (prevCount & 0xFF) != 255) {
                prevCount++;
            }
            else {
                out.add(prevCount);
                out.add(prevByte);
                prevByte = input[i];
                prevCount = 1;
            }
        }
        out.add(prevCount);<|fim▁hole|>
        // unbox into a primitive array; ArrayList.toArray() alone would yield Object[]
        byte[] encoded = new byte[out.size()];
        for (int i = 0; i < encoded.length; i++)
            encoded[i] = out.get(i);
        return encoded;
    }

    public static void main(String[] args) {

    }
}

// [] ==> []
// [1] ==> [1, 1]
// [1, 1, 1, 2, 2, 3] ==> [3, 1, 2, 2, 1, 3]
// [1 ... 300.....1] ==> [255, 1, 45, 1]<|fim▁end|>
out.add(prevByte);
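The interview snippet above only writes the encoder; the decoder for its
(count, value) pair format is the natural counterpart. A sketch (not part of
the original row), checked against the example given in the snippet itself:

def decode(encoded):
    out = []
    for i in range(0, len(encoded), 2):
        count, value = encoded[i], encoded[i + 1]
        out.extend([value] * count)
    return out

assert decode([3, 10, 2, 255, 1, 0, 1, 10]) == [10, 10, 10, 255, 255, 0, 10]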
<|file_name|>test_v2v_ansible.py<|end_file_name|><|fim▁begin|>import fauxfactory import pytest from cfme import test_requirements from cfme.fixtures.provider import rhel7_minimal from cfme.infrastructure.provider.rhevm import RHEVMProvider from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.markers.env_markers.provider import ONE_PER_TYPE from cfme.markers.env_markers.provider import ONE_PER_VERSION from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.appliance.implementations.ui import navigator from cfme.utils.conf import cfme_data from cfme.utils.conf import credentials from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [ test_requirements.ansible, pytest.mark.meta( server_roles=["+embedded_ansible"] ), pytest.mark.provider( classes=[RHEVMProvider], selector=ONE_PER_VERSION, required_flags=["v2v"], scope="module" ), pytest.mark.provider( classes=[VMwareProvider], selector=ONE_PER_TYPE, fixture_name="source_provider", required_flags=["v2v"], scope="module" ), ] def get_migrated_vm_obj(src_vm_obj, target_provider): """Returns migrated_vm obj from target_provider""" collection = target_provider.appliance.provider_based_collection(target_provider) migrated_vm = collection.instantiate(src_vm_obj.name, target_provider) return migrated_vm @pytest.fixture(scope="module") def ansible_repository(appliance): """Fixture to add ansible repository""" appliance.wait_for_embedded_ansible() repositories = appliance.collections.ansible_repositories try: repository = repositories.create( name=fauxfactory.gen_alpha(), url=cfme_data.ansible_links.playbook_repositories.v2v, description=fauxfactory.gen_alpha() ) except KeyError: pytest.skip("Skipping since no such key found in yaml") view = navigate_to(repository, "Details") wait_for(lambda: view.entities.summary("Properties").get_text_of("Status") == "successful", delay=10, timeout=60, fail_func=view.toolbar.refresh.click) yield repository if repository.exists: repository.delete() def catalog_item(request, appliance, machine_credential, ansible_repository, playbook_type): """Add provisioning and retire ansible catalog item""" cat_item = appliance.collections.catalog_items.create( catalog_item_class=appliance.collections.catalog_items.ANSIBLE_PLAYBOOK, name=fauxfactory.gen_alphanumeric(), description=fauxfactory.gen_alphanumeric(), provisioning={ "repository": ansible_repository.name, "playbook": "{}.yml".format(playbook_type), "machine_credential": machine_credential, "create_new": True, "provisioning_dialog_name": fauxfactory.gen_alphanumeric(), }, ) @request.addfinalizer def _cleanup(): if cat_item.exists: cat_item.delete() return cat_item @pytest.mark.parametrize( "form_data_vm_obj_single_datastore", [["nfs", "nfs", rhel7_minimal]], indirect=True ) def test_migration_playbooks(request, appliance, v2v_providers, host_creds, conversion_tags, ansible_repository, form_data_vm_obj_single_datastore): """Test for migrating vms with pre and post playbooks""" creds = credentials[v2v_providers.vmware_provider.data.templates.get("rhel7_minimal").creds] CREDENTIALS = ( "Machine", { "username": creds.username, "password": creds.password, "privilage_escalation": "sudo", }, ) credential = appliance.collections.ansible_credentials.create( name="{type}_credential_{cred}".format(type=CREDENTIALS[0], cred=fauxfactory.gen_alpha()), credential_type=CREDENTIALS[0], **CREDENTIALS[1] ) provision_catalog = catalog_item( request, appliance, credential.name, ansible_repository, "provision" ) 
retire_catalog = catalog_item( request, appliance, credential.name, ansible_repository, "retire" ) infrastructure_mapping_collection = appliance.collections.v2v_mappings mapping = infrastructure_mapping_collection.create( form_data_vm_obj_single_datastore.form_data ) @request.addfinalizer def _cleanup(): infrastructure_mapping_collection.delete(mapping) # vm_obj is a list, with only 1 VM object, hence [0] src_vm_obj = form_data_vm_obj_single_datastore.vm_list[0] migration_plan_collection = appliance.collections.v2v_plans migration_plan = migration_plan_collection.create( name="plan_{}".format(fauxfactory.gen_alphanumeric()), description="desc_{}".format(fauxfactory.gen_alphanumeric()), infra_map=mapping.name, vm_list=form_data_vm_obj_single_datastore.vm_list, start_migration=True, pre_playbook=provision_catalog.name,<|fim▁hole|> view = appliance.browser.create_view( navigator.get_class(migration_plan_collection, "All").VIEW.pick() ) wait_for( func=view.progress_card.is_plan_started, func_args=[migration_plan.name], message="migration plan is starting, be patient please", delay=5, num_sec=280, handle_exception=True, fail_cond=False ) # wait until plan is in progress wait_for( func=view.plan_in_progress, func_args=[migration_plan.name], message="migration plan is in progress, be patient please", delay=15, num_sec=3600, ) view.switch_to("Completed Plans") view.wait_displayed() migration_plan_collection.find_completed_plan(migration_plan) logger.info( "For plan %s, migration status after completion: %s, total time elapsed: %s", migration_plan.name, view.migration_plans_completed_list.get_vm_count_in_plan(migration_plan.name), view.migration_plans_completed_list.get_clock(migration_plan.name), ) # validate MAC address matches between source and target VMs assert view.migration_plans_completed_list.is_plan_succeeded(migration_plan.name) migrated_vm = get_migrated_vm_obj(src_vm_obj, v2v_providers.rhv_provider) assert src_vm_obj.mac_address == migrated_vm.mac_address<|fim▁end|>
post_playbook=retire_catalog.name, ) # explicit wait for spinner of in-progress status card
<|file_name|>Fridge.py<|end_file_name|><|fim▁begin|>import RPi.GPIO as GPIO import datetime import time import pandas as pd import logging import logging.handlers import sys logger = logging.getLogger('fridge') handler = logging.StreamHandler() fHandler = logging.FileHandler('fridge.log') formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s", "%Y-%m-%d %H:%M:%S") handler.setFormatter(formatter) fHandler.setFormatter(formatter) logger.addHandler(handler) logger.addHandler(fHandler) logger.setLevel(logging.DEBUG) logging.captureWarnings(True) dataLog = logging.getLogger('fridge.data') dataFormatter = logging.Formatter("%(asctime)s, %(message)s", "%Y-%m-%d %H:%M:%S") dataFileName = 'fridge-' + str(datetime.datetime.now()) + '.data' dataHandler = logging.handlers.RotatingFileHandler(dataFileName, mode='w', maxBytes=10000, backupCount=2) dataHandler.setFormatter(dataFormatter) dataLog.addHandler(dataHandler) dataLog.setLevel(logging.INFO) class Fridge: def __init__(self, heaterGpio, coolerGpio, ambientTempSensorRomCode): self.initGpio(heaterGpio, coolerGpio) self.heater = TemperatureElement(heaterGpio, name='heater') self.cooler = TemperatureElement(coolerGpio, name='cooler') self.ambientTempSensor = DS18B20(ambientTempSensorRomCode, name='TempSens') self.resultPeriod = datetime.timedelta(minutes=10) self.maxResults = 1000 self.lastResultTime = None self.resultTime = datetime.datetime.now() self.resultsFile = 'results.txt' fo = open(self.resultsFile, 'w') fo.close() def initGpio(self, heaterGpioPin, coolerGpioPin): GPIO.setmode(GPIO.BCM) GPIO.setup(heaterGpioPin, GPIO.OUT) GPIO.setup(coolerGpioPin, GPIO.OUT) def updateResultsLog(self, dataFile): if datetime.datetime.now() >= self.resultTime: now = datetime.datetime.now() names = ['date', 'set', 'meas', 'heater', 'cooler'] d = pd.read_csv(dataFile, names=names) d['date'] = pd.to_datetime(d['date']) d['error'] = d.meas - d.set d['absError'] = d['error'].abs() if self.lastResultTime == None: dt = d else: start = self.lastResultTime end = self.resultTime mask = (d['date'] > start) & (d['date'] <= end) dt = d.loc[mask] mean = dt.meas.mean() maxErr = dt.error.max() minErr = dt.error.min() meanErr = dt.error.mean() meanAbsErr = dt.absError.mean() set = d['set'].iloc[-1] names = ['date', 'set', 'mean', 'maxErr', 'minErr', 'meanErr', 'meanAbsErr'] d_r = pd.read_csv(self.resultsFile, names=names) try: fi = open(self.resultsFile, 'r') resBefore = fi.read() resBefore = resBefore.split('\n') fi.close() except: whatever = 1000 fo = open(self.resultsFile, 'w') fo.write('{:11s}'.format('Date')) fo.write('{:9s}'.format('Time')) fo.write('{:5s}'.format('set')) fo.write('{:5s}'.format('mean')) fo.write('{:5s}'.format('maxE')) fo.write('{:5s}'.format('minE')) fo.write('{:6s}'.format('meanE')) fo.write('{:9s}'.format('meanAbsE') + '\n') fo.write( self.resultTime.strftime('%Y-%m-%d %H:%M:%S') + ' ' + '{:4.1f}'.format(set) + ' ' + '{:4.1f}'.format(mean) + ' ' + '{:4.1f}'.format(maxErr) + ' ' + '{:4.1f}'.format(minErr) + ' ' + '{:5.1f}'.format(meanErr) + ' ' + '{:8.1f}'.format(meanAbsErr) + '\n' ) if len(resBefore) >= 2: for i in xrange(1, len(resBefore)-1, 1): fo.write(resBefore[i] + '\n') if i > self.maxResults: break fo.close() self.lastResultTime = self.resultTime self.resultTime = now + self.resultPeriod class TemperatureElement: def __init__(self, bcmGpioNum, name='Name'): self.name = name self.gpioPin = bcmGpioNum self.on = None self.lastOnTime = None self.minOnTime = datetime.timedelta(minutes=1) self.minOffTime = datetime.timedelta(minutes=3) 
try: GPIO.output(self.gpioPin, False) self.lastOffTime = datetime.datetime.now() except: logger.error('Failed to switch off in temp el init') raise def isOn(self): if(GPIO.input(self.gpioPin)): return True else: return False def status(self): if(GPIO.input(self.gpioPin)): try: onFor = str(datetime.datetime.now()-self.lastOnTime).split('.')[0] except: onFor = 'No Last On Time' logger.debug(self.name + " been ON for " + onFor) return self.name + " ON for " + onFor else: try: offFor = str(datetime.datetime.now()-self.lastOffTime).split('.')[0] except: offFor = 'No Last Off Time' logger.debug(self.name +" been OFF for " + offFor) return self.name +" OFF for " + offFor def turnOff(self): now = datetime.datetime.now() switchOff = False #if not been on/off yet then can switch off if self.on == None: switchOff = True #if not been on yet, and not currently off then can switch off elif self.lastOnTime == None and self.on != False: switchOff = True #if on, and have been on for at least minOnTime then can switch off elif self.on == True: if (now - self.lastOnTime) > self.minOnTime: switchOff = True else: logger.debug(self.name + ' Unable to switch off. Min On Time not met' ) elif self.on == False: switchOff = False # Already off else: logger.debug(self.name + ' Unable to switch off. Valid condition not found.' ) #Switch on if have decided to if switchOff == True: try: GPIO.output(self.gpioPin, False) self.lastOffTime = now self.on = False logger.debug(self.name + ' Switched Off Return 1' ) return 1 except: logger.debug(self.name + ' Exception Return -1' ) raise return -1 else: logger.debug(self.name + ' No Change Return 0.' ) return 0 def turnOn(self): now = datetime.datetime.now() switchOn = False #if not been on/off yet then can switch on if self.on == None: switchOn = True #if not been off yet, and not currently on then can switch on elif self.lastOffTime == None and self.on != True: switchOn = True #if off, and have been off for at least minOffTime then can switch on elif self.on == False: if (now - self.lastOffTime) > self.minOffTime:<|fim▁hole|> switchOn = False # Already off else: logger.debug(self.name + ' Unable to switch on. Valid condition not found.' 
) #Switch on if have decided to if switchOn == True: try: GPIO.output(self.gpioPin, True) self.lastOnTime = now self.on = True logger.debug(self.name + ' Switched On Return 1' ) return 1 except: logger.debug(self.name + ' Exception Return -1' ) raise return -1 else: logger.debug(self.name + ' No Change Return 0' ) return 0 class DS18B20: def __init__(self, romCode, name='Name'): self.name = name self.romCode = romCode def getTemp(self): tempFile = open('/sys/bus/w1/devices/' + self.romCode + '/w1_slave') tempText = tempFile.read() tempFile.close() tempData = tempText.split("\n")[1].split(" ")[9] temp = float(tempData[2:]) / 1000 logger.debug(self.name + ' ' + str(temp)) return temp heaterGpio = 6 coolerGpio = 5 tempSensRomCode='28-0316027c72ff' fridge = Fridge(heaterGpio, coolerGpio, tempSensRomCode) fridge.heater.minOffTime=datetime.timedelta(seconds=1) fridge.heater.minOnTime=datetime.timedelta(seconds=1) fridge.cooler.minOffTime=datetime.timedelta(minutes=3) fridge.cooler.minOnTime=datetime.timedelta(minutes=1) fridge.ambientTempSensor.getTemp() samplePeriod = datetime.timedelta(seconds=10) setTemp = 21 heaterOnHyst = 0.2 #Amount below set temp that heater is asked to switch on at heaterOffHyst = 0.1 #Amount below set temp that heater is asked to switch off at coolerOnHyst = 1.5 #Amount above set temp that cooler is asked to switch on at coolerOffHyst = 1 #Amount above set temp that cooler is asked to switch off at i=0 while True: try: i=i+1 loopStartTime = datetime.datetime.now() temp = fridge.ambientTempSensor.getTemp() logger.debug('i=' + str(i) + ' Error=' + str(temp-setTemp) + ' Temp=' + str(temp) + ' Set temp=' + str(setTemp)) temp = fridge.ambientTempSensor.getTemp() fridge.heater.status() fridge.cooler.status() #Heater decision #If heater not on and temp is below set - heaterOnHyst then try to switch on if not fridge.heater.isOn(): if temp < (setTemp - heaterOnHyst): fridge.heater.turnOn() #If heater is on and temp above setTemp - heaetr OffHyst then try to switch off if fridge.heater.isOn(): if temp > (setTemp - heaterOffHyst): fridge.heater.turnOff() #Cooler decision #If cooler not on and temp is above set + coolerOnHyst then try to switch cooler on if not fridge.cooler.isOn(): if temp > (setTemp + coolerOnHyst): fridge.cooler.turnOn() #If cooler is on and temp below setTemp + coolerOffHyst then try to switch off if fridge.cooler.isOn(): if temp < (setTemp + coolerOffHyst): fridge.cooler.turnOff() dataLog.info('{}'.format(setTemp) + ', ' + '{}'.format(temp) + ', ' + str(fridge.heater.isOn()) + ', ' + '{}'.format(fridge.cooler.isOn()) ) fridge.updateResultsLog(dataFileName) while datetime.datetime.now() < (loopStartTime + samplePeriod): doNothing = 1 except KeyboardInterrupt: logger.info('Ctrl-c Exit.') fridge.heater.turnOff() fridge.cooler.turnOff() sys.exit()<|fim▁end|>
switchOn = True else: logger.debug(self.name + ' Unable to switch on. Min Off Time not met' ) elif self.on == True:
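The heater/cooler decisions in Fridge.py implement plain on/off hysteresis
around setTemp (heater on below setTemp - 0.2, off above setTemp - 0.1, with a
wider band for the cooler), so an element is never toggled rapidly around a
single threshold. The same logic, condensed into a hypothetical helper:

def heater_should_change(is_on, temp, set_temp, on_hyst=0.2, off_hyst=0.1):
    # e.g. set_temp=21: turn on below 20.8, turn off above 20.9
    if not is_on and temp < set_temp - on_hyst:
        return "on"
    if is_on and temp > set_temp - off_hyst:
        return "off"
    return None   # stay as we are; the 0.1 degree gap prevents chatter

assert heater_should_change(False, 20.7, 21) == "on"
assert heater_should_change(True, 21.0, 21) == "off"
assert heater_should_change(True, 20.85, 21) is None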
<|file_name|>test_base_kanban_abstract.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright 2016 LasLabs Inc. # License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html). from odoo import models from odoo.tests.common import SavepointCase class BaseKanbanAbstractTester(models.TransientModel): _name = 'base.kanban.abstract.tester' _inherit = 'base.kanban.abstract' class TestBaseKanbanAbstract(SavepointCase): @classmethod def _init_test_model(cls, model_cls): """ It builds a model from model_cls in order to test abstract models. Note that this does not actually create a table in the database, so there may be some unidentified edge cases. Args: model_cls (openerp.models.BaseModel): Class of model to initialize Returns: model_cls: Instance """ registry = cls.env.registry cr = cls.env.cr inst = model_cls._build_model(registry, cr) model = cls.env[model_cls._name].with_context(todo=[]) model._prepare_setup() model._setup_base(partial=False) model._setup_fields(partial=False) model._setup_complete() model._auto_init() model.init() model._auto_end() cls.test_model_record = cls.env['ir.model'].search([<|fim▁hole|> ('name', '=', model._name), ]) return inst @classmethod def setUpClass(cls): super(TestBaseKanbanAbstract, cls).setUpClass() cls.env.registry.enter_test_mode() cls._init_test_model(BaseKanbanAbstractTester) cls.test_model = cls.env[BaseKanbanAbstractTester._name] @classmethod def tearDownClass(cls): cls.env.registry.leave_test_mode() super(TestBaseKanbanAbstract, cls).tearDownClass() def setUp(self): super(TestBaseKanbanAbstract, self).setUp() test_stage_1 = self.env['base.kanban.stage'].create({ 'name': 'Test Stage 1', 'res_model_id': self.test_model_record.id, }) test_stage_2 = self.env['base.kanban.stage'].create({ 'name': 'Test Stage 2', 'res_model_id': self.test_model_record.id, 'fold': True, }) self.id_1 = test_stage_1.id self.id_2 = test_stage_2.id def test_read_group_stage_ids(self): """It should return the correct recordset. """ self.assertEqual( self.test_model._read_group_stage_ids( self.env['base.kanban.stage'], [], 'id', ), self.env['base.kanban.stage'].search([], order='id'), ) def test_default_stage_id(self): """ It should return an empty RecordSet """ self.assertEqual( self.env['base.kanban.abstract']._default_stage_id(), self.env['base.kanban.stage'] )<|fim▁end|>
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>extern crate scoped_pool; #[macro_use] extern crate log; extern crate ansi_term; extern crate env_logger; extern crate pbr; extern crate serde_derive; extern crate term_painter; #[macro_use] extern crate clap; extern crate anyhow; extern crate glob; extern crate num_cpus; extern crate regex; extern crate toml; extern crate unic_char_range; extern crate walkdir; use args::Args; mod app; mod args; mod check; mod clean; mod config; mod search; use pbr::ProgressBar; use std::{ fs::File, io::prelude::*, num, process, sync::{ mpsc::{sync_channel, SyncSender}, Arc, }, thread, }; macro_rules! eprintln { ($($tt:tt)*) => {{ use std::io::Write; let _ = writeln!(&mut ::std::io::stderr(), $($tt)*); }} } #[allow(dead_code)] fn main() { match Args::parse().map(Arc::new).and_then(run) { Ok(0) => process::exit(0), Ok(_) => process::exit(1), Err(err) => { eprintln!("{}", err); process::exit(1); } } } fn run(args: Arc<Args>) -> Result<u64, num::ParseIntError> { let enforcer_cfg = config::get_cfg(args.config_file()); if args.status() { println!(" using this config: {:?}", enforcer_cfg); std::process::exit(0); } let cfg_ignores: &Vec<String> = &enforcer_cfg.ignore; let cfg_endings = enforcer_cfg.endings; let file_endings = if !args.endings().is_empty() { args.endings() } else { &cfg_endings }; let mut checked_files: u32 = 0; let mut had_tabs: u32 = 0; let mut had_trailing_ws: u32 = 0; let mut had_illegals: u32 = 0; let mut had_too_long_lines: u32 = 0; let mut had_win_line_endings: u32 = 0; let clean_f = args.clean(); let tabs_f = args.tabs(); let use_crlf = args.use_crlf(); let thread_count = args.threads(); let color_f = args.color(); let max_line_length = args.line_length(); let start_dir = args.path(); debug!("args:{:?}", args); if args.quiet() { println!("quiet flag was used but is deprecated...use verbosity instead"); } let info_level: check::InfoLevel = args.info_level(); let paths = search::find_matches(start_dir.as_path(), cfg_ignores, file_endings); let count: u64 = paths.len() as u64; let mut pb = ProgressBar::new(count); // logger thread let (logging_tx, logging_rx) = sync_channel::<Option<String>>(0); let stop_logging_tx = logging_tx.clone(); thread::spawn(move || { let mut done = false; while !done { done = logging_rx .recv() .ok() // not done when we got a receive error (sender end of connection closed) .map_or(false, |maybe_print| { maybe_print // a None indicates that logging is done .map_or(true, |p| // just print the string we received {print!("{}", p); false}) }); } }); let (w_chan, r_chan) = sync_channel(thread_count); thread::spawn(move || { use scoped_pool::Pool; let pool = Pool::new(thread_count); pool.scoped(|scope| { for path in paths { let ch: SyncSender<Result<u8, std::io::Error>> = w_chan.clone(); let l_ch: SyncSender<Option<String>> = logging_tx.clone(); scope.execute(move || { if !check::is_dir(path.as_path()) { let p = path.clone(); let mut f = File::open(path) .unwrap_or_else(|_| panic!("error reading file {:?}", p)); let mut buffer = Vec::new(); f.read_to_end(&mut buffer) .unwrap_or_else(|_| panic!("error reading file {:?}", p)); let r = check::check_path( p.as_path(), &buffer, clean_f, info_level, max_line_length, if tabs_f { clean::TabStrategy::Tabify } else { clean::TabStrategy::Untabify }, if use_crlf { clean::LineEnding::CRLF } else { clean::LineEnding::LF }, l_ch, ); ch.send(r).expect("send result with SyncSender"); } }); } }); }); for _ in 0..count { match r_chan.recv() { Ok(res) => match res { Ok(r) => { if (r & 
check::HAS_TABS) > 0 { had_tabs += 1 } if (r & check::TRAILING_SPACES) > 0 { had_trailing_ws += 1 } if (r & check::HAS_ILLEGAL_CHARACTERS) > 0 { had_illegals += 1 } if (r & check::LINE_TOO_LONG) > 0 { had_too_long_lines += 1 } if (r & check::HAS_WINDOWS_LINE_ENDINGS) > 0 { had_win_line_endings += 1 } } Err(e) => { error!("error occured here: {}", e); } }, Err(e) => { panic!("error in channel: {}", e); } } checked_files += 1; if info_level == check::InfoLevel::Quiet { pb.inc(); } } if info_level == check::InfoLevel::Quiet { pb.finish(); }; let _ = stop_logging_tx.send(None); let findings = Findings {<|fim▁hole|> had_trailing_ws, had_illegals, had_too_long_lines, had_win_line_endings, checked_files, }; report_findings(info_level == check::InfoLevel::Quiet, findings, color_f) } #[derive(Debug)] struct Findings { had_tabs: u32, had_trailing_ws: u32, had_illegals: u32, had_too_long_lines: u32, had_win_line_endings: u32, checked_files: u32, } fn report_findings( quiet: bool, findings: Findings, colored: bool, ) -> Result<u64, num::ParseIntError> { let total_errors = findings.had_tabs + findings.had_illegals + findings.had_trailing_ws + findings.had_too_long_lines + findings.had_win_line_endings; if quiet { if colored { println!("{}: {}", check::bold("enforcer-error-count"), total_errors); } else { println!("enforcer-error-count: {}", total_errors); } } if total_errors > 0 { if colored { println!( "checked {} files {}", findings.checked_files, check::bold("(enforcer_errors!)") ); } else { println!( "checked {} files (enforcer_errors!)", findings.checked_files ); } if findings.had_tabs > 0 { println!(" [with TABS:{}]", findings.had_tabs) } if findings.had_illegals > 0 { println!(" [with ILLEGAL CHARS:{}]", findings.had_illegals) } if findings.had_trailing_ws > 0 { println!(" [with TRAILING SPACES:{}]", findings.had_trailing_ws) } if findings.had_too_long_lines > 0 { println!(" [with TOO LONG LINES:{}]", findings.had_too_long_lines) } if findings.had_win_line_endings > 0 { println!( " [with WINDOWS LINE ENDINGS:{}]", findings.had_win_line_endings ) } Ok(1) } else { if colored { println!( "checked {} files {}", findings.checked_files, check::green("(enforcer_clean!)") ); } else { println!("checked {} files (enforcer_clean!)", findings.checked_files); } Ok(0) } }<|fim▁end|>
had_tabs,
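The enforcer code above packs its per-file findings into bit flags and tests
them with masks (r & check::HAS_TABS, and so on). In miniature — the flag
values below are assumptions, only the pattern mirrors the Rust source:

HAS_TABS, TRAILING_SPACES, LINE_TOO_LONG = 1, 2, 4

result = HAS_TABS | LINE_TOO_LONG          # one file, two findings
if result & HAS_TABS:
    print("file contains tabs")
if result & TRAILING_SPACES:
    print("file has trailing whitespace")  # not reached for this result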
<|file_name|>vulkano_gralloc.rs<|end_file_name|><|fim▁begin|>// Copyright 2021 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! vulkano_gralloc: Implements swapchain allocation and memory mapping //! using Vulkano. //! //! External code found at https://github.com/vulkano-rs/vulkano. #![cfg(feature = "vulkano")] use std::collections::BTreeMap as Map; use std::convert::TryInto; use std::iter::Empty; use std::sync::Arc; use base::MappedRegion; use crate::rutabaga_gralloc::gralloc::{Gralloc, ImageAllocationInfo, ImageMemoryRequirements}; use crate::rutabaga_utils::*; use vulkano::device::physical::{MemoryType, PhysicalDevice, PhysicalDeviceType}; use vulkano::device::{Device, DeviceCreationError, DeviceExtensions}; use vulkano::image::{ sys, ImageCreateFlags, ImageCreationError, ImageDimensions, ImageUsage, SampleCount, }; use vulkano::instance::{Instance, InstanceCreationError, InstanceExtensions, Version}; use vulkano::memory::{ DedicatedAlloc, DeviceMemoryAllocError, DeviceMemoryBuilder, DeviceMemoryMapping, ExternalMemoryHandleType, MemoryRequirements, }; use vulkano::memory::pool::AllocFromRequirementsFilter; use vulkano::sync::Sharing; /// A gralloc implementation capable of allocation `VkDeviceMemory`. pub struct VulkanoGralloc { devices: Map<PhysicalDeviceType, Arc<Device>>, has_integrated_gpu: bool, } struct VulkanoMapping { mapping: DeviceMemoryMapping, size: usize, } impl VulkanoMapping { pub fn new(mapping: DeviceMemoryMapping, size: usize) -> VulkanoMapping { VulkanoMapping { mapping, size } } } unsafe impl MappedRegion for VulkanoMapping { /// Used for passing this region for hypervisor memory mappings. We trust crosvm to use this /// safely. fn as_ptr(&self) -> *mut u8 { unsafe { self.mapping.as_ptr() } } /// Returns the size of the memory region in bytes. fn size(&self) -> usize { self.size } } impl VulkanoGralloc { /// Returns a new `VulkanGralloc' instance upon success. pub fn init() -> RutabagaResult<Box<dyn Gralloc>> { // Initialization copied from triangle.rs in Vulkano. Look there for a more detailed // explanation of VK initialization. let instance_extensions = InstanceExtensions { khr_external_memory_capabilities: true, khr_get_physical_device_properties2: true, ..InstanceExtensions::none() }; let instance = Instance::new(None, Version::V1_1, &instance_extensions, None)?; let mut devices: Map<PhysicalDeviceType, Arc<Device>> = Default::default(); let mut has_integrated_gpu = false; for physical in PhysicalDevice::enumerate(&instance) { let queue_family = physical .queue_families() .find(|&q| { // We take the first queue family that supports graphics. 
q.supports_graphics() }) .ok_or(RutabagaError::SpecViolation( "need graphics queue family to proceed", ))?; let supported_extensions = physical.supported_extensions(); let desired_extensions = DeviceExtensions { khr_dedicated_allocation: true, khr_get_memory_requirements2: true, khr_external_memory: true, khr_external_memory_fd: true, ext_external_memory_dma_buf: true, ..DeviceExtensions::none() }; let intersection = supported_extensions.intersection(&desired_extensions); if let Ok(device, mut _queues) = Device::new( physical, physical.supported_features(), &intersection, [(queue_family, 0.5)].iter().cloned(), ) { let device_type = device.physical_device().properties().device_type; if device_type == PhysicalDeviceType::IntegratedGpu { has_integrated_gpu = true } // If we have two devices of the same type (two integrated GPUs), the old value is // dropped. Vulkano is verbose enough such that a keener selection algorithm may // be used, but the need for such complexity does not seem to exist now. devices.insert(device_type, device); }; } if devices.is_empty() { return Err(RutabagaError::SpecViolation( "no matching VK devices available", )); } Ok(Box::new(VulkanoGralloc { devices, has_integrated_gpu, })) } // This function is used safely in this module because gralloc does not: // // (1) bind images to any memory. // (2) transition the layout of images. // (3) transfer ownership of images between queues. // // In addition, we trust Vulkano to validate image parameters are within the Vulkan spec. unsafe fn create_image( &mut self, info: ImageAllocationInfo, ) -> RutabagaResult<(sys::UnsafeImage, MemoryRequirements)> { let device = if self.has_integrated_gpu { self.devices .get(&PhysicalDeviceType::IntegratedGpu) .ok_or(RutabagaError::InvalidGrallocGpuType)? } else { self.devices .get(&PhysicalDeviceType::DiscreteGpu) .ok_or(RutabagaError::InvalidGrallocGpuType)? }; let usage = match info.flags.uses_rendering() { true => ImageUsage { color_attachment: true, ..ImageUsage::none() }, false => ImageUsage { sampled: true, ..ImageUsage::none() }, }; // Reasonable bounds on image width. if info.width == 0 || info.width > 4096 { return Err(RutabagaError::InvalidGrallocDimensions); } // Reasonable bounds on image height. if info.height == 0 || info.height > 4096 { return Err(RutabagaError::InvalidGrallocDimensions); } let vulkan_format = info.drm_format.vulkan_format()?; let (unsafe_image, memory_requirements) = sys::UnsafeImage::new( device.clone(), usage, vulkan_format, ImageCreateFlags::none(), ImageDimensions::Dim2d { width: info.width, height: info.height, array_layers: 1, }, SampleCount::Sample1, 1, /* mipmap count */ Sharing::Exclusive::<Empty<_>>, true, /* linear images only currently */ false, /* not preinitialized */ )?; Ok((unsafe_image, memory_requirements)) } } impl Gralloc for VulkanoGralloc { fn supports_external_gpu_memory(&self) -> bool { for device in self.devices.values() { if !device.enabled_extensions().khr_external_memory { return false; } } true } fn supports_dmabuf(&self) -> bool { for device in self.devices.values() { if !device.enabled_extensions().ext_external_memory_dma_buf { return false; } } true } fn get_image_memory_requirements( &mut self, info: ImageAllocationInfo, ) -> RutabagaResult<ImageMemoryRequirements> { let mut reqs: ImageMemoryRequirements = Default::default(); let (unsafe_image, memory_requirements) = unsafe { self.create_image(info)? 
}; let device = if self.has_integrated_gpu { self.devices .get(&PhysicalDeviceType::IntegratedGpu) .ok_or(RutabagaError::InvalidGrallocGpuType)? } else { self.devices .get(&PhysicalDeviceType::DiscreteGpu) .ok_or(RutabagaError::InvalidGrallocGpuType)? }; let planar_layout = info.drm_format.planar_layout()?; // Safe because we created the image with the linear bit set and verified the format is // not a depth or stencil format. We are also using the correct image aspect. Vulkano // will panic if we are not. for plane in 0..planar_layout.num_planes { let aspect = info.drm_format.vulkan_image_aspect(plane)?; let layout = unsafe { unsafe_image.multiplane_color_layout(aspect) }; reqs.strides[plane] = layout.row_pitch as u32; reqs.offsets[plane] = layout.offset as u32; } let need_visible = info.flags.host_visible(); let want_cached = info.flags.host_cached(); let memory_type = { let filter = |current_type: MemoryType| { if need_visible && !current_type.is_host_visible() { return AllocFromRequirementsFilter::Forbidden; } if !need_visible && current_type.is_device_local() { return AllocFromRequirementsFilter::Preferred; } if need_visible && want_cached && current_type.is_host_cached() { return AllocFromRequirementsFilter::Preferred; } if need_visible && !want_cached && current_type.is_host_coherent() && !current_type.is_host_cached() { return AllocFromRequirementsFilter::Preferred; } AllocFromRequirementsFilter::Allowed }; let first_loop = device .physical_device() .memory_types() .map(|t| (t, AllocFromRequirementsFilter::Preferred)); let second_loop = device .physical_device() .memory_types() .map(|t| (t, AllocFromRequirementsFilter::Allowed)); first_loop .chain(second_loop) .filter(|&(t, _)| (memory_requirements.memory_type_bits & (1 << t.id())) != 0) .find(|&(t, rq)| filter(t) == rq) .ok_or(RutabagaError::SpecViolation( "unable to find required memory type", ))? .0 }; reqs.info = info; reqs.size = memory_requirements.size as u64; if memory_type.is_host_visible() { if memory_type.is_host_cached() { reqs.map_info = RUTABAGA_MAP_CACHE_CACHED; } else if memory_type.is_host_coherent() { reqs.map_info = RUTABAGA_MAP_CACHE_WC; } } reqs.vulkan_info = Some(VulkanInfo { memory_idx: memory_type.id() as u32, physical_device_idx: device.physical_device().index() as u32, }); Ok(reqs) } fn allocate_memory(&mut self, reqs: ImageMemoryRequirements) -> RutabagaResult<RutabagaHandle> { let (unsafe_image, memory_requirements) = unsafe { self.create_image(reqs.info)? }; let vulkan_info = reqs.vulkan_info.ok_or(RutabagaError::InvalidVulkanInfo)?; let device = if self.has_integrated_gpu { self.devices .get(&PhysicalDeviceType::IntegratedGpu) .ok_or(RutabagaError::InvalidGrallocGpuType)? } else { self.devices .get(&PhysicalDeviceType::DiscreteGpu) .ok_or(RutabagaError::InvalidGrallocGpuType)? 
}; let memory_type = device .physical_device() .memory_type_by_id(vulkan_info.memory_idx) .ok_or(RutabagaError::InvalidVulkanInfo)?; let (handle_type, rutabaga_type) = match device.enabled_extensions().ext_external_memory_dma_buf { true => ( ExternalMemoryHandleType { dma_buf: true, ..ExternalMemoryHandleType::none() }, RUTABAGA_MEM_HANDLE_TYPE_DMABUF, ), false => ( ExternalMemoryHandleType { opaque_fd: true, ..ExternalMemoryHandleType::none() }, RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD, ), }; let dedicated = match device.enabled_extensions().khr_dedicated_allocation { true => { if memory_requirements.prefer_dedicated { DedicatedAlloc::Image(&unsafe_image) } else { DedicatedAlloc::None } } false => DedicatedAlloc::None, }; let device_memory = DeviceMemoryBuilder::new(device.clone(), memory_type.id(), reqs.size) .dedicated_info(dedicated) .export_info(handle_type) .build()?; let descriptor = device_memory.export_fd(handle_type)?.into(); Ok(RutabagaHandle { os_handle: descriptor, handle_type: rutabaga_type, }) } /// Implementations must map the memory associated with the `resource_id` upon success. fn import_and_map(<|fim▁hole|> &mut self, handle: RutabagaHandle, vulkan_info: VulkanInfo, size: u64, ) -> RutabagaResult<Box<dyn MappedRegion>> { let device = self .devices .values() .find(|device| { device.physical_device().index() as u32 == vulkan_info.physical_device_idx }) .ok_or(RutabagaError::InvalidVulkanInfo)?; let handle_type = match handle.handle_type { RUTABAGA_MEM_HANDLE_TYPE_DMABUF => ExternalMemoryHandleType { dma_buf: true, ..ExternalMemoryHandleType::none() }, RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD => ExternalMemoryHandleType { opaque_fd: true, ..ExternalMemoryHandleType::none() }, _ => return Err(RutabagaError::InvalidRutabagaHandle), }; let device_memory = DeviceMemoryBuilder::new(device.clone(), vulkan_info.memory_idx, size) .import_info(handle.os_handle.into(), handle_type) .build()?; let mapping = DeviceMemoryMapping::new(device.clone(), device_memory.clone(), 0, size, 0)?; Ok(Box::new(VulkanoMapping::new(mapping, size.try_into()?))) } } // Vulkano should really define an universal type that wraps all these errors, say // "VulkanoError(e)". impl From<InstanceCreationError> for RutabagaError { fn from(e: InstanceCreationError) -> RutabagaError { RutabagaError::VkInstanceCreationError(e) } } impl From<ImageCreationError> for RutabagaError { fn from(e: ImageCreationError) -> RutabagaError { RutabagaError::VkImageCreationError(e) } } impl From<DeviceCreationError> for RutabagaError { fn from(e: DeviceCreationError) -> RutabagaError { RutabagaError::VkDeviceCreationError(e) } } impl From<DeviceMemoryAllocError> for RutabagaError { fn from(e: DeviceMemoryAllocError) -> RutabagaError { RutabagaError::VkDeviceMemoryAllocError(e) } }<|fim▁end|>
<|file_name|>0002_auto_20160213_1225.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('orders', '0001_initial'), ] operations = [ migrations.AlterField( model_name='order', name='paid',<|fim▁hole|> ), ]<|fim▁end|>
field=models.BooleanField(default=False),
<|file_name|>xid.py<|end_file_name|><|fim▁begin|># Copyright (C) 2008-2009 Mark A. Matienzo # # This file is part of worldcat, the Python WorldCat API module. # # worldcat is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # worldcat is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with worldcat. If not, see <http://www.gnu.org/licenses/>. """ worldcat/request/xid.py -- Request objects for xID APIs xID APIs as of this writing include xISBN, xISSN, and xOCLCNUM. 'Alternate request formats' (such as OpenURL and unAPI) have not been implemented. """ from worldcat.exceptions import EmptyRecordNumberError from worldcat.request import WorldCatRequest from worldcat.response.xid import xIDResponse class xIDRequest(WorldCatRequest): """request.xid.xIDRequest: Base class for requests from xID APIs. All xIDRequests require a record number ('rec_num') to be passed when a class is instantiated. Depending on the request, this will either be an ISBN, an ISSN, or an OCLC record number. xIDRequests by default have their 'method' set as 'getEditions' and their response format set as 'python'. """ def __init__(self, rec_num=None, **kwargs): """Constructor for xIDRequests.""" if 'method' not in kwargs: kwargs['method'] = 'getEditions' if 'format' not in kwargs: kwargs['format'] = 'python' if 'fl' not in kwargs: kwargs['fl'] = '*' WorldCatRequest.__init__(self, **kwargs) self.rec_num = rec_num def get_response(self): self.http_get() return xIDResponse(self) def subclass_validator(self, quiet=False): """Validator method for xIDRequests. Does not validate ISSN or ISBN values; this should be handled by the xID APIs. """ if self.rec_num == None: if quiet == True: return False else: raise EmptyRecordNumberError else: return True class xISSNRequest(xIDRequest): """request.xid.xISSNRequest: Class for xISSN requests For more information on the xISSN API, see <http://xissn.worldcat.org/xissnadmin/doc/api.htm>. 
Example of an xISSNRequest: >>> from worldcat.request.xid import xISSNRequest >>> x = xISSNRequest(rec_num='1895-104X') >>> x.validate() >>> r = x.get_response() """ def __init__(self, rec_num=None, **kwargs): """Constructor method for xISSNRequests.""" xIDRequest.__init__(self, rec_num, **kwargs) self._validators = { 'method': ('getForms', 'getHistory', 'fixChecksum', 'getMetadata', 'getEditions'), 'format': ('xml', 'html', 'json', 'python', 'ruby', 'text', 'csv', 'php')} def api_url(self): self.url = 'http://xissn.worldcat.org/webservices/xid/issn/%s' \ % self.rec_num class xISBNRequest(xIDRequest): """request.xid.xISBNRequest: Class for xISBN requests """ def __init__(self, rec_num=None, **kwargs): """Constructor method for xISBNRequests.""" xIDRequest.__init__(self, rec_num, **kwargs) self._validators = { 'method': ('to10', 'to13', 'fixChecksum', 'getMetadata', 'getEditions', 'hyphen'), 'format': ('xml', 'html', 'json', 'python', 'ruby', 'txt', 'csv', 'php')} def api_url(self): self.url = 'http://xisbn.worldcat.org/webservices/xid/isbn/%s' \ % self.rec_num class xOCLCNUMRequest(xIDRequest): """request.xid.xOCLCNUMRequest: Class for xOCLCNUM requests This now replaces the old xOCLCNUMRequest class in worldcat >= 0.3.1. xOCLCNUMRequest now takes a 'type' argument; one of "oclcnum", "lccn", or "owi", for OCLC record numbers, Library of Congress Catalog Numbers, or OCLC Work Identifiers.<|fim▁hole|> """ def __init__(self, rec_num=None, numtype='oclcnum', **kwargs): """Constructor method for xISBNRequests.""" xIDRequest.__init__(self, rec_num, **kwargs) self.numtype = numtype self._validators = { 'method': ('getVariants', 'getMetadata', 'getEditions'), 'format': ('xml', 'html', 'json', 'python', 'ruby', 'txt', 'csv', 'php')} def api_url(self): self.url = 'http://xisbn.worldcat.org/webservices/xid/%s/%s' \ % (self.numtype, self.rec_num)<|fim▁end|>
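Pulling the docstrings of the row above together, a usage sketch for the xID
request classes (assumes the worldcat package is importable; the record number
below is made up):

from worldcat.request.xid import xISBNRequest

x = xISBNRequest(rec_num='0060723804')    # hypothetical ISBN
x.validate()                              # raises EmptyRecordNumberError when rec_num is None
response = x.get_response()               # GETs xisbn.worldcat.org and wraps the reply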
<|file_name|>fn-pattern-expected-type.rs<|end_file_name|><|fim▁begin|>// run-pass pub fn main() { let f = |(x, y): (isize, isize)| { assert_eq!(x, 1); assert_eq!(y, 2); }; f((1, 2));<|fim▁hole|><|fim▁end|>
}
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
from __future__ import division
<|file_name|>test.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions, and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions, and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the author of this software nor the name of # contributors to this software may be used to endorse or promote products # derived from this software without specific prior written consent. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ### from supybot.test import * class ChanRegTestCase(PluginTestCase): plugins = ('ChanReg',) # vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:<|fim▁end|>
### # Copyright (c) 2013, Nicolas Coevoet # All rights reserved. #
<|file_name|>structofp12__experimenter__stats__header.js<|end_file_name|><|fim▁begin|>var structofp12__experimenter__stats__header =<|fim▁hole|> [ "exp_type", "structofp12__experimenter__stats__header.html#a460441be714bfea09fad329cbf887740", null ], [ "experimenter", "structofp12__experimenter__stats__header.html#a26a1cc494706c82c3d8a1a79faf82463", null ] ];<|fim▁end|>
[
<|file_name|>bert_embeddings.py<|end_file_name|><|fim▁begin|>"""Contains function for calculating BERT embeddings""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import json import re import torch from torch.utils.data import TensorDataset, DataLoader, SequentialSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer from pytorch_pretrained_bert.modeling import BertModel from scipy.spatial.distance import cosine, euclidean class BertEmbedding(object): """Class for calculating embeddings between two texts""" def __init__(self, bert_model='bert-base-uncased', max_seq_length=50, device='cpu'): """Initializing the BERT model""" self.bert_model = bert_model self.max_seq_length = max_seq_length self.device = torch.device("cpu" if device=='cpu' or not torch.cuda.is_available() else "cuda") n_gpu = torch.cuda.device_count() self.tokenizer = BertTokenizer.from_pretrained(self.bert_model, do_lower_case=True) self.model = BertModel.from_pretrained(self.bert_model) self.model.to(self.device) if n_gpu > 1: self.model = torch.nn.DataParallel(self.model) self.model.eval() def get_embeddings(self, sentences, layer=-1): """Returns embeddings of words/sentences""" assert isinstance(sentences, list) for pair in sentences: assert len(pair) == 1 examples = self._read_examples(sentences) features = self._convert_examples_to_features( examples=examples) unique_id_to_feature = {} for feature in features: unique_id_to_feature[feature.unique_id] = feature all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long) all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long) eval_data = TensorDataset(all_input_ids, all_input_mask, all_example_index) eval_sampler = SequentialSampler(eval_data) eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=16) out_features = [] for input_ids, input_mask, example_indices in eval_dataloader: input_ids = input_ids.to(self.device) input_mask = input_mask.to(self.device) all_encoder_layers, _ = self.model(input_ids, token_type_ids=None, attention_mask=input_mask) all_encoder_layers = all_encoder_layers values = torch.mean(all_encoder_layers[layer], 1) out_features.append(values.detach().cpu().numpy()) flat_list = [item for sublist in out_features for item in sublist] return flat_list def _convert_examples_to_features(self, examples): """Generate features of examples""" features = [] for (ex_index, example) in enumerate(examples): tokens_a = self.tokenizer.tokenize(example.text) if len(tokens_a) > self.max_seq_length - 2: tokens_a = tokens_a[0:(self.max_seq_length - 2)] tokens = [] input_type_ids = [] tokens.append("[CLS]") input_type_ids.append(0) for token in tokens_a: tokens.append(token) input_type_ids.append(0) tokens.append("[SEP]") input_type_ids.append(0) input_ids = self.tokenizer.convert_tokens_to_ids(tokens) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. input_mask = [1] * len(input_ids) # Zero-pad up to the sequence length. 
while len(input_ids) < self.max_seq_length: input_ids.append(0) input_mask.append(0) input_type_ids.append(0) assert len(input_ids) == self.max_seq_length assert len(input_mask) == self.max_seq_length assert len(input_type_ids) == self.max_seq_length features.append( InputFeatures( unique_id=example.unique_id, tokens=tokens, input_ids=input_ids, input_mask=input_mask, input_type_ids=input_type_ids)) return features def _read_examples(self, inp): """Read a list of `InputExample`s from an input file."""<|fim▁hole|> line_a = a.strip() examples.append( InputExample(unique_id=unique_id, text=line_a)) unique_id += 1 return examples class InputExample(object): """Input an example""" def __init__(self, unique_id, text): self.unique_id = unique_id self.text = text class InputFeatures(object): """A single set of features of data.""" def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids): self.unique_id = unique_id self.tokens = tokens self.input_ids = input_ids self.input_mask = input_mask self.input_type_ids = input_type_ids<|fim▁end|>
examples = [] unique_id = 0 for a, in inp:
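bert_embeddings.py imports scipy's cosine distance but the excerpt never calls
it; the usual follow-up is comparing two mean-pooled sentence vectors. An
illustrative sketch (the sentences are made up; the argument shape follows
get_embeddings, which expects a list of one-element lists):

from scipy.spatial.distance import cosine

be = BertEmbedding(max_seq_length=50, device='cpu')
vec_a, vec_b = be.get_embeddings([["a cat sat"], ["a dog sat"]])
similarity = 1.0 - cosine(vec_a, vec_b)   # cosine() returns a distance, not a similarity
print(similarity)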
<|file_name|>tr3.unregister.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################ # # MODULE: tr3.unregister # AUTHOR(S): Soeren Gebbert # # PURPOSE: Unregister raster3d maps from space time raster3d datasets # COPYRIGHT: (C) 2011 by the GRASS Development Team # # This program is free software under the GNU General Public # License (version 2). Read the file COPYING that comes with GRASS<|fim▁hole|># for details. # ############################################################################# #%module #% description: Unregister raster3d map(s) from a specific or from all space time raster3d dataset in which it is registered #% keywords: spacetime raster3d dataset #% keywords: raster3d #%end #%option #% key: dataset #% type: string #% description: Name of an existing space time raster3d dataset. If no name is provided the raster3d map(s) are unregistered from all space time datasets in which they are registered. #% required: no #% multiple: no #%end #%option #% key: maps #% type: string #% description: Name(s) of existing raster3d map(s) to unregister #% required: yes #% multiple: yes #%end import grass.script as grass import grass.temporal as tgis ############################################################################ def main(): # Get the options name = options["dataset"] maps = options["maps"] # Make sure the temporal database exists tgis.create_temporal_database() # Unregister maps tgis.unregister_maps_from_space_time_datasets("raster3d", name, maps) if __name__ == "__main__": options, flags = grass.parser() main()<|fim▁end|>
<|file_name|>input.rs<|end_file_name|><|fim▁begin|>use engine::components::{Movable, Move}; use engine::resources::{ReplayMode, Skip}; use engine::ActionInput; use specs::prelude::*; use specs::world::Index; use scaii_defs::protos::Action as ScaiiAction; #[derive(SystemData)] pub struct InputSystemData<'a> { movable: ReadStorage<'a, Movable>, input: FetchMut<'a, ActionInput>, ids: Entities<'a>, is_replay: Fetch<'a, ReplayMode>, skip: FetchMut<'a, Skip>, moves: WriteStorage<'a, Move>, } #[derive(Default)] pub struct InputSystem {} impl InputSystem { pub fn new() -> Self { InputSystem {} } } impl<'a> System<'a> for InputSystem { type SystemData = InputSystemData<'a>; fn run(&mut self, mut sys_data: Self::SystemData) { use engine::components::{MoveBehavior, MoveTarget}; use std::mem; let actions = mem::replace(&mut sys_data.input.0, None); let actions = if actions.is_some() { let (actions, skip, skip_lua) = to_action_list(actions.unwrap()); // ignore skipping for replays if !sys_data.is_replay.0 { *sys_data.skip = Skip(skip, skip_lua); } actions } else { return; }; for action in actions { let entity = sys_data.ids.entity(action.unit_id); // Maybe set an error state later?<|fim▁hole|> if !sys_data.movable.get(entity).is_some() { continue; } let move_order = match action.action { ActionTarget::Attack(tar_id) => { let target = sys_data.ids.entity(tar_id); if !sys_data.ids.is_alive(target) { continue; } Move { behavior: MoveBehavior::Straight, target: MoveTarget::AttackUnit(target), } } }; sys_data.moves.insert(entity, move_order); } // for (pos, moves, id) in (&mut sys_data.positions, &sys_data.moves, &*sys_data.ids).join() {} } } #[derive(Debug, Copy, Clone, PartialEq)] struct Action { unit_id: Index, action: ActionTarget, } #[derive(Debug, Copy, Clone, PartialEq)] enum ActionTarget { Attack(Index), } fn to_action_list(raw: ScaiiAction) -> (Vec<Action>, bool, Option<String>) { use prost::Message; use protos::unit_action::Action as RtsAction; use protos::{ActionList, AttackUnit}; if raw.alternate_actions.is_none() { return Default::default(); } let action: ActionList = ActionList::decode(raw.alternate_actions.unwrap()).expect("Could parse inner message"); let actions = action .actions .into_iter() .map(|a| Action { unit_id: a.unit_id as Index, action: match a.action.expect("Expected an action descriptor") { RtsAction::AttackUnit(AttackUnit { target_id }) => { ActionTarget::Attack(target_id as Index) } _ => unimplemented!(), // whats this line do }, }) .collect(); (actions, action.skip.unwrap_or_default(), action.skip_lua) } #[cfg(test)] mod tests { use engine::components::{Movable, Move}; use engine::ActionInput; use specs::prelude::*; use scaii_defs::protos::Action as ScaiiAction; use super::*; #[test] fn input() { use engine::components::{MoveBehavior, MoveTarget}; use engine::{components, resources}; use prost::Message; use protos::unit_action::Action; use protos::{ActionList, AttackUnit, UnitAction}; let mut world = World::new(); components::register_world_components(&mut world); resources::register_world_resources(&mut world); let test_player = world.create_entity().with(Movable(0)).build(); let test_target = world.create_entity().build(); let actions = ActionList { actions: vec![UnitAction { unit_id: test_player.id().into(), action: Some(Action::AttackUnit(AttackUnit { target_id: test_target.id(), })), }], ..Default::default() }; let mut buf = Vec::new(); actions.encode(&mut buf).unwrap(); world.write_resource::<ActionInput>().0 = Some( ScaiiAction { alternate_actions: Some(buf), 
..Default::default() } .clone(), ); let mut sys: Dispatcher = DispatcherBuilder::new() .add(InputSystem::new(), "input", &[]) .build(); sys.dispatch(&mut world.res); let moves = world.read::<Move>(); assert!(moves.get(test_player).unwrap().target == MoveTarget::AttackUnit(test_target)); // Verifies that test_player's target is test target assert!(moves.get(test_player).unwrap().behavior == MoveBehavior::Straight); // Verifies that test_player's move behavior is straight } }<|fim▁end|>
if !sys_data.ids.is_alive(entity) { continue; }
<|file_name|>handler_test.go<|end_file_name|><|fim▁begin|>/* * Copyright © 2015-2018 Aeneas Rekkas <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @author Aeneas Rekkas <[email protected]> * @copyright 2015-2018 Aeneas Rekkas <[email protected]> * @license Apache-2.0 */ package jwk_test import (<|fim▁hole|> "net/http" "net/http/httptest" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" jose "gopkg.in/square/go-jose.v2" "github.com/ory/viper" "github.com/ory/hydra/driver/configuration" "github.com/ory/hydra/internal" "github.com/ory/hydra/x" ) func TestHandlerWellKnown(t *testing.T) { conf := internal.NewConfigurationWithDefaults() reg := internal.NewRegistry(conf) viper.Set(configuration.ViperKeyWellKnownKeys, []string{x.OpenIDConnectKeyName, x.OpenIDConnectKeyName}) router := x.NewRouterPublic() IDKS, _ := testGenerator.Generate("test-id", "sig") h := reg.KeyHandler() require.NoError(t, reg.KeyManager().AddKeySet(context.TODO(), x.OpenIDConnectKeyName, IDKS)) h.SetRoutes(router.RouterAdmin(), router, func(h http.Handler) http.Handler { return h }) testServer := httptest.NewServer(router) JWKPath := "/.well-known/jwks.json" res, err := http.Get(testServer.URL + JWKPath) require.NoError(t, err, "problem in http request") defer res.Body.Close() var known jose.JSONWebKeySet err = json.NewDecoder(res.Body).Decode(&known) require.NoError(t, err, "problem in decoding response") require.Len(t, known.Keys, 1) resp := known.Key("public:test-id") require.NotNil(t, resp, "Could not find key public") assert.Equal(t, resp, IDKS.Key("public:test-id")) }<|fim▁end|>
"context" "encoding/json"
<|file_name|>.eslintrc.js<|end_file_name|><|fim▁begin|>module.exports = { "extends": "airbnb-base", "plugins": [ "import" ], "env": { "browser": true, "mocha": true, "protractor": true, "node": true }, "globals": { "expect": true }, "rules": { "semi": ["error", "never"], "quotes": ["error", "double"], "indent": ["error", "tab"], "no-console": 0, "no-param-reassign": ["error", { "props": true, "ignorePropertyModificationsFor": [ "newParent", "parent", "child" ] }], "no-tabs": 0 }<|fim▁hole|>};<|fim▁end|>
<|file_name|>test_migration.py<|end_file_name|><|fim▁begin|>import unittest
import os
from unittest.mock import patch

import migration
from configuration import Builder
import configuration
from tests import testhelper


<|fim▁hole|>class MigrationTestCase(unittest.TestCase):
    def setUp(self):
        self.rootfolder = os.path.dirname(os.path.realpath(__file__))

    @patch('migration.Commiter')
    @patch('migration.Initializer')
    @patch('migration.RTCInitializer')
    @patch('migration.os')
    @patch('configuration.shutil')
    def testDeletionOfLogFolderOnInitialization(self, shutil_mock, os_mock, rtc_initializer_mock,
                                                git_initializer_mock, git_comitter_mock):
        config = Builder().setrootfolder(self.rootfolder).build()
        anylogpath = config.getlogpath("testDeletionOfLogFolderOnInitialization")
        os_mock.path.exists.return_value = False
        configuration.config = config

        migration.initialize()

        expectedlogfolder = self.rootfolder + os.sep + "Logs"
        shutil_mock.rmtree.assert_called_once_with(expectedlogfolder)

    def testExistRepo_Exists_ShouldReturnTrue(self):
        with testhelper.createrepo(folderprefix="test_migration"):
            self.assertTrue(migration.existsrepo())

    def testExistRepo_DoesntExist_ShouldReturnFalse(self):
        configuration.config = Builder().setworkdirectory(self.rootfolder).setgitreponame("test.git").build()
        self.assertFalse(migration.existsrepo())<|fim▁end|>
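# The decorator stack above is the standard unittest.mock pattern: each @patch
# swaps out one name for the duration of the test, and the mocks are injected
# bottom-up, so the decorator closest to the function becomes the first
# argument. A self-contained sketch against stdlib names (targets here are
# examples, not the module under test):
from unittest.mock import patch

@patch("shutil.rmtree")       # outermost decorator -> last mock argument
@patch("os.path.exists")      # innermost decorator -> first mock argument
def check_order(exists_mock, rmtree_mock):
    exists_mock.return_value = False
    rmtree_mock.assert_not_called()

check_order()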
<|file_name|>bencher.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python3 # Suggest name to z3 binary based on it its sha import sys import words import subprocess import argparse import os.path import shutil from pathlib import Path import yaml class Bencher(object):<|fim▁hole|> self._name = 'bencher' self._help = 'Make benchmark direcotry' def mk_arg_parser(self, ap): ap.add_argument('--suffix', '-s', metavar='EXT', type=str, default='smt2', help='File extension') ap.add_argument('--prefix', '-p', metavar='PREF', required='True', help='Prefix to assign') ap.add_argument('--out', '-o', type=str, metavar="DIR", help='Output directory', required=True) ap.add_argument('files', nargs='+') ap.add_argument( '--mv', action='store_true', help='Move (instead of copy) benchmarks into new location') ap.add_argument('--verbose', '-v', action='store_true') ap.add_argument('--dry-run', action='store_true') return ap def run(self, args=None): num_files = len(args.files) num_fmt = '{idx:0' + str(len(str(num_files))) + '}' out_dir = Path(args.out) out_dir.mkdir(parents=True, exist_ok=True) prefix = args.prefix suffix = args.suffix # pick an action to apply to each file if args.dry_run: def _dry_run_action(src, dst): pass file_action = _dry_run_action elif args.mv: file_action = shutil.move else: file_action = shutil.copy2 inverse = dict() for id, src in enumerate(args.files): idx_str = num_fmt.format(idx=id) dst_name = f'{prefix}-{idx_str}.{suffix}' dst = out_dir / dst_name if (args.verbose): print(f'{src} --> {dst}') file_action(src, dst) inverse[dst_name] = src with open(out_dir / 'inverse.yaml', 'w') as inverse_file: yaml.dump(inverse, inverse_file) return 0 def main(self, argv): ap = argparse.ArgumentParser(prog=self._name, description=self._help) ap = self.mk_arg_parser(ap) args = ap.parse_args(argv) return self.run(args) def main(): cmd = Bencher() return cmd.main(sys.argv[1:]) if __name__ == '__main__': sys.exit(main())<|fim▁end|>
    def __init__(self):
        self._name = 'bencher'
        self._help = 'Make benchmark directory'
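# The '{idx:0N}' format string built above pads indices to a fixed width so
# the renamed benchmarks sort lexicographically. The same idea in isolation
# (prefix/suffix defaults are illustrative):
def padded_names(n_files, prefix="bench", suffix="smt2"):
    width = len(str(n_files))
    return [f"{prefix}-{i:0{width}d}.{suffix}" for i in range(n_files)]

# padded_names(12) -> ['bench-00.smt2', ..., 'bench-11.smt2']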
<|file_name|>borrowck-multiple-captures.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(box_syntax)] use std::thread::Thread; fn borrow<T>(_: &T) { } fn different_vars_after_borrows() { let x1 = box 1is; let p1 = &x1; let x2 = box 2is; let p2 = &x2; Thread::spawn(move|| { drop(x1); //~ ERROR cannot move `x1` into closure because it is borrowed drop(x2); //~ ERROR cannot move `x2` into closure because it is borrowed }); borrow(&*p1); borrow(&*p2); } fn different_vars_after_moves() { let x1 = box 1is; drop(x1); let x2 = box 2is; drop(x2); Thread::spawn(move|| { drop(x1); //~ ERROR capture of moved value: `x1` drop(x2); //~ ERROR capture of moved value: `x2` }); } fn same_var_after_borrow() { let x = box 1is; let p = &x; Thread::spawn(move|| { drop(x); //~ ERROR cannot move `x` into closure because it is borrowed drop(x); //~ ERROR use of moved value: `x` }); borrow(&*p); } fn same_var_after_move() { let x = box 1is; drop(x); Thread::spawn(move|| { drop(x); //~ ERROR capture of moved value: `x` drop(x); //~ ERROR use of moved value: `x` }); } fn main() { different_vars_after_borrows(); different_vars_after_moves();<|fim▁hole|> same_var_after_move(); }<|fim▁end|>
same_var_after_borrow();
<|file_name|>page.rs<|end_file_name|><|fim▁begin|>// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //! Contains Parquet Page definitions and page reader interface. use crate::basic::{Encoding, PageType}; use crate::errors::Result; use crate::file::{metadata::ColumnChunkMetaData, statistics::Statistics}; use crate::schema::types::{ColumnDescPtr, SchemaDescPtr}; use crate::util::memory::ByteBufferPtr; /// Parquet Page definition. /// /// List of supported pages. /// These are 1-to-1 mapped from the equivalent Thrift definitions, except `buf` which /// used to store uncompressed bytes of the page. pub enum Page { DataPage { buf: ByteBufferPtr, num_values: u32, encoding: Encoding, def_level_encoding: Encoding, rep_level_encoding: Encoding, statistics: Option<Statistics>, }, DataPageV2 { buf: ByteBufferPtr, num_values: u32, encoding: Encoding, num_nulls: u32, num_rows: u32, def_levels_byte_len: u32, rep_levels_byte_len: u32, is_compressed: bool, statistics: Option<Statistics>, }, DictionaryPage { buf: ByteBufferPtr, num_values: u32, encoding: Encoding, is_sorted: bool, }, } impl Page { /// Returns [`PageType`](crate::basic::PageType) for this page. pub fn page_type(&self) -> PageType { match self { &Page::DataPage { .. } => PageType::DATA_PAGE, &Page::DataPageV2 { .. } => PageType::DATA_PAGE_V2, &Page::DictionaryPage { .. } => PageType::DICTIONARY_PAGE, } } /// Returns internal byte buffer reference for this page. pub fn buffer(&self) -> &ByteBufferPtr { match self { &Page::DataPage { ref buf, .. } => &buf, &Page::DataPageV2 { ref buf, .. } => &buf, &Page::DictionaryPage { ref buf, .. } => &buf, } } /// Returns number of values in this page. pub fn num_values(&self) -> u32 { match self { &Page::DataPage { num_values, .. } => num_values, &Page::DataPageV2 { num_values, .. } => num_values, &Page::DictionaryPage { num_values, .. } => num_values, } } /// Returns this page [`Encoding`](crate::basic::Encoding). pub fn encoding(&self) -> Encoding { match self { &Page::DataPage { encoding, .. } => encoding, &Page::DataPageV2 { encoding, .. } => encoding, &Page::DictionaryPage { encoding, .. } => encoding, } } /// Returns optional [`Statistics`](crate::file::metadata::Statistics). pub fn statistics(&self) -> Option<&Statistics> { match self { &Page::DataPage { ref statistics, .. } => statistics.as_ref(), &Page::DataPageV2 { ref statistics, .. } => statistics.as_ref(), &Page::DictionaryPage { .. } => None, } } } /// Helper struct to represent pages with potentially compressed buffer (data page v1) or /// compressed and concatenated buffer (def levels + rep levels + compressed values for /// data page v2). /// /// The difference with `Page` is that `Page` buffer is always uncompressed. 
pub struct CompressedPage { compressed_page: Page, uncompressed_size: usize, } impl CompressedPage { /// Creates `CompressedPage` from a page with potentially compressed buffer and /// uncompressed size. pub fn new(compressed_page: Page, uncompressed_size: usize) -> Self { Self { compressed_page, uncompressed_size, } } /// Returns page type. pub fn page_type(&self) -> PageType { self.compressed_page.page_type() } /// Returns underlying page with potentially compressed buffer. pub fn compressed_page(&self) -> &Page { &self.compressed_page } /// Returns uncompressed size in bytes. pub fn uncompressed_size(&self) -> usize { self.uncompressed_size } /// Returns compressed size in bytes. /// /// Note that it is assumed that buffer is compressed, but it may not be. In this /// case compressed size will be equal to uncompressed size. pub fn compressed_size(&self) -> usize { self.compressed_page.buffer().len() } /// Number of values in page. pub fn num_values(&self) -> u32 { self.compressed_page.num_values() } /// Returns encoding for values in page. pub fn encoding(&self) -> Encoding { self.compressed_page.encoding() } /// Returns slice of compressed buffer in the page. pub fn data(&self) -> &[u8] { self.compressed_page.buffer().data() } } /// Contains page write metrics. pub struct PageWriteSpec { pub page_type: PageType, pub uncompressed_size: usize, pub compressed_size: usize, pub num_values: u32, pub offset: u64, pub bytes_written: u64, } impl PageWriteSpec { /// Creates new spec with default page write metrics. pub fn new() -> Self { Self { page_type: PageType::DATA_PAGE, uncompressed_size: 0, compressed_size: 0, num_values: 0, offset: 0, bytes_written: 0, } } } /// API for reading pages from a column chunk. /// This offers a iterator like API to get the next page. pub trait PageReader { /// Gets the next page in the column chunk associated with this reader. /// Returns `None` if there are no pages left. fn get_next_page(&mut self) -> Result<Option<Page>>; } /// API for writing pages in a column chunk. /// /// It is reasonable to assume that all pages will be written in the correct order, e.g. /// dictionary page followed by data pages, or a set of data pages, etc. pub trait PageWriter { /// Writes a page into the output stream/sink. /// Returns `PageWriteSpec` that contains information about written page metrics, /// including number of bytes, size, number of values, offset, etc. /// /// This method is called for every compressed page we write into underlying buffer, /// either data page or dictionary page. fn write_page(&mut self, page: CompressedPage) -> Result<PageWriteSpec>; /// Writes column chunk metadata into the output stream/sink. /// /// This method is called once before page writer is closed, normally when writes are /// finalised in column writer. fn write_metadata(&mut self, metadata: &ColumnChunkMetaData) -> Result<()>; /// Closes resources and flushes underlying sink. /// Page writer should not be used after this method is called. fn close(&mut self) -> Result<()>; } /// An iterator over pages of some specific column in a parquet file. pub trait PageIterator: Iterator<Item = Result<Box<PageReader>>> { /// Get schema of parquet file. fn schema(&mut self) -> Result<SchemaDescPtr>; /// Get column schema of this page iterator. 
fn column_schema(&mut self) -> Result<ColumnDescPtr>; } #[cfg(test)] mod tests { use super::*; #[test] fn test_page() { let data_page = Page::DataPage { buf: ByteBufferPtr::new(vec![0, 1, 2]), num_values: 10,<|fim▁hole|> }; assert_eq!(data_page.page_type(), PageType::DATA_PAGE); assert_eq!(data_page.buffer().data(), vec![0, 1, 2].as_slice()); assert_eq!(data_page.num_values(), 10); assert_eq!(data_page.encoding(), Encoding::PLAIN); assert_eq!( data_page.statistics(), Some(&Statistics::int32(Some(1), Some(2), None, 1, true)) ); let data_page_v2 = Page::DataPageV2 { buf: ByteBufferPtr::new(vec![0, 1, 2]), num_values: 10, encoding: Encoding::PLAIN, num_nulls: 5, num_rows: 20, def_levels_byte_len: 30, rep_levels_byte_len: 40, is_compressed: false, statistics: Some(Statistics::int32(Some(1), Some(2), None, 1, true)), }; assert_eq!(data_page_v2.page_type(), PageType::DATA_PAGE_V2); assert_eq!(data_page_v2.buffer().data(), vec![0, 1, 2].as_slice()); assert_eq!(data_page_v2.num_values(), 10); assert_eq!(data_page_v2.encoding(), Encoding::PLAIN); assert_eq!( data_page_v2.statistics(), Some(&Statistics::int32(Some(1), Some(2), None, 1, true)) ); let dict_page = Page::DictionaryPage { buf: ByteBufferPtr::new(vec![0, 1, 2]), num_values: 10, encoding: Encoding::PLAIN, is_sorted: false, }; assert_eq!(dict_page.page_type(), PageType::DICTIONARY_PAGE); assert_eq!(dict_page.buffer().data(), vec![0, 1, 2].as_slice()); assert_eq!(dict_page.num_values(), 10); assert_eq!(dict_page.encoding(), Encoding::PLAIN); assert_eq!(dict_page.statistics(), None); } #[test] fn test_compressed_page() { let data_page = Page::DataPage { buf: ByteBufferPtr::new(vec![0, 1, 2]), num_values: 10, encoding: Encoding::PLAIN, def_level_encoding: Encoding::RLE, rep_level_encoding: Encoding::RLE, statistics: Some(Statistics::int32(Some(1), Some(2), None, 1, true)), }; let cpage = CompressedPage::new(data_page, 5); assert_eq!(cpage.page_type(), PageType::DATA_PAGE); assert_eq!(cpage.uncompressed_size(), 5); assert_eq!(cpage.compressed_size(), 3); assert_eq!(cpage.num_values(), 10); assert_eq!(cpage.encoding(), Encoding::PLAIN); assert_eq!(cpage.data(), &[0, 1, 2]); } }<|fim▁end|>
encoding: Encoding::PLAIN, def_level_encoding: Encoding::RLE, rep_level_encoding: Encoding::RLE, statistics: Some(Statistics::int32(Some(1), Some(2), None, 1, true)),
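# The CompressedPage wrapper above keeps one invariant worth spelling out: the
# uncompressed size is recorded explicitly, while the "compressed" size is just
# the length of whatever buffer is wrapped - the two coincide when no codec was
# applied. A toy restatement (class name is ours, not parquet's):
class SizedBuffer:
    def __init__(self, buf, uncompressed_size):
        self.buf = bytes(buf)
        self.uncompressed_size = uncompressed_size

    @property
    def compressed_size(self):
        return len(self.buf)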
<|file_name|>redirect.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from httoop.status.types import StatusException from httoop.uri import URI class RedirectStatus(StatusException): u"""REDIRECTIONS = 3xx A redirection to other URI(s) which are set in the Location-header.<|fim▁hole|> def __init__(self, location, *args, **kwargs): if not isinstance(location, (type(None), list, tuple)): location = [location] if location is not None: kwargs.setdefault('headers', {})['Location'] = ', '.join(str(URI(uri)) for uri in location) super(RedirectStatus, self).__init__(*args, **kwargs) def to_dict(self): dct = super(RedirectStatus, self).to_dict() if self.headers.get('Location'): dct.update(dict(Location=self.headers['Location'])) return dct class MULTIPLE_CHOICES(RedirectStatus): u"""The server has multiple representations of the requested resource. And the client e.g. did not specify the Accept-header or the requested representation does not exists. """ code = 300 class MOVED_PERMANENTLY(RedirectStatus): u"""The the server knows the target resource but the URI is incorrect (wrong domain, trailing slash, etc.). It can also be send if a resource have moved or renamed to prevent broken links.""" code = 301 cacheable = True class FOUND(RedirectStatus): code = 302 cacheable = True class SEE_OTHER(RedirectStatus): u"""The request has been processed but instead of serving a representation of the result or resource it links to another document which contains a static status message, etc. so the client is not forced to download the data. This is also useful for links like /release-latest.tar.gz -> /release-1.2.tar.gz""" code = 303 cacheable = True class NOT_MODIFIED(RedirectStatus): u"""The client already has the data which is provided through the information in the Etag or If-Modified-Since-header. The Date-header is required, the ETag-header and Content-Location-header are useful. Also the caching headers Expires, Cache-Control and Vary are required if they differ from those sent previously. TODO: what to do if the representation format has changed but not the representation itself? The response body has to be empty.""" code = 304 body = None def __init__(self, *args, **kwargs): # don't set location super(NOT_MODIFIED, self).__init__(None, *args, **kwargs) header_to_remove = ( "Allow", "Content-Encoding", "Content-Language", "Content-Length", "Content-MD5", "Content-Range", "Content-Type", "Expires", "Location" ) class USE_PROXY(RedirectStatus): code = 305 class TEMPORARY_REDIRECT(RedirectStatus): u"""The request has not processed because the requested resource is located at a different URI. The client should resent the request to the URI given in the Location-header. for GET this is the same as 303 but for POST, PUT and DELETE it is important that the request was not processed.""" code = 307 cacheable = True class PERMANENT_REDIRECT(RedirectStatus): code = 308 cacheable = True<|fim▁end|>
""" location = None
<|file_name|>rain_mask_save_lat_lon_west_southern_indian_ocean.py<|end_file_name|><|fim▁begin|>import os, sys import datetime import iris import iris.unit as unit import iris.analysis.cartography import numpy as np from iris.coord_categorisation import add_categorised_coord diag = 'avg.5216' cube_name_explicit='stratiform_rainfall_rate' cube_name_param='convective_rainfall_rate' pp_file_path='/projects/cascade/pwille/moose_retrievals/' experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12 #experiment_ids = ['djzns', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] #experiment_ids = [ 'dklwu', 'dklzq', 'dklyu', 'dkmbq', 'dkbhu', 'djznu', 'dkhgu', 'djzns' ] #experiment_ids = ['djznu', 'dkhgu' ] # High Res #experiment_ids = ['djznw', 'djzny', 'djznq', 'dkjxq'] #experiment_ids = ['djznw', 'djzny', 'djznq', 'dkmbq', 'dklzq', 'dkjxq' ] # Params # Load global LAM dtmindt = datetime.datetime(2011,8,19,0,0,0) dtmaxdt = datetime.datetime(2011,9,7,23,0,0) dtmin = unit.date2num(dtmindt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD) dtmax = unit.date2num(dtmaxdt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD) time_constraint = iris.Constraint(time= lambda t: dtmin <= t.point <= dtmax) # Min and max lats lons from smallest model domain (dkbhu) - see spreadsheet latmin=-10 latmax=5 lonmin=64.115 lonmax=80 lat_constraint=iris.Constraint(grid_latitude= lambda la: latmin <= la.point <= latmax) lon_constraint=iris.Constraint(grid_longitude= lambda lo: lonmin <= lo.point <= lonmax)<|fim▁hole|> fg = '%sdjzn/djznw/%s.pp' % (pp_file_path, diag) glob_load = iris.load_cube(fg, ('%s' % cube_name_param) & time_constraint) ## Get time points from global LAM to use as time constraint when loading other runs time_list = glob_load.coord('time').points glob_tc = iris.Constraint(time=time_list) del glob_load def unrotate_pole_update_cube(cube): lat = cube.coord('grid_latitude').points lon = cube.coord('grid_longitude').points cs = cube.coord_system('CoordSystem') if isinstance(cs, iris.coord_systems.RotatedGeogCS): print ' %s - %s - Unrotate pole %s' % (diag, experiment_id, cs) lons, lats = np.meshgrid(lon, lat) lons,lats = iris.analysis.cartography.unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude) lon=lons[0] lat=lats[:,0] for i, coord in enumerate (cube.coords()): if coord.standard_name=='grid_latitude': lat_dim_coord_cube = i if coord.standard_name=='grid_longitude': lon_dim_coord_cube = i csur=cs.ellipsoid cube.remove_coord('grid_latitude') cube.remove_coord('grid_longitude') cube.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_cube) cube.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_cube) return cube for experiment_id in experiment_ids: expmin1 = experiment_id[:-1] fu = '%s%s/%s/%s.pp' % (pp_file_path, expmin1, experiment_id, diag) flsm = '%s%s/%s/30.pp' % (pp_file_path, expmin1, experiment_id) print experiment_id sys.stdout.flush() try: #cube_names = ['%s' % cube_name_param, '%s' % cube_name_explicit] cubeconv = iris.load_cube(fu,'%s' % cube_name_param & glob_tc) cubeconv= unrotate_pole_update_cube(cubeconv) cubestrat = iris.load_cube(fu,'%s' % cube_name_explicit & glob_tc) cubestrat= unrotate_pole_update_cube(cubestrat) print cubestrat cube=cubeconv.extract(lat_constraint & lon_constraint) + 
cubestrat.extract(lat_constraint & lon_constraint) cube.rename('total_precipitation_rate') except iris.exceptions.ConstraintMismatchError: cube = iris.load_cube(fu, ('%s' % cube_name_explicit) & glob_tc) cube= unrotate_pole_update_cube(cube) cube = cube.extract(lat_constraint & lon_constraint) # Mean at each grid point by hour of day and save add_categorised_coord(cube, 'hour', 'time',lambda coord, x: coord.units.num2date(x).hour) diurnal_mean_cube = cube.aggregated_by('hour', iris.analysis.MEAN) del cube #try: # iris.save(diurnal_mean_cube, '%s%s/%s/%s_rainfall_hourly_mean.pp' % (pp_file_path, expmin1, experiment_id, diag)) #except Exception, e: # print e # pass # Load land/sea mask lsm = iris.load_cube(flsm, ('land_binary_mask' ) ) lsm = unrotate_pole_update_cube(lsm) lsm=lsm.extract(lat_constraint & lon_constraint) print lsm sys.stdout.flush() # For Sea and Land, mask area and calculate mean of each hour for sea/land and SAVE as numpy array #tdmc= diurnal_mean_cube.collapsed(['grid_latitude', 'grid_longitude'], iris.analysis.MEAN) #total_diurnal_mean_cube=[tdmc.data.data, diurnal_mean_cube.coord('hour').points+0.5] #print total_diurnal_mean_cube #np.save('%s%s/%s/%s_total_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), total_diurnal_mean_cube) for s in ([0]): nancube = np.where(lsm.data==s, diurnal_mean_cube.data, np.NaN) maskedcube = np.ma.masked_array(nancube,np.isnan(nancube)) total_rainfall = np.mean(maskedcube.reshape(maskedcube.shape[0], (maskedcube.shape[1]*maskedcube.shape[2])), axis=1) trnp =[total_rainfall.data, diurnal_mean_cube.coord('hour').points+0.5] if s == 0: # Areas of ocean print total_rainfall np.save('%s%s/%s/%s_sea_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), trnp) #np.save('%s%s/%s/%s_sea_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), maskedcube) if s == 1: # Areas of land np.save('%s%s/%s/%s_land_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), trnp) #np.save('%s%s/%s/%s_land_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), maskedcube) del lsm #tdmc= diurnal_mean_cube.collapsed(['grid_latitude', 'grid_longitude'], iris.analysis.MEAN) #total_diurnal_mean_cube=tdmc #np.save('%s%s/%s/%s_total_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), tdmc.data.data) #np.save('%s%s/%s/%s_total_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s_MASKED_ARRAY' % (pp_file_path, expmin1, experiment_id, diag, latmin, latmax, lonmin, lonmax), ma)<|fim▁end|>
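# The iris pipeline above categorises samples by hour of day and averages each
# grid point per hour (aggregated_by('hour', iris.analysis.MEAN)). The core
# reduction in plain numpy, assuming hourly samples along axis 0 and that every
# hour of day occurs at least once:
import numpy as np

def diurnal_mean(data, hours):
    """Mean of data (time, ...) for each hour 0-23, given per-sample hours."""
    return np.stack([data[hours == h].mean(axis=0) for h in range(24)])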
<|file_name|>orig_sig.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python from pylab import * # create some data to use for the plot dt = 0.001 t = arange(0.0, 10.0, dt) r = exp(-t[:1000]/0.05) # impulse response x = randn(len(t)) s = convolve(x,r)[:len(x)]*dt # colored noise # the main axes is subplot(111) by default plot(t, s) #axis([0, 1, 1.1*amin(s), 2*amax(s) ]) #xlabel('time (s)') #ylabel('current (nA)') #title('The original signal for channel i', fontsize=28) plt.xticks( range(5), ('', '', '', '','', '', '', '','2', '', '', '','3', '', '', '','4', '', '', '',) ) plt.yticks( range(1), ('', '', '', '','', '', '', '','2', '', '', '','3', '', '', '','4', '', '', '',) ) ## this is an inset axes over the main axes #a = axes([.65, .6, .2, .2], axisbg='y') #n, bins, patches = hist(s, 400, normed=1) #title('Probability') #setp(a, xticks=[], yticks=[]) # ## this is another inset axes over the main axes #a = axes([0.2, 0.6, .2, .2], axisbg='y') #plot(t[:len(r)], r)<|fim▁hole|> show()<|fim▁end|>
#title('Impulse response') #setp(a, xlim=(0,.2), xticks=[], yticks=[])
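# The demo above synthesises "colored" noise by convolving white noise with an
# exponentially decaying impulse response; the construction on its own:
import numpy as np

dt = 0.001
t = np.arange(0.0, 10.0, dt)
impulse = np.exp(-t[:1000] / 0.05)               # 1 s decaying response
noise = np.random.randn(len(t))
colored = np.convolve(noise, impulse)[:len(t)] * dt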
<|file_name|>tvec.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use back::abi; use lib; use lib::llvm::{llvm, ValueRef}; use middle::trans::base; use middle::trans::base::*; use middle::trans::build::*; use middle::trans::callee; use middle::trans::common::*; use middle::trans::datum::*; use middle::trans::expr::{Dest, Ignore, SaveIn}; use middle::trans::expr; use middle::trans::glue; use middle::trans::machine::{llsize_of, nonzero_llsize_of}; use middle::trans::type_of; use middle::ty; use util::common::indenter; use util::ppaux::ty_to_str; use middle::trans::type_::Type; use std::option::None; use syntax::ast; use syntax::codemap; pub fn make_uniq_free_glue(bcx: block, vptrptr: ValueRef, box_ty: ty::t) -> block { let box_datum = immediate_rvalue(Load(bcx, vptrptr), box_ty); let not_null = IsNotNull(bcx, box_datum.val); do with_cond(bcx, not_null) |bcx| { let body_datum = box_datum.box_body(bcx); let bcx = glue::drop_ty(bcx, body_datum.to_ref_llval(bcx), body_datum.ty); if ty::type_contents(bcx.tcx(), box_ty).contains_managed() { glue::trans_free(bcx, box_datum.val) } else { glue::trans_exchange_free(bcx, box_datum.val) } } } // Boxed vector types are in some sense currently a "shorthand" for a box // containing an unboxed vector. This expands a boxed vector type into such an // expanded type. It doesn't respect mutability, but that doesn't matter at // this point. 
pub fn expand_boxed_vec_ty(tcx: ty::ctxt, t: ty::t) -> ty::t { let unit_ty = ty::sequence_element_type(tcx, t); let unboxed_vec_ty = ty::mk_mut_unboxed_vec(tcx, unit_ty); match ty::get(t).sty { ty::ty_estr(ty::vstore_uniq) | ty::ty_evec(_, ty::vstore_uniq) => { fail!("cannot treat vectors/strings as exchange allocations yet"); } ty::ty_estr(ty::vstore_box) | ty::ty_evec(_, ty::vstore_box) => { ty::mk_imm_box(tcx, unboxed_vec_ty) } _ => tcx.sess.bug("non boxed-vec type \ in tvec::expand_boxed_vec_ty") } } pub fn get_fill(bcx: block, vptr: ValueRef) -> ValueRef { let _icx = push_ctxt("tvec::get_fill"); Load(bcx, GEPi(bcx, vptr, [0u, abi::vec_elt_fill])) } pub fn set_fill(bcx: block, vptr: ValueRef, fill: ValueRef) { Store(bcx, fill, GEPi(bcx, vptr, [0u, abi::vec_elt_fill])); } pub fn get_alloc(bcx: block, vptr: ValueRef) -> ValueRef { Load(bcx, GEPi(bcx, vptr, [0u, abi::vec_elt_alloc])) } pub fn get_bodyptr(bcx: block, vptr: ValueRef) -> ValueRef { GEPi(bcx, vptr, [0u, abi::box_field_body]) } pub fn get_dataptr(bcx: block, vptr: ValueRef) -> ValueRef { let _icx = push_ctxt("tvec::get_dataptr"); GEPi(bcx, vptr, [0u, abi::vec_elt_elems, 0u]) } pub fn pointer_add(bcx: block, ptr: ValueRef, bytes: ValueRef) -> ValueRef { let _icx = push_ctxt("tvec::pointer_add"); let old_ty = val_ty(ptr); let bptr = PointerCast(bcx, ptr, Type::i8p()); return PointerCast(bcx, InBoundsGEP(bcx, bptr, [bytes]), old_ty); } pub fn alloc_raw(bcx: block, unit_ty: ty::t, fill: ValueRef, alloc: ValueRef, heap: heap) -> Result { let _icx = push_ctxt("tvec::alloc_uniq"); let ccx = bcx.ccx(); let vecbodyty = ty::mk_mut_unboxed_vec(bcx.tcx(), unit_ty); let vecsize = Add(bcx, alloc, llsize_of(ccx, ccx.opaque_vec_type)); let base::MallocResult {bcx, box: bx, body} = base::malloc_general_dyn(bcx, vecbodyty, heap, vecsize); Store(bcx, fill, GEPi(bcx, body, [0u, abi::vec_elt_fill])); Store(bcx, alloc, GEPi(bcx, body, [0u, abi::vec_elt_alloc])); base::maybe_set_managed_unique_rc(bcx, bx, heap); return rslt(bcx, bx); } pub fn heap_for_unique_vector(bcx: block, t: ty::t) -> heap { if ty::type_contents(bcx.tcx(), t).contains_managed() { heap_managed_unique } else { heap_exchange_vector } } pub fn alloc_uniq_raw(bcx: block, unit_ty: ty::t, fill: ValueRef, alloc: ValueRef) -> Result { alloc_raw(bcx, unit_ty, fill, alloc, heap_for_unique_vector(bcx, unit_ty)) } pub fn alloc_vec(bcx: block, unit_ty: ty::t, elts: uint, heap: heap) -> Result { let _icx = push_ctxt("tvec::alloc_uniq"); let ccx = bcx.ccx(); let llunitty = type_of::type_of(ccx, unit_ty); let unit_sz = nonzero_llsize_of(ccx, llunitty); let fill = Mul(bcx, C_uint(ccx, elts), unit_sz); let alloc = if elts < 4u { Mul(bcx, C_int(ccx, 4), unit_sz) } else { fill }; let Result {bcx: bcx, val: vptr} = alloc_raw(bcx, unit_ty, fill, alloc, heap); return rslt(bcx, vptr); } pub fn duplicate_uniq(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> Result { let _icx = push_ctxt("tvec::duplicate_uniq"); let fill = get_fill(bcx, get_bodyptr(bcx, vptr)); let unit_ty = ty::sequence_element_type(bcx.tcx(), vec_ty); let Result {bcx, val: newptr} = alloc_uniq_raw(bcx, unit_ty, fill, fill); let data_ptr = get_dataptr(bcx, get_bodyptr(bcx, vptr)); let new_data_ptr = get_dataptr(bcx, get_bodyptr(bcx, newptr)); base::call_memcpy(bcx, new_data_ptr, data_ptr, fill, 1); let bcx = if ty::type_needs_drop(bcx.tcx(), unit_ty) { iter_vec_raw(bcx, new_data_ptr, vec_ty, fill, glue::take_ty) } else { bcx }; return rslt(bcx, newptr); } pub fn make_drop_glue_unboxed(bcx: block, vptr: ValueRef, vec_ty: ty::t) -> block { 
let _icx = push_ctxt("tvec::make_drop_glue_unboxed"); let tcx = bcx.tcx(); let unit_ty = ty::sequence_element_type(tcx, vec_ty); if ty::type_needs_drop(tcx, unit_ty) { iter_vec_unboxed(bcx, vptr, vec_ty, glue::drop_ty) } else { bcx } } pub struct VecTypes { vec_ty: ty::t, unit_ty: ty::t, llunit_ty: Type, llunit_size: ValueRef } impl VecTypes { pub fn to_str(&self, ccx: &CrateContext) -> ~str { fmt!("VecTypes {vec_ty=%s, unit_ty=%s, llunit_ty=%s, llunit_size=%s}", ty_to_str(ccx.tcx, self.vec_ty), ty_to_str(ccx.tcx, self.unit_ty), ccx.tn.type_to_str(self.llunit_ty), ccx.tn.val_to_str(self.llunit_size)) } } pub fn trans_fixed_vstore(bcx: block, vstore_expr: @ast::expr, content_expr: &ast::expr, dest: expr::Dest) -> block { //! // // [...] allocates a fixed-size array and moves it around "by value". // In this case, it means that the caller has already given us a location // to store the array of the suitable size, so all we have to do is // generate the content. debug!("trans_fixed_vstore(vstore_expr=%s, dest=%?)", bcx.expr_to_str(vstore_expr), dest.to_str(bcx.ccx())); let _indenter = indenter(); let vt = vec_types_from_expr(bcx, vstore_expr); return match dest { Ignore => write_content(bcx, &vt, vstore_expr, content_expr, dest), SaveIn(lldest) => { // lldest will have type *[T x N], but we want the type *T, // so use GEP to convert: let lldest = GEPi(bcx, lldest, [0, 0]); write_content(bcx, &vt, vstore_expr, content_expr, SaveIn(lldest)) } }; } pub fn trans_slice_vstore(bcx: block, vstore_expr: @ast::expr, content_expr: @ast::expr, dest: expr::Dest) -> block { //! // // &[...] allocates memory on the stack and writes the values into it, // returning a slice (pair of ptr, len). &"..." is similar except that // the memory can be statically allocated. let ccx = bcx.ccx(); debug!("trans_slice_vstore(vstore_expr=%s, dest=%s)", bcx.expr_to_str(vstore_expr), dest.to_str(ccx)); let _indenter = indenter(); // Handle the &"..." case: match content_expr.node { ast::expr_lit(@codemap::spanned {node: ast::lit_str(s), span: _}) => { return trans_lit_str(bcx, content_expr, s, dest); } _ => {} } // Handle the &[...] case: let vt = vec_types_from_expr(bcx, vstore_expr);<|fim▁hole|> debug!("vt=%s, count=%?", vt.to_str(ccx), count); // Make a fixed-length backing array and allocate it on the stack. let llcount = C_uint(ccx, count); let llfixed = base::arrayalloca(bcx, vt.llunit_ty, llcount); // Arrange for the backing array to be cleaned up. let fixed_ty = ty::mk_evec(bcx.tcx(), ty::mt {ty: vt.unit_ty, mutbl: ast::m_mutbl}, ty::vstore_fixed(count)); let llfixed_ty = type_of::type_of(bcx.ccx(), fixed_ty).ptr_to(); let llfixed_casted = BitCast(bcx, llfixed, llfixed_ty); add_clean(bcx, llfixed_casted, fixed_ty); // Generate the content into the backing array. let bcx = write_content(bcx, &vt, vstore_expr, content_expr, SaveIn(llfixed)); // Finally, create the slice pair itself. match dest { Ignore => {} SaveIn(lldest) => { Store(bcx, llfixed, GEPi(bcx, lldest, [0u, abi::slice_elt_base])); let lllen = Mul(bcx, llcount, vt.llunit_size); Store(bcx, lllen, GEPi(bcx, lldest, [0u, abi::slice_elt_len])); } } return bcx; } pub fn trans_lit_str(bcx: block, lit_expr: @ast::expr, str_lit: @str, dest: Dest) -> block { //! // // Literal strings translate to slices into static memory. This is // different from trans_slice_vstore() above because it does need to copy // the content anywhere. 
debug!("trans_lit_str(lit_expr=%s, dest=%s)", bcx.expr_to_str(lit_expr), dest.to_str(bcx.ccx())); let _indenter = indenter(); match dest { Ignore => bcx, SaveIn(lldest) => { unsafe { let bytes = str_lit.len() + 1; // count null-terminator too let llbytes = C_uint(bcx.ccx(), bytes); let llcstr = C_cstr(bcx.ccx(), str_lit); let llcstr = llvm::LLVMConstPointerCast(llcstr, Type::i8p().to_ref()); Store(bcx, llcstr, GEPi(bcx, lldest, [0u, abi::slice_elt_base])); Store(bcx, llbytes, GEPi(bcx, lldest, [0u, abi::slice_elt_len])); bcx } } } } pub fn trans_uniq_or_managed_vstore(bcx: block, heap: heap, vstore_expr: @ast::expr, content_expr: &ast::expr) -> DatumBlock { //! // // @[...] or ~[...] (also @"..." or ~"...") allocate boxes in the // appropriate heap and write the array elements into them. debug!("trans_uniq_or_managed_vstore(vstore_expr=%s, heap=%?)", bcx.expr_to_str(vstore_expr), heap); let _indenter = indenter(); // Handle ~"". match heap { heap_exchange_vector => { match content_expr.node { ast::expr_lit(@codemap::spanned { node: ast::lit_str(s), _ }) => { let llptrval = C_cstr(bcx.ccx(), s); let llptrval = PointerCast(bcx, llptrval, Type::i8p()); let llsizeval = C_uint(bcx.ccx(), s.len()); let typ = ty::mk_estr(bcx.tcx(), ty::vstore_uniq); let lldestval = scratch_datum(bcx, typ, "", false); let bcx = callee::trans_lang_call( bcx, bcx.tcx().lang_items.strdup_uniq_fn(), [ llptrval, llsizeval ], Some(expr::SaveIn(lldestval.to_ref_llval(bcx)))).bcx; return DatumBlock { bcx: bcx, datum: lldestval }; } _ => {} } } heap_exchange | heap_exchange_closure => fail!("vectors use vector_exchange_alloc"), heap_managed | heap_managed_unique => {} } let vt = vec_types_from_expr(bcx, vstore_expr); let count = elements_required(bcx, content_expr); let Result {bcx, val} = alloc_vec(bcx, vt.unit_ty, count, heap); add_clean_free(bcx, val, heap); let dataptr = get_dataptr(bcx, get_bodyptr(bcx, val)); debug!("alloc_vec() returned val=%s, dataptr=%s", bcx.val_to_str(val), bcx.val_to_str(dataptr)); let bcx = write_content(bcx, &vt, vstore_expr, content_expr, SaveIn(dataptr)); revoke_clean(bcx, val); return immediate_rvalue_bcx(bcx, val, vt.vec_ty); } pub fn write_content(bcx: block, vt: &VecTypes, vstore_expr: @ast::expr, content_expr: &ast::expr, dest: Dest) -> block { let _icx = push_ctxt("tvec::write_content"); let mut bcx = bcx; debug!("write_content(vt=%s, dest=%s, vstore_expr=%?)", vt.to_str(bcx.ccx()), dest.to_str(bcx.ccx()), bcx.expr_to_str(vstore_expr)); let _indenter = indenter(); match content_expr.node { ast::expr_lit(@codemap::spanned { node: ast::lit_str(s), _ }) => { match dest { Ignore => { return bcx; } SaveIn(lldest) => { let bytes = s.len() + 1; // copy null-terminator too let llbytes = C_uint(bcx.ccx(), bytes); let llcstr = C_cstr(bcx.ccx(), s); base::call_memcpy(bcx, lldest, llcstr, llbytes, 1); return bcx; } } } ast::expr_vec(ref elements, _) => { match dest { Ignore => { for elements.iter().advance |element| { bcx = expr::trans_into(bcx, *element, Ignore); } } SaveIn(lldest) => { let mut temp_cleanups = ~[]; for elements.iter().enumerate().advance |(i, element)| { let lleltptr = GEPi(bcx, lldest, [i]); debug!("writing index %? 
with lleltptr=%?", i, bcx.val_to_str(lleltptr)); bcx = expr::trans_into(bcx, *element, SaveIn(lleltptr)); add_clean_temp_mem(bcx, lleltptr, vt.unit_ty); temp_cleanups.push(lleltptr); } for temp_cleanups.iter().advance |cleanup| { revoke_clean(bcx, *cleanup); } } } return bcx; } ast::expr_repeat(element, count_expr, _) => { match dest { Ignore => { return expr::trans_into(bcx, element, Ignore); } SaveIn(lldest) => { let count = ty::eval_repeat_count(bcx.tcx(), count_expr); if count == 0 { return bcx; } // Some cleanup would be required in the case in which failure happens // during a copy. But given that copy constructors are not overridable, // this can only happen as a result of OOM. So we just skip out on the // cleanup since things would *probably* be broken at that point anyways. let elem = unpack_datum!(bcx, { expr::trans_to_datum(bcx, element) }); let next_bcx = sub_block(bcx, "expr_repeat: while next"); let loop_bcx = loop_scope_block(bcx, next_bcx, None, "expr_repeat", None); let cond_bcx = scope_block(loop_bcx, None, "expr_repeat: loop cond"); let set_bcx = scope_block(loop_bcx, None, "expr_repeat: body: set"); let inc_bcx = scope_block(loop_bcx, None, "expr_repeat: body: inc"); Br(bcx, loop_bcx.llbb); let loop_counter = { // i = 0 let i = alloca(loop_bcx, bcx.ccx().int_type, "__i"); Store(loop_bcx, C_uint(bcx.ccx(), 0), i); Br(loop_bcx, cond_bcx.llbb); i }; { // i < count let lhs = Load(cond_bcx, loop_counter); let rhs = C_uint(bcx.ccx(), count); let cond_val = ICmp(cond_bcx, lib::llvm::IntULT, lhs, rhs); CondBr(cond_bcx, cond_val, set_bcx.llbb, next_bcx.llbb); } { // v[i] = elem let i = Load(set_bcx, loop_counter); let lleltptr = InBoundsGEP(set_bcx, lldest, [i]); let set_bcx = elem.copy_to(set_bcx, INIT, lleltptr); Br(set_bcx, inc_bcx.llbb); } { // i += 1 let i = Load(inc_bcx, loop_counter); let plusone = Add(inc_bcx, i, C_uint(bcx.ccx(), 1)); Store(inc_bcx, plusone, loop_counter); Br(inc_bcx, cond_bcx.llbb); } return next_bcx; } } } _ => { bcx.tcx().sess.span_bug(content_expr.span, "Unexpected evec content"); } } } pub fn vec_types_from_expr(bcx: block, vec_expr: &ast::expr) -> VecTypes { let vec_ty = node_id_type(bcx, vec_expr.id); vec_types(bcx, vec_ty) } pub fn vec_types(bcx: block, vec_ty: ty::t) -> VecTypes { let ccx = bcx.ccx(); let unit_ty = ty::sequence_element_type(bcx.tcx(), vec_ty); let llunit_ty = type_of::type_of(ccx, unit_ty); let llunit_size = nonzero_llsize_of(ccx, llunit_ty); VecTypes {vec_ty: vec_ty, unit_ty: unit_ty, llunit_ty: llunit_ty, llunit_size: llunit_size} } pub fn elements_required(bcx: block, content_expr: &ast::expr) -> uint { //! Figure out the number of elements we need to store this content match content_expr.node { ast::expr_lit(@codemap::spanned { node: ast::lit_str(s), _ }) => { s.len() + 1 }, ast::expr_vec(ref es, _) => es.len(), ast::expr_repeat(_, count_expr, _) => { ty::eval_repeat_count(bcx.tcx(), count_expr) } _ => bcx.tcx().sess.span_bug(content_expr.span, "Unexpected evec content") } } pub fn get_base_and_len(bcx: block, llval: ValueRef, vec_ty: ty::t) -> (ValueRef, ValueRef) { //! // // Converts a vector into the slice pair. The vector should be stored in // `llval` which should be either immediate or by-ref as appropriate for // the vector type. If you have a datum, you would probably prefer to // call `Datum::get_base_and_len()` which will handle any conversions for // you. 
let ccx = bcx.ccx(); let vt = vec_types(bcx, vec_ty); let vstore = match ty::get(vt.vec_ty).sty { ty::ty_estr(vst) | ty::ty_evec(_, vst) => vst, _ => ty::vstore_uniq }; match vstore { ty::vstore_fixed(n) => { let base = GEPi(bcx, llval, [0u, 0u]); let n = if ty::type_is_str(vec_ty) { n + 1u } else { n }; let len = Mul(bcx, C_uint(ccx, n), vt.llunit_size); (base, len) } ty::vstore_slice(_) => { let base = Load(bcx, GEPi(bcx, llval, [0u, abi::slice_elt_base])); let len = Load(bcx, GEPi(bcx, llval, [0u, abi::slice_elt_len])); (base, len) } ty::vstore_uniq | ty::vstore_box => { let body = get_bodyptr(bcx, llval); (get_dataptr(bcx, body), get_fill(bcx, body)) } } } pub type iter_vec_block<'self> = &'self fn(block, ValueRef, ty::t) -> block; pub fn iter_vec_raw(bcx: block, data_ptr: ValueRef, vec_ty: ty::t, fill: ValueRef, f: iter_vec_block) -> block { let _icx = push_ctxt("tvec::iter_vec_raw"); let unit_ty = ty::sequence_element_type(bcx.tcx(), vec_ty); // Calculate the last pointer address we want to handle. // FIXME (#3729): Optimize this when the size of the unit type is // statically known to not use pointer casts, which tend to confuse // LLVM. let data_end_ptr = pointer_add(bcx, data_ptr, fill); // Now perform the iteration. let header_bcx = base::sub_block(bcx, "iter_vec_loop_header"); Br(bcx, header_bcx.llbb); let data_ptr = Phi(header_bcx, val_ty(data_ptr), [data_ptr], [bcx.llbb]); let not_yet_at_end = ICmp(header_bcx, lib::llvm::IntULT, data_ptr, data_end_ptr); let body_bcx = base::sub_block(header_bcx, "iter_vec_loop_body"); let next_bcx = base::sub_block(header_bcx, "iter_vec_next"); CondBr(header_bcx, not_yet_at_end, body_bcx.llbb, next_bcx.llbb); let body_bcx = f(body_bcx, data_ptr, unit_ty); AddIncomingToPhi(data_ptr, InBoundsGEP(body_bcx, data_ptr, [C_int(bcx.ccx(), 1)]), body_bcx.llbb); Br(body_bcx, header_bcx.llbb); return next_bcx; } pub fn iter_vec_uniq(bcx: block, vptr: ValueRef, vec_ty: ty::t, fill: ValueRef, f: iter_vec_block) -> block { let _icx = push_ctxt("tvec::iter_vec_uniq"); let data_ptr = get_dataptr(bcx, get_bodyptr(bcx, vptr)); iter_vec_raw(bcx, data_ptr, vec_ty, fill, f) } pub fn iter_vec_unboxed(bcx: block, body_ptr: ValueRef, vec_ty: ty::t, f: iter_vec_block) -> block { let _icx = push_ctxt("tvec::iter_vec_unboxed"); let fill = get_fill(bcx, body_ptr); let dataptr = get_dataptr(bcx, body_ptr); return iter_vec_raw(bcx, dataptr, vec_ty, fill, f); }<|fim▁end|>
let count = elements_required(bcx, content_expr);
<|file_name|>index.js<|end_file_name|><|fim▁begin|>describe("setPathValues", function() {<|fim▁hole|> require("./expired.spec"); require("./branch.spec"); });<|fim▁end|>
require("./primitive.spec"); require("./atom.spec");
<|file_name|>image_copy_test.py<|end_file_name|><|fim▁begin|>import numpy as np from menpo.image import Image, BooleanImage, MaskedImage from menpo.shape import PointCloud from menpo.testing import is_same_array def test_image_copy(): pixels = np.ones([1, 10, 10]) landmarks = PointCloud(np.ones([3, 2]), copy=False)<|fim▁hole|> im.landmarks['test'] = landmarks im_copy = im.copy() assert (not is_same_array(im.pixels, im_copy.pixels)) assert (not is_same_array(im_copy.landmarks['test'].points, im.landmarks['test'].points)) def test_booleanimage_copy(): pixels = np.ones([10, 10], dtype=np.bool) landmarks = PointCloud(np.ones([3, 2]), copy=False) im = BooleanImage(pixels, copy=False) im.landmarks['test'] = landmarks im_copy = im.copy() assert (not is_same_array(im.pixels, im_copy.pixels)) assert (not is_same_array(im_copy.landmarks['test'].points, im.landmarks['test'].points)) def test_maskedimage_copy(): pixels = np.ones([1, 10, 10]) landmarks = PointCloud(np.ones([3, 2]), copy=False) im = MaskedImage(pixels, copy=False) im.landmarks['test'] = landmarks im_copy = im.copy() assert (not is_same_array(im.pixels, im_copy.pixels)) assert (not is_same_array(im_copy.landmarks['test'].points, im.landmarks['test'].points))<|fim▁end|>
im = Image(pixels, copy=False)
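# Each copy test above reduces to one property: the copy must own fresh pixel
# and landmark buffers rather than aliasing the originals. numpy can assert
# that directly:
import numpy as np

a = np.ones([1, 10, 10])
b = a.copy()
assert not np.shares_memory(a, b)  # deep copy -> distinct buffers
c = a
assert np.shares_memory(a, c)      # plain assignment -> same buffer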
<|file_name|>font-size.js<|end_file_name|><|fim▁begin|>window.BOLDGRID = window.BOLDGRID || {}; BOLDGRID.EDITOR = BOLDGRID.EDITOR || {}; BOLDGRID.EDITOR.CONTROLS = BOLDGRID.EDITOR.CONTROLS || {}; BOLDGRID.EDITOR.CONTROLS.GENERIC = BOLDGRID.EDITOR.CONTROLS.GENERIC || {}; ( function( $ ) { 'use strict'; var self, BG = BOLDGRID.EDITOR; BOLDGRID.EDITOR.CONTROLS.GENERIC.Fontsize = { template: wp.template( 'boldgrid-editor-font-size' ), render: function() { var $control = $( this.template() ); BG.Panel.$element .find( '.panel-body .customize' ) .find( '.section.size' ) .remove(); BG.Panel.$element.find( '.panel-body .customize' ).append( $control ); return $control; }, bind: function() { var $el = BG.Menu.getTarget( BG.Panel.currentControl ), elementSize = $el.css( 'font-size' ), defaultSize = elementSize ? parseInt( elementSize ) : 14; defaultSize = 5 <= defaultSize ? defaultSize : 14; BG.Panel.$element.find( '.section.size .value' ).html( defaultSize ); BG.Panel.$element.find( '.section.size .slider' ).slider( { min: 5, max: 115, value: defaultSize, range: 'max',<|fim▁hole|> } ); } }; self = BOLDGRID.EDITOR.CONTROLS.GENERIC.Fontsize; } )( jQuery );<|fim▁end|>
slide: function( event, ui ) { BG.Panel.$element.find( '.section.size .value' ).html( ui.value ); BG.Controls.addStyle( $el, 'font-size', ui.value ); }
<|file_name|>vtk_io.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- __version__ = "who knows" # ''' I/O for VTK <https://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf>. .. moduleauthor:: Nico Schlömer <[email protected]> NOTE: Stolen from https://github.com/nschloe/meshio/blob/master/meshio/vtk_io.py NOTE: which is distributed under the MIT license: The MIT License (MIT) Copyright (c) 2015-2018 Nico Schlömer Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' import logging import numpy # https://www.vtk.org/doc/nightly/html/vtkCellType_8h_source.html vtk_to_meshio_type = { 0: 'empty', 1: 'vertex', # 2: 'poly_vertex', 3: 'line', # 4: 'poly_line', 5: 'triangle', # 6: 'triangle_strip', # 7: 'polygon', # 8: 'pixel', 9: 'quad', 10: 'tetra', # 11: 'voxel', 12: 'hexahedron', 13: 'wedge', 14: 'pyramid', 15: 'penta_prism', 16: 'hexa_prism', 21: 'line3', 22: 'triangle6', 23: 'quad8', 24: 'tetra10', 25: 'hexahedron20', 26: 'wedge15', 27: 'pyramid13', 28: 'quad9', 29: 'hexahedron27', 30: 'quad6', 31: 'wedge12', 32: 'wedge18', 33: 'hexahedron24', 34: 'triangle7', 35: 'line4', # # 60: VTK_HIGHER_ORDER_EDGE, # 61: VTK_HIGHER_ORDER_TRIANGLE, # 62: VTK_HIGHER_ORDER_QUAD, # 63: VTK_HIGHER_ORDER_POLYGON, # 64: VTK_HIGHER_ORDER_TETRAHEDRON, # 65: VTK_HIGHER_ORDER_WEDGE, # 66: VTK_HIGHER_ORDER_PYRAMID, # 67: VTK_HIGHER_ORDER_HEXAHEDRON, } meshio_to_vtk_type = {v: k for k, v in vtk_to_meshio_type.items()} # These are all VTK data types. One sometimes finds 'vtktypeint64', but # this is ill-formed. vtk_to_numpy_dtype_name = { 'bit': 'bool', 'unsigned_char': 'uint8', 'char': 'int8', 'unsigned_short': 'uint16', 'short': 'int16', 'unsigned_int': 'uint32', 'int': numpy.dtype('int32'), 'unsigned_long': 'int64', 'long': 'int64', 'float': 'float32', 'double': 'float64', } numpy_to_vtk_dtype = {v: k for k, v in vtk_to_numpy_dtype_name.items()} def read(filename): '''Reads a Gmsh msh file. ''' with open(filename, 'rb') as f: out = read_buffer(f) return out def read_buffer(f): # initialize output data points = None field_data = {} cell_data_raw = {} point_data = {} # skip header and title f.readline() f.readline() data_type = f.readline().decode('utf-8').strip() assert data_type in ['ASCII', 'BINARY'], \ 'Unknown VTK data type \'{}\'.'.format(data_type) is_ascii = data_type == 'ASCII' c = None offsets = None ct = None # One of the problem in reading VTK files are POINT_DATA and CELL_DATA # fields. They can contain a number of SCALARS+LOOKUP_TABLE tables, without # giving and indication of how many there are. 
Hence, SCALARS must be # treated like a first-class section. To associate it with POINT/CELL_DATA, # we store the `active` section in this variable. active = None while True: line = f.readline().decode('utf-8') if not line: # EOF<|fim▁hole|> if len(line) == 0: continue split = line.split() section = split[0] if section == 'DATASET': dataset_type = split[1] assert dataset_type == 'UNSTRUCTURED_GRID', \ 'Only VTK UNSTRUCTURED_GRID supported.' elif section == 'POINTS': active = 'POINTS' num_points = int(split[1]) data_type = split[2] dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type]) if is_ascii: points = numpy.fromfile( f, count=num_points*3, sep=' ', dtype=dtype ) else: # binary num_bytes = numpy.dtype(dtype).itemsize total_num_bytes = num_points * (3 * num_bytes) # Binary data is big endian, see # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>. dtype = dtype.newbyteorder('>') points = \ numpy.fromstring(f.read(total_num_bytes), dtype=dtype) line = f.readline().decode('utf-8') assert line == '\n' points = points.reshape((num_points, 3)) elif section == 'CELLS': active = 'CELLS' num_items = int(split[2]) if is_ascii: c = numpy.fromfile(f, count=num_items, sep=' ', dtype=int) else: # binary num_bytes = 4 total_num_bytes = num_items * num_bytes c = numpy.fromstring(f.read(total_num_bytes), dtype='>i4') line = f.readline().decode('utf-8') assert line == '\n' offsets = [] if len(c) > 0: offsets.append(0) while offsets[-1] + c[offsets[-1]] + 1 < len(c): offsets.append(offsets[-1] + c[offsets[-1]] + 1) offsets = numpy.array(offsets) elif section == 'CELL_TYPES': active = 'CELL_TYPES' num_items = int(split[1]) if is_ascii: ct = \ numpy.fromfile(f, count=int(num_items), sep=' ', dtype=int) else: # binary num_bytes = 4 total_num_bytes = num_items * num_bytes ct = numpy.fromstring(f.read(total_num_bytes), dtype='>i4') line = f.readline().decode('utf-8') assert line == '\n' elif section == 'POINT_DATA': active = 'POINT_DATA' num_items = int(split[1]) elif section == 'CELL_DATA': active = 'CELL_DATA' num_items = int(split[1]) elif section == 'SCALARS': if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal SCALARS in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_scalar_field(f, num_items, split)) elif section == 'VECTORS': if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal SCALARS in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_vector_field(f, num_items, split)) elif section == 'TENSORS': if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal SCALARS in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_tensor_field(f, num_items, split)) else: assert section == 'FIELD', \ 'Unknown section \'{}\'.'.format(section) if active == 'POINT_DATA': d = point_data else: assert active == 'CELL_DATA', \ 'Illegal FIELD in section \'{}\'.'.format(active) d = cell_data_raw d.update(_read_fields(f, int(split[2]), is_ascii)) assert c is not None, \ 'Required section CELLS not found.' assert ct is not None, \ 'Required section CELL_TYPES not found.' cells, cell_data = translate_cells(c, offsets, ct, cell_data_raw) return points, cells, point_data, cell_data, field_data def _read_scalar_field(f, num_data, split): data_name = split[1] data_type = split[2] try: num_comp = int(split[3]) except IndexError: num_comp = 1 # The standard says: # > The parameter numComp must range between (1,4) inclusive; [...] 
    assert 0 < num_comp < 5
    dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
    lt, _ = f.readline().decode('utf-8').split()
    assert lt == 'LOOKUP_TABLE'
    data = numpy.fromfile(f, count=num_data, sep=' ', dtype=dtype)

    return {data_name: data}


def _read_vector_field(f, num_data, split):
    data_name = split[1]
    data_type = split[2]
    dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])

    data = numpy.fromfile(
        f, count=3*num_data, sep=' ', dtype=dtype
        ).reshape(-1, 3)

    return {data_name: data}


def _read_tensor_field(f, num_data, split):
    data_name = split[1]
    data_type = split[2]
    dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])

    data = numpy.fromfile(
        f, count=9*num_data, sep=' ', dtype=dtype
        ).reshape(-1, 3, 3)

    return {data_name: data}


def _read_fields(f, num_fields, is_ascii):
    data = {}
    for _ in range(num_fields):
        name, shape0, shape1, data_type = \
            f.readline().decode('utf-8').split()
        shape0 = int(shape0)
        shape1 = int(shape1)
        dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])

        if is_ascii:
            dat = numpy.fromfile(
                f, count=shape0 * shape1, sep=' ', dtype=dtype
                )
        else:
            # binary
            num_bytes = numpy.dtype(dtype).itemsize
            total_num_bytes = shape0 * shape1 * num_bytes
            # Binary data is big endian, see
            # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
            dtype = dtype.newbyteorder('>')
            dat = numpy.fromstring(f.read(total_num_bytes), dtype=dtype)
            line = f.readline().decode('utf-8')
            assert line == '\n'

        if shape0 != 1:
            dat = dat.reshape((shape1, shape0))

        data[name] = dat

    return data


def raw_from_cell_data(cell_data):
    # merge cell data
    cell_data_raw = {}
    for d in cell_data.values():
        for name, values in d.items():
            if name in cell_data_raw:
                cell_data_raw[name].append(values)
            else:
                cell_data_raw[name] = [values]
    for name in cell_data_raw:
        cell_data_raw[name] = numpy.concatenate(cell_data_raw[name])

    return cell_data_raw


def translate_cells(data, offsets, types, cell_data_raw):
    # Translate it into the cells dictionary.
    # `data` is a one-dimensional vector with
    # (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ...

    # Collect types into bins.
    # See <https://stackoverflow.com/q/47310359/353337> for better
    # alternatives.
    uniques = numpy.unique(types)
    bins = {u: numpy.where(types == u)[0] for u in uniques}

    cells = {}
    cell_data = {}
    for tpe, b in bins.items():
        meshio_type = vtk_to_meshio_type[tpe]
        n = data[offsets[b[0]]]
        assert (data[offsets[b]] == n).all()
        indices = numpy.array([
            numpy.arange(1, n+1) + o for o in offsets[b]
            ])
        cells[meshio_type] = data[indices]
        cell_data[meshio_type] = \
            {key: value[b] for key, value in cell_data_raw.items()}

    return cells, cell_data


def write(filename,
          points,
          cells,
          point_data=None,
          cell_data=None,
          field_data=None,
          write_binary=True):
    if not write_binary:
        logging.warning('VTK ASCII files are only meant for debugging.')

    with open(filename, 'wb') as f:
        f.write('# vtk DataFile Version 4.2\n'.encode('utf-8'))
        f.write('written by meshio v{}\n'.format(__version__).encode('utf-8'))
        f.write(('BINARY\n' if write_binary else 'ASCII\n').encode('utf-8'))
        f.write('DATASET UNSTRUCTURED_GRID\n'.encode('utf-8'))

        # write points and cells
        _write_points(f, points, write_binary)
        _write_cells(f, cells, write_binary)

        # write point data
        if point_data is not None:
            num_points = len(points)
            f.write('POINT_DATA {}\n'.format(num_points).encode('utf-8'))
            _write_field_data(f, point_data, write_binary)

        # write cell data
        if cell_data is not None:
            total_num_cells = sum([len(c) for c in cells.values()])
            cell_data_raw = raw_from_cell_data(cell_data)
            f.write('CELL_DATA {}\n'.format(total_num_cells).encode('utf-8'))
            _write_field_data(f, cell_data_raw, write_binary)

    return


def _write_points(f, points, write_binary):
    f.write(
        'POINTS {} {}\n'.format(
            len(points), numpy_to_vtk_dtype[points.dtype.name]
            ).encode('utf-8'))

    if write_binary:
        # Binary data must be big endian, see
        # <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
        points.astype(points.dtype.newbyteorder('>')).tofile(f, sep='')
    else:
        # ascii
        points.tofile(f, sep=' ')
    f.write('\n'.encode('utf-8'))
    return


def _write_cells(f, cells, write_binary):
    total_num_cells = sum([len(c) for c in cells.values()])
    total_num_idx = sum([numpy.prod(c.shape) for c in cells.values()])
    # For each cell, the number of nodes is stored
    total_num_idx += total_num_cells
    f.write(
        'CELLS {} {}\n'.format(total_num_cells, total_num_idx)
        .encode('utf-8'))
    if write_binary:
        for key in cells:
            n = cells[key].shape[1]
            d = numpy.column_stack([
                numpy.full(len(cells[key]), n), cells[key]
                ]).astype(numpy.dtype('>i4'))
            f.write(d.tostring())
        if write_binary:
            f.write('\n'.encode('utf-8'))
    else:
        # ascii
        for key in cells:
            n = cells[key].shape[1]
            for cell in cells[key]:
                f.write((' '.join([
                    '{}'.format(idx)
                    for idx in numpy.concatenate([[n], cell])
                    ]) + '\n').encode('utf-8'))

    # write cell types
    f.write('CELL_TYPES {}\n'.format(total_num_cells).encode('utf-8'))
    if write_binary:
        for key in cells:
            d = numpy.full(
                len(cells[key]), meshio_to_vtk_type[key]
                ).astype(numpy.dtype('>i4'))
            f.write(d.tostring())
        f.write('\n'.encode('utf-8'))
    else:
        # ascii
        for key in cells:
            for _ in range(len(cells[key])):
                f.write(
                    '{}\n'.format(meshio_to_vtk_type[key]).encode('utf-8')
                    )
    return


def _write_field_data(f, data, write_binary):
    f.write((
        'FIELD FieldData {}\n'.format(len(data))
        ).encode('utf-8'))
    for name, values in data.items():
        if len(values.shape) == 1:
            num_tuples = values.shape[0]
            num_components = 1
        else:
            assert len(values.shape) == 2, \
                'Only one and two-dimensional field data supported.'
            num_tuples = values.shape[0]
            num_components = values.shape[1]
        f.write(('{} {} {} {}\n'.format(
            name, num_components, num_tuples,
            numpy_to_vtk_dtype[values.dtype.name]
            )).encode('utf-8'))
        if write_binary:
            values.astype(values.dtype.newbyteorder('>')).tofile(f, sep='')
        else:
            # ascii
            values.tofile(f, sep=' ')
            # numpy.savetxt(f, points)
        f.write('\n'.encode('utf-8'))
    return<|fim▁end|>
break
        line = line.strip()
        # pylint: disable=len-as-condition

<|file_name|>operation_printer_test.rs<|end_file_name|><|fim▁begin|>/*
 * Copyright (c) Facebook, Inc. and its affiliates.<|fim▁hole|>
 *
 * @generated SignedSource<<c7c89fe9853f3412ef8e91f793903d25>>
 */

mod operation_printer;

use operation_printer::transform_fixture;
use fixture_tests::test_fixture;

#[test]
fn field_arguments() {
    let input = include_str!("operation_printer/fixtures/field-arguments.graphql");
    let expected = include_str!("operation_printer/fixtures/field-arguments.expected");
    test_fixture(transform_fixture, "field-arguments.graphql", "operation_printer/fixtures/field-arguments.expected", input, expected);
}

#[test]
fn multiple_queries_with_same_fragment() {
    let input = include_str!("operation_printer/fixtures/multiple-queries-with-same-fragment.graphql");
    let expected = include_str!("operation_printer/fixtures/multiple-queries-with-same-fragment.expected");
    test_fixture(transform_fixture, "multiple-queries-with-same-fragment.graphql", "operation_printer/fixtures/multiple-queries-with-same-fragment.expected", input, expected);
}

#[test]
fn query_variables() {
    let input = include_str!("operation_printer/fixtures/query-variables.graphql");
    let expected = include_str!("operation_printer/fixtures/query-variables.expected");
    test_fixture(transform_fixture, "query-variables.graphql", "operation_printer/fixtures/query-variables.expected", input, expected);
}

#[test]
fn query_with_fragment_spreads() {
    let input = include_str!("operation_printer/fixtures/query-with-fragment-spreads.graphql");
    let expected = include_str!("operation_printer/fixtures/query-with-fragment-spreads.expected");
    test_fixture(transform_fixture, "query-with-fragment-spreads.graphql", "operation_printer/fixtures/query-with-fragment-spreads.expected", input, expected);
}

#[test]
fn query_with_nested_fragment_srpeads() {
    let input = include_str!("operation_printer/fixtures/query-with-nested-fragment-srpeads.graphql");
    let expected = include_str!("operation_printer/fixtures/query-with-nested-fragment-srpeads.expected");
    test_fixture(transform_fixture, "query-with-nested-fragment-srpeads.graphql", "operation_printer/fixtures/query-with-nested-fragment-srpeads.expected", input, expected);
}<|fim▁end|>
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.

<|file_name|>cddrevs.C<|end_file_name|><|fim▁begin|>/* cddrevs.C: Reverse Search Procedures for cdd.C
   written by Komei Fukuda, [email protected]
   Version 0.77, August 19, 2003
*/

/* cdd.C : C-Implementation of the double description method for
   computing all vertices and extreme rays of the polyhedron
   P= {x : b - A x >= 0}.
   Please read COPYING (GNU General Public Licence) and
   the manual cddman.tex for detail.
*/

#include <fstream>
#include <string>
using namespace std;

#include "cddtype.h"
#include "cddrevs.h"

extern "C" {
#include "setoper.h"  /* set operation library header (Ver. May 14,1995 or later) */
#include "cdddef.h"
#include "cdd.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
} /* end of extern "C" */

topeOBJECT::topeOBJECT(const topeOBJECT& tope)
{
  long j;
  dim=tope.dim;
  sv = new int[dim];
  for (j=1;j<=dim;j++) sv[j-1]=tope.sv[j-1];
}

void topeOBJECT::operator=(const topeOBJECT& tope)
{
  long j;
  delete[] sv;
  dim = tope.dim;
  sv = new int[dim];
  for (j=1;j<=dim;j++) sv[j-1]=tope.sv[j-1];
}

int topeOBJECT::operator[](long j)  // return the i-th component of tope
{
  if (j>=1 && j<=dim) return sv[j-1]; else return 0;
}

topeOBJECT operator-(topeOBJECT tope, long j)  // reversing the sign of sv[j-1]
{
  topeOBJECT t=tope;
  if (j>=1 && j<=t.dim) t.sv[j-1]=-t.sv[j-1];
  return t;
}

void topeOBJECT::fwrite(ostream& f)
{
  long j;
  for (j=1; j<=this->dim; j++)
  {
    if (this->sv[j-1] > 0) f << " +";
    if (this->sv[j-1] < 0) f << " -";
    if (this->sv[j-1] ==0) f << " 0";
  }
}

int operator==(const topeOBJECT &t1, const topeOBJECT &t2)
{
  long j=1;
  int equal=1;
  if (t1.dim != t2.dim) return 0;
  else
  {
    while (equal && j<=t1.dim)
    {
      if (t1.sv[j-1] != t2.sv[j-1]) equal=0;
      j++;
    }
  }
  return equal;
}

int operator!=(const topeOBJECT &t1, const topeOBJECT &t2)
{
  if (t1==t2) return 0; else return 1;
}

topeOBJECT f(topeOBJECT v)
{
  long i=1, nexti=0;

  while (nexti==0 && i<=v.dim)
  {
    if (v[i]<0 && Facet_Q(v,i)) nexti=i;
    i++;
  }
  if (nexti==0) return v;<|fim▁hole|>
  else return v-nexti;
}

long NeighbourIndex(topeOBJECT u, topeOBJECT v)
{
  long i=1,nindex=0;

  while ( nindex==0 && i <= v.dim)
  {
    if (u==v-i) nindex = i;
    i++;
  }
  return nindex;
}

topeOBJECT Adj(topeOBJECT v, long i)
{
  if (i<=0 || i>v.dim || v[i] <= 0) return v;
  else if (Facet_Q(v, i)) return v-i;
  else return v;
}

void ReverseSearch(ostream &wf, topeOBJECT s, long delta)
{
  topeOBJECT v=s,u=s,w=s;
  long j=0,count=1,fcount=0;
  boolean completed=False;

  cout << "\nReverse search starts with #1 object: ";
  s.fwrite(cout);
  if (CondensedListOn) {
    wf << "begin\n" << " ***** " << delta << " tope_condensed\n";
  }else{
    wf << "begin\n" << " ***** " << delta << " tope\n";
  }
  s.fwrite(wf);
  while (!completed)
  {
    while (j<delta)
    {
      j=j+1;
      w=Adj(v,j);
      if (w!=v && f(w)==v)
      {
        count++;
        if (CondensedListOn){
          cout << "\n #" << count << " r " << j << " f " << fcount;
          wf << "\n r " << j << " f " << fcount;
        } else {
          cout << "\nA new object #" << count << " found: ";
          w.fwrite(cout);
          wf << "\n";
          w.fwrite(wf);
        }
        v=w; j=0; fcount=0;
      }
    }
    if (!(v==s))
    {
      u=v; v=f(v);
      j=NeighbourIndex(u,v);
      fcount++;
      if (j==delta) completed=True;
    }
  }
  wf << "\nend";
  cout << "\nNumber ***** of topes = " << count << "\n";
  wf << "\nNumber ***** of topes = " << count << "\n";
}

// Facet recognition programs using Linear Programming

boolean Facet_Q(topeOBJECT tope, rowrange ii)
{
  static colindex nbindex;  /* NBIndex[s] stores the nonbasic variable in column s */
  static Arow LPdsol;  /* LP solution and the dual solution (basic var only) */
  static colrange nlast=0;

  if (nlast!=nn){
    if (nlast>0){
      delete[] LPdsol;
    }
    LPdsol = new myTYPE[nn];
    nlast=nn;
  }
  return Facet_Q2(tope,ii,nbindex,LPdsol);
}

boolean Facet_Q2(topeOBJECT tope, rowrange ii, colindex NBIndex, Arow LPdsol)
{
  /* Before calling this, LPdsol must be initialized with
     LPdsol = new myTYPE[nn].
     When ii is detected to be non-facet, NBIndex returns the nonbasic
     variables at the evidence solution.  LPdsol returns the evidence
     dual solution. */
  static Arow LPsol;  /* LP solution and the dual solution (basic var only) */
  rowrange re;  /* evidence row when LP is inconsistent */
  colrange se;  /* evidence col when LP is dual-inconsistent */
  colrange s=0;
  myTYPE ov=0, tempRHS=0, purezero=0, pureone=1;  /* ov = LP optimum value */
  long LPiter,testi, i, j;
  boolean answer=True,localdebug=False;
  static colrange nlast=0;
  static Bmatrix BInv;
  static boolean firstcall=True, UsePrevBasis;
  static ConversionType ConversionSave;

  if (nlast!=nn){
    if (nlast>0){
      delete[] LPsol;
      free_Bmatrix(BInv);
    }
    InitializeBmatrix(BInv);
    LPsol = new myTYPE[nn];
    nlast=nn;
    firstcall=True;
  }
  // if (firstcall || Conversion==TopeListing)
  //   UsePrevBasis=False; else UsePrevBasis=True;
  UsePrevBasis=False;
  ConversionSave=Conversion;
  Conversion=LPmax;
  RHScol=1;
  mm=mm+1;
  OBJrow=mm;
  AA[OBJrow-1]=new myTYPE[nn];
  if (localdebug) cout << "Facet_Q: create an exra row " << OBJrow << "\n";
  for (i=1; i<=mm; i++) {
    if (tope[i]<0) {
      if (debug) cout << "reversing the signs of " << i << "th inequality\n";
      for (s=0,j=1; j<=nn; j++){
        AA[i-1][j-1]=-AA[i-1][j-1];
      }
    }
  }
  tempRHS=AA[ii-1][0];
  for (s=0,j=1; j<=nn; j++){
    AA[OBJrow-1][j-1]=-AA[ii-1][j-1];
    if (!firstcall && NBIndex[j]==ii){
      s=j;
      if (localdebug) cout << "Row "<< ii << " is nonbasic" << s << "\n";
    }
  }
  AA[OBJrow-1][0]=purezero;
  AA[ii-1][0]=tempRHS+pureone;  /* relax the ii-th inequality by a large number*/
  if (s>0) GausianColumnPivot2(AA,BInv, ii, s);
  DualSimplexMaximize(cout, cout, AA, BInv, OBJrow, RHScol,
    UsePrevBasis, &LPStatus, &ov, LPsol, LPdsol,NBIndex, &re, &se, &LPiter);
  if (LPStatus!=Optimal){
    if (DynamicWriteOn) cout << "The Dual Simplex failed.  Run the Criss-Cross method.\n";
    CrissCrossMaximize(cout, cout, AA, BInv, OBJrow, RHScol,
      UsePrevBasis, &LPStatus, &ov, LPsol, LPdsol,NBIndex, &re, &se, &LPiter);
  }
  if (localdebug) cout << ii << "-th LP solved with objective value =" << ov
    << "  RHS value = " << tempRHS << "  iter= " << LPiter << "\n";
  if ((ov - tempRHS) > zero) {
    answer=True;
    if (localdebug) cout << ii << "-th inequality determines a facet.\n";
  } else {
    answer=False;
    if (localdebug) cout << ii << "-th inequality does not determine a facet.\n";
  }
  AA[ii-1][0]=tempRHS;  /* restore the original RHS */
  for (s=0,j=1; j<=nn; j++){
    if (NBIndex[j]==ii){
      s=j;
    }
  }
  if (s>0){
    if (localdebug) cout << "Row "<< ii << " is nonbasic: basisinv updated " << s << "\n";
    GausianColumnPivot2(AA,BInv, ii, s);
  }
  delete[] AA[OBJrow-1];
  if (localdebug) cout << "Facet_Q: delete the exra row " << OBJrow << "\n";
  mm=mm-1;
  for (i=1; i<=mm; i++) {
    if (tope[i]<0) {
      for (j=1; j<=nn; j++) AA[i-1][j-1]=-AA[i-1][j-1];  /* restore the original data */
    }
  }
  firstcall=False;
  Conversion=ConversionSave;
  return answer;
}

void FacetandVertexListMain(ostream &f, ostream &f_log)
{
  rowrange i;
  colrange j;
  rowset subrows;  /* rows which define a facet inequality */
  colset allcols;  /* allcols: all column indices */
  rowindex rowequiv;
  rowrange classno;
  topeOBJECT Tope(mm);
  Arow LPdsol,center;
  static colindex NBIndex;

  LPdsol = new myTYPE[nn];
  center = new myTYPE[nn];
  WriteProgramDescription(f);
  (f) << "*Input File:" << inputfile << "(" << minput << "x" << ninput << ")\n";
  WriteRunningMode(f); WriteRunningMode(f_log);
  if (Conversion==VertexListing){
    ShiftPointsAroundOrigin(f,f_log, center);
    /* Shifting the points so that the origin will be in the relative
       interior of their convex hull */
  }
  (f) << "* `e` means essential and `r` means redundant.\n";
  if (DynamicWriteOn) cout << "* `e` means essential and `r` means redundant.\n";
  rowequiv = new long[mm+1];
  // FindRowEquivalenceClasses(&classno, rowequiv);
  // if (classno<mm) {
  //   cout << "*There are multiple equivalent rows!!!\n";
  //   cout << "*You have to remove duplicates before listing facets. \n";
  //   (f) << "*There are multiple equivalent rows!!!\n";
  //   (f) << "*You have to remove duplicates before listing facets. \n";
  //   WriteRowEquivalence(cout, classno, rowequiv);
  //   WriteRowEquivalence(f, classno, rowequiv);
  //   goto _L99;
  // }
  time(&starttime);
  set_initialize(&subrows,mm);
  set_initialize(&allcols,nn);
  for (j=1;j<=nn;j++) set_addelem(allcols,j);
  if (Inequality==ZeroRHS){
    printf("Sorry, facet/vertex listing is not implemented for RHS==0.\n");
    goto _L99;
  }
  (f) << "begin\n";
  if (DynamicWriteOn) (cout) <<"begin\n";
  for (i=1; i<=mm; i++){
    if (Facet_Q2(Tope, i, NBIndex, LPdsol)) {
      if (DynamicWriteOn) cout << i << " e:";
      (f) << i << " e:";
      set_addelem(subrows,i);
    } else {
      (f) << i << " r:";
      if (DynamicWriteOn) (cout) << i << " r:";
    }
    for (j=1; j<nn; j++){
      (f) << " " << NBIndex[j+1];
      if (LogWriteOn){
        (f) <<"(";
        WriteNumber(f,LPdsol[j]);
        (f) << ")";
      }
    }
    (f) << "\n";
    if (DynamicWriteOn){
      for (j=1; j<nn; j++){
        (cout) << " " << NBIndex[j+1];
        if (LogWriteOn){
          (cout) <<"(";
          WriteNumber(cout,LPdsol[j]);
          (cout) << ")";
        }
      }
      (cout) << "\n";
    }
  }
  (f) << "end\n";
  if (DynamicWriteOn) (cout) <<"end\n";
  time(&endtime);
  // (f) << "* Here is a minimal system representing the same polyhedral set as the input.\n";
  // WriteSubMatrixOfAA(f,subrows,allcols,Inequality);
  WriteTimes(f); WriteTimes(f_log); WriteTimes(cout);
  // set_free(&subrows);
  // set_free(&allcols);
_L99:;
  // delete[] rowequiv;
  // delete[] LPdsol;
  // delete[] center;
}

void FacetandVertexExternalListMain(ostream &f, ostream &f_log)
{
  rowrange i,mmxtn;
  colrange j,nnxtn;
  rowset subrows;  /* rows which define a facet inequality */
  colset allcols;  /* allcols: all column indices */
  rowindex rowequiv;
  rowrange classno;
  topeOBJECT Tope(mm);
  Arow LPdsol,center;
  static colindex NBIndex;
  string xtnnumbertype,command;
  myRational rvalue=0;
  myTYPE value=0;
  boolean found,localdebug=False;
  char ch;

  SetReadFileName(xtnfile,'x',"external");
  ifstream reading_xtn(xtnfile);
  if (reading_xtn.is_open()) {
    found=False;
    while (!found)
    {
      if (!reading_xtn.eof()) {
        reading_xtn >> command;
        if (command=="begin") {
          found=True;
        }
      }
      else {
        Error=ImproperInputFormat;
        goto _L99;
      }
    }
    reading_xtn >> mmxtn;
    reading_xtn >> nnxtn;
    reading_xtn >> xtnnumbertype;

    LPdsol = new myTYPE[nn];
    center = new myTYPE[nn];
    WriteProgramDescription(f);
    (f) << "*Essential File:" << inputfile << "(" << minput << "x" << ninput << ")\n";
    (f) << "*Input File:" << xtnfile << "(" << mmxtn << "x" << nnxtn << ")\n";
    WriteRunningMode(f); WriteRunningMode(f_log);
    if (Conversion==VertexListingExternal){
      ShiftPointsAroundOrigin(f,f_log, center);
      /* Shifting the points so that the origin will be in the relative
         interior of their convex hull */
    }
    (f) << "* `e` means essential and `r` means redundant.\n";

    /* Extrarow to store each line from the external file */
    mm = minput + 1;
    AA[mm-1]= new myTYPE[ninput];

    time(&starttime);
    set_initialize(&subrows,mm);
    set_initialize(&allcols,nn);
    for (j=1;j<=nn;j++) set_addelem(allcols,j);
    if (Inequality==ZeroRHS){
      printf("Sorry, facet/vertex listing is not implemented for RHS==0.\n");
      goto _L99;
    }
    (f) << "begin\n";
    if (DynamicWriteOn) (cout) <<"begin\n";
    for (i=1; i<=mmxtn; i++){
      for (j = 1; j <= nn; j++) {
        if (xtnnumbertype=="rational" && OutputNumberString=="real"){
          reading_xtn >> rvalue;
          value=myTYPE(rvalue);
        } else {
          reading_xtn >> value;
        }
        AA[mm-1][j - 1] = value;
        if (localdebug){
          if (xtnnumbertype=="rational" && OutputNumberString=="real")
            cout << "a(" << i << "," << j << ") = " << value << " ("<< rvalue << ")\n";
          else cout << "a(" << i << "," << j << ") = " << value << "\n";
        }
      }  /*of j*/
      while (reading_xtn.get(ch) && ch != '\n') {
        if (localdebug) cout << ch;
      }
      if (Conversion==VertexListingExternal){
        /* Each point must be shifted w.r.t the relative interior of their convex hull */
        for (j=2; j<=nn; j++){AA[mm-1][j-1]-=center[j-1];}
        if (localdebug){
          for (j=1; j<=nn; j++){cout << " " << AA[mm-1][j-1];}
          cout << " " << i << "th point Shifted.\n";
        }
      }
      if (Facet_Q2(Tope, mm, NBIndex, LPdsol)) {
        if (DynamicWriteOn) cout << i << " e:";
        (f) << i << " e:";
        set_addelem(subrows,i);
      } else {
        (f) << i << " r:";
        if (DynamicWriteOn) (cout) << i << " r:";
      }
      long poscomp_count=0,rindex=0;
      for (j=1; j<nn; j++){
        (f) << " " << NBIndex[j+1];
        if (LPdsol[j]>zero) {poscomp_count++; rindex=NBIndex[j+1];};
        if (LogWriteOn){
          (f) <<"(";
          WriteNumber(f,LPdsol[j]);
          (f) << ")";
        }
      }
      if (poscomp_count==1 && RowEquivalent_Q(AA[mm-1],AA[rindex-1], nn)) {
        (f) << " =#" << rindex;
      } else {poscomp_count=0;}
      (f) << "\n";
      if (DynamicWriteOn){
        for (j=1; j<nn; j++){
          (cout) << " " << NBIndex[j+1];
          if (LogWriteOn){
            (cout) <<"(";
            WriteNumber(cout,LPdsol[j]);
            (cout) << ")";
          }
        }
        if (poscomp_count==1) cout << " =#" << rindex;
        (cout) << "\n";
      }
    }
    (f) << "end\n";
    if (DynamicWriteOn) (cout) <<"end\n";
    time(&endtime);
    WriteTimes(f); WriteTimes(f_log); WriteTimes(cout);
  _L99:;
    reading_xtn.close();
  }
  else {
    Error=FileNotFound;
    WriteErrorMessages(cout);
    WriteErrorMessages(f_log);
  }
}

void TopeListMain(ostream &f, ostream &f_log)
{
  rowrange i;
  colrange j;
  rowset subrows;  /* rows which define a facet inequality */
  colset allcols;  /* allcols: all column indices */
  rowindex rowequiv;
  rowrange classno;
  topeOBJECT Tope(mm);

  WriteProgramDescription(f);
  (f) << "*Input File:" << inputfile << "(" << minput << "x" << ninput << ")\n";
  WriteRunningMode(f); WriteRunningMode(f_log);
  rowequiv = new long[mm+1];
  FindRowEquivalenceClasses(&classno, rowequiv);
  if (classno<mm) {
    cout << "*There are multiple equivalent rows!!!\n";
    cout << "*You have to remove duplicates before listing topes. \n";
    (f) << "*There are multiple equivalent rows!!!\n";
    (f) << "*You have to remove duplicates before listing topes. \n";
    WriteRowEquivalence(cout, classno, rowequiv);
    WriteRowEquivalence(f, classno, rowequiv);
    goto _L99;
  }
  time(&starttime);
  set_initialize(&subrows,mm);
  set_initialize(&allcols,nn);
  for (j=1;j<=nn;j++) set_addelem(allcols,j);
  if (Inequality==ZeroRHS){
    printf("Sorry, tope listing is not implemented for RHS==0.\n");
  } else {
    ReverseSearch(f, Tope,mm);
  }
  time(&endtime);
  WriteTimes(f); WriteTimes(f_log); WriteTimes(cout);
  set_free(&subrows);
  set_free(&allcols);
_L99:;
  delete[] rowequiv;
}

// end of cddrevs.C<|fim▁end|>

<|file_name|>build.js<|end_file_name|><|fim▁begin|>'use strict';

function _toConsumableArray(arr) { if (Array.isArray(arr)) { for (var i = 0, arr2 = Array(arr.length); i < arr.length; i++) { arr2[i] = arr[i]; } return arr2; } else { return Array.from(arr); } }

(function () {

	function pushComponent(array, component) {
		if (!array) {
			array = [];
		}
		array.push({
			name: component.getAttribute('name'),
			source: '' + component.textContent
		});
		return array;
	}

	function getComponents() {
		var components = document.getElementsByClassName('component-template');
		var componentTree = {};
		[].concat(_toConsumableArray(components)).forEach(function (component) {
			var componentType = component.getAttribute('data-type');
			componentTree[componentType] = pushComponent(componentTree[componentType], component);
		});
		return componentTree;
	}

	function createFragment() {
		return document.createDocumentFragment();
	}

	function listElementComponent() {}

	function addToElement(source, destination) {
		destination.innerHTML = source;
	}

	/* rewrite this */
	function buildComponentList(tree) {
		var fragment = createFragment();
		var components = getComponents();
		var keys = Object.keys(components);
		var list = document.createElement('ul');
		keys.forEach(function (key) {
			var listItem = document.createElement('li');
			var subList = document.createElement('ul');
			var title = document.createElement('p');
			title.textContent = key;
			listItem.appendChild(title);
			components[key].forEach(function (component) {
				var item = document.createElement('li');
				item.textContent = component.name;
				subList.appendChild(item);
			});
			listItem.appendChild(subList);
			list.appendChild(listItem);<|fim▁hole|>
	/* end */

	// temp stuff for demo
	buildComponentList(getComponents());

	document.getElementById('addBanner').addEventListener('click', function () {
		addToElement(getComponents().information[0].source, document.querySelector('.container'));
	});
})();<|fim▁end|>
		});
		fragment.appendChild(list);
		document.getElementById('container').appendChild(fragment);
	}

<|file_name|>reader.py<|end_file_name|><|fim▁begin|>import feedparser
import logging

from rss import sources

from util import date, dict_tool, tags


log = logging.getLogger('app')


def parse_feed_by_name(name):
    feed_params = sources.get_source(name)
    if not feed_params:
        raise ValueError('There is no feed with name %s' % name)

    source_name = feed_params['name']
    feed = feedparser.parse(feed_params['url'])

    data = []
    for entry in feed['entries']:
        data.append(
            create_doc(
                source_name,
                feed,
                entry,
                feed_params.get('tags', ()),
                feed_params.get('author_name'),
                feed_params.get('author_link'),
                feed_params.get('dressing_params'),
            )
        )

    log.info('%s: got %d documents', source_name, len(data))

    return data


def create_doc(source_name, feed, entry, additional_tags,
               default_author_name, default_author_link, dressing_params):

    link = dict_tool.get_alternative(entry, 'feedburner_origlink', 'link', assert_val=True)
    published = date.utc_format(
        dict_tool.get_alternative(entry, 'published', 'updated', assert_val=True)
    )

    description = dict_tool.get_alternative(entry, 'summary', 'description', 'title', assert_val=True)
    picture = dict_tool.get_deep(entry, 'gd_image', 'src')
    text = dict_tool.get_deep(entry, 'content', 0, 'value')<|fim▁hole|>
        default_author_name
    )
    author_link = handle_default_param(
        entry,
        dict_tool.get_deep(entry, 'authors', 0, 'href'),
        default_author_link
    )

    entry_tags = []
    for tag in entry.get('tags', []):
        tag_text = dict_tool.get_alternative(tag, 'term', 'label')
        if tag_text:
            entry_tags.append(tag_text.lower())

    additional_tags += tuple(entry_tags)

    comments_count = entry.get('slash_comments')
    if comments_count is not None:
        comments_count = int(comments_count)

    return {
        'link': link,
        'title': entry['title'],
        'published': published,
        'picture': picture,
        'author_name': author_name,
        'author_link': author_link,
        'description': description,
        'text': text,
        'source_name': source_name,
        'source_type': 'rss',
        'source_title': feed['feed']['title'],
        'source_link': feed['feed']['link'],
        'comments_count': comments_count,
        'tags': tags.create_tags_list(*additional_tags),
        '__dressing_params': dressing_params,
    }


def handle_default_param(entry, val, default_val):
    if callable(default_val):
        return default_val(entry, val)

    return val or default_val<|fim▁end|>
    author_name = handle_default_param(
        entry,
        dict_tool.get_deep(entry, 'authors', 0, 'name'),

<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>"""The auto-tuning module of tvm

This module includes:

* Tuning space definition API

* Efficient auto-tuners

* Tuning result and database support

* Distributed measurement to scale up tuning
"""

from . import database<|fim▁hole|>
from . import tuner
from . import util
from . import env
from . import tophub

# some shortcuts
from .measure import measure_option, MeasureInput, MeasureResult, MeasureErrorNo, \
    LocalBuilder, LocalRunner, RPCRunner
from .tuner import callback
from .task import template, get_config, create, ConfigSpace, ConfigEntity, \
    register_topi_compute, register_topi_schedule, \
    DispatchContext, FallbackContext, ApplyHistoryBest as apply_history_best
from .env import GLOBAL_SCOPE<|fim▁end|>
from . import feature
from . import measure
from . import record
from . import task

<|file_name|>ProductsListWindow.js<|end_file_name|><|fim▁begin|>var
	// get util library
	util = require("core/Util"),
	// get product manager
	ProductManager = require("core/ProductManager");

function ProductListWindow() {
	var Window = Ti.UI.createWindow({
			title : L("products"),
			navBarHidden : false,
			barColor : Theme.TabGroup.BarColor,
			navTintColor : Theme.TabGroup.NavTintColor,
			backgroundColor : Theme.Windows.BackgroundColor
		}),
		Table = Ti.UI.createTableView({
			width : Ti.UI.FILL,
			height : Ti.UI.FILL,
			backgroundColor : "#FFFFFF"
		});

	// get product manager
	var products = require("core/ProductManager"),
		productEvents = products.events;

	// assemble UI
	Window.add(Table);

	/*
	 * Product row factory method
	 *
	 * @param {String} name: the product name to display
	 * @param {String} image: the icon image to display
	 * @param {String} desc: description of item to display in row
	 * @param {String} itemId: item id used to load product page
	 */
	function createRow(name, image, desc, itemId){
		var row = Ti.UI.createTableViewRow({
				className : "product_rows",
				backgroundColor : Theme.ProductsList.RowsBackgroundColor,
				selectedBackgroundColor : Theme.ProductsList.SelectedBackgroundColor,
				hasChild : true
			}),
			img = Ti.UI.createImageView({
				image : image,
				left : 1,
				top : 1,
				borderWidth : 3,
				borderColor : Theme.ProductsList.ImageBorderColor,
				defaultImage : Config.PRODUCTS_DEFAULT_THUMB_IMAGE
			}),
			bodyView = Ti.UI.createView({
				layout : "vertical"
			}),
			title = Ti.UI.createLabel({
				text : name,
				minimumFontSize : 12,
				color : Theme.ProductsList.TitleColor,
				height : Ti.UI.SIZE,
				left : 2,
				top : 4,
				font : {
					fontSize : Theme.ProductsList.TitleFontSize,
					fontWeight : Theme.ProductsList.TitleFontWeight
				}
			}),
			body = Ti.UI.createLabel({
				text : desc,
				height : Ti.UI.SIZE,
				left : 2,
				top : 2,
				color : Theme.ProductsList.DescriptionColor,
				font : {
					fontSize : Theme.ProductsList.DescriptionFontSize,
					fontWeight : Theme.ProductsList.DescriptionFontWeight
				}
			});

		// assemble row
		bodyView.add(title);
		bodyView.add(body);
		row.add(img);

		if(util.osname==="android"){
			img.width = Theme.ProductsList.ImageWidth + "dip";
			img.height = Theme.ProductsList.ImageHeight + "dip";
			bodyView.left = (Theme.ProductsList.ImageWidth + 1) + "dip";
			bodyView.right = "3dip";
			bodyView.top = 0;
			bodyView.bottom = 0;
			body.height = Ti.UI.SIZE;
		}
		else{
			img.width = 81;
			bodyView.left = 82;
			bodyView.height = Ti.UI.SIZE;
		}

		row.add(bodyView);

		// handle featured item click event
		row.addEventListener(
			"click",
			function(e){
				Ti.App.fireEvent(
					"APP:SHOW_PRODUCT",
					{
						"itemId" : itemId,
						"tab" : "Products"
					}
				);
			}
		);

		return row;
	}

	/*
	 * Product group factory method
	 *
	 * @param {String} name: the name of the group/section
	 * @param {Array} products: array of products for this group/section
	 */
	function createProductGroup(name, products){
		var productGroupHeader = Ti.UI.createLabel({
				text : " "+name, //add space for padding (border not working for left padding)
				textAlign : Ti.UI.TEXT_ALIGNMENT_LEFT,
				color : Theme.ProductsList.HeaderColor,
				backgroundColor : Theme.ProductsList.HeaderBackgroundColor,
				borderWidth : 2,
				borderColor : Theme.ProductsList.HeaderBackgroundColor,
				font : {
					fontSize : Theme.ProductsList.HeaderFontSize,
					fontWeight : Theme.ProductsList.HeaderFontWeight
				}
			}),
			productGroup = Ti.UI.createTableViewSection({
				headerView : productGroupHeader
			});

		for(var i=0,l=products.length;i<l;i++){
			productGroup.add(
				createRow(
					products[i].name,
					products[i].imgs.thumb,
					products[i].desc.short,
					products[i].id
				)
			);
		}

		return productGroup;
	}

	/*
	 * Assemble product groups for table view
	 *
	 * @param {Object} groups: the groups object containing product arrays for each group/section
	 */
	function displayProducts(){<|fim▁hole|>
		var data = [],
			groups = require("core/ProductManager").getProductGroup("__ALL__");

		for(var key in groups){
			data.push(
				createProductGroup(
					key,
					groups[key]
				)
			);
		}

		Table.setData(data);
	}

	Window.addEventListener(
		"focus",
		displayProducts
	);

	return Window;
};

module.exports = ProductListWindow;<|fim▁end|>

<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

//! Invalidation of element styles due to attribute or style changes.

<|fim▁hole|>pub mod invalidator;
pub mod restyle_hints;<|fim▁end|>
pub mod collector;
pub mod element_wrapper;
pub mod invalidation_map;

<|file_name|>events.go<|end_file_name|><|fim▁begin|>/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package unversioned

<|fim▁hole|>
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// EventNamespacer can return an EventInterface for the given namespace.
type EventNamespacer interface {
	Events(namespace string) EventInterface
}

// EventInterface has methods to work with Event resources
type EventInterface interface {
	Create(event *api.Event) (*api.Event, error)
	Update(event *api.Event) (*api.Event, error)
	Patch(event *api.Event, data []byte) (*api.Event, error)
	List(label labels.Selector, field fields.Selector) (*api.EventList, error)
	Get(name string) (*api.Event, error)
	Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error)
	// Search finds events about the specified object
	Search(objOrRef runtime.Object) (*api.EventList, error)
	Delete(name string) error
	// Returns the appropriate field selector based on the API version being used to communicate with the server.
	// The returned field selector can be used with List and Watch to filter desired events.
	GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector
}

// events implements Events interface
type events struct {
	client    *Client
	namespace string
}

// newEvents returns a new events object.
func newEvents(c *Client, ns string) *events {
	return &events{
		client:    c,
		namespace: ns,
	}
}

// Create makes a new event. Returns the copy of the event the server returns,
// or an error. The namespace to create the event within is deduced from the
// event; it must either match this event client's namespace, or this event
// client must have been created with the "" namespace.
func (e *events) Create(event *api.Event) (*api.Event, error) {
	if e.namespace != "" && event.Namespace != e.namespace {
		return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.namespace)
	}
	result := &api.Event{}
	err := e.client.Post().
		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
		Resource("events").
		Body(event).
		Do().
		Into(result)
	return result, err
}

// Update modifies an existing event. It returns the copy of the event that the server returns,
// or an error. The namespace and key to update the event within is deduced from the event. The
// namespace must either match this event client's namespace, or this event client must have been
// created with the "" namespace. Update also requires the ResourceVersion to be set in the event
// object.
func (e *events) Update(event *api.Event) (*api.Event, error) {
	if len(event.ResourceVersion) == 0 {
		return nil, fmt.Errorf("invalid event update object, missing resource version: %#v", event)
	}
	result := &api.Event{}
	err := e.client.Put().
		NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0).
		Resource("events").
		Name(event.Name).
		Body(event).
		Do().
		Into(result)
	return result, err
}

// Patch modifies an existing event. It returns the copy of the event that the server returns, or an
// error. The namespace and name of the target event is deduced from the incompleteEvent. The
// namespace must either match this event client's namespace, or this event client must have been
// created with the "" namespace.
func (e *events) Patch(incompleteEvent *api.Event, data []byte) (*api.Event, error) {
	result := &api.Event{}
	err := e.client.Patch(api.StrategicMergePatchType).
		NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0).
		Resource("events").
		Name(incompleteEvent.Name).
		Body(data).
		Do().
		Into(result)
	return result, err
}

// List returns a list of events matching the selectors.
func (e *events) List(label labels.Selector, field fields.Selector) (*api.EventList, error) {
	result := &api.EventList{}
	err := e.client.Get().
		NamespaceIfScoped(e.namespace, len(e.namespace) > 0).
		Resource("events").
		LabelsSelectorParam(label).
		FieldsSelectorParam(field).
		Do().
		Into(result)
	return result, err
}

// Get returns the given event, or an error.
func (e *events) Get(name string) (*api.Event, error) {
	result := &api.Event{}
	err := e.client.Get().
		NamespaceIfScoped(e.namespace, len(e.namespace) > 0).
		Resource("events").
		Name(name).
		Do().
		Into(result)
	return result, err
}

// Watch starts watching for events matching the given selectors.
func (e *events) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) {
	return e.client.Get().
		Prefix("watch").
		NamespaceIfScoped(e.namespace, len(e.namespace) > 0).
		Resource("events").
		Param("resourceVersion", resourceVersion).
		LabelsSelectorParam(label).
		FieldsSelectorParam(field).
		Watch()
}

// Search finds events about the specified object. The namespace of the
// object must match this event's client namespace unless the event client
// was made with the "" namespace.
func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) {
	ref, err := api.GetReference(objOrRef)
	if err != nil {
		return nil, err
	}
	if e.namespace != "" && ref.Namespace != e.namespace {
		return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.namespace)
	}
	stringRefKind := string(ref.Kind)
	var refKind *string
	if stringRefKind != "" {
		refKind = &stringRefKind
	}
	stringRefUID := string(ref.UID)
	var refUID *string
	if stringRefUID != "" {
		refUID = &stringRefUID
	}
	fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID)
	return e.List(labels.Everything(), fieldSelector)
}

// Delete deletes an existing event.
func (e *events) Delete(name string) error {
	return e.client.Delete().
		NamespaceIfScoped(e.namespace, len(e.namespace) > 0).
		Resource("events").
		Name(name).
		Do().
		Error()
}

// Returns the appropriate field selector based on the API version being used to communicate with the server.
// The returned field selector can be used with List and Watch to filter desired events.
func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector {
	apiVersion := e.client.APIVersion()
	field := fields.Set{}
	if involvedObjectName != nil {
		field[getInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName
	}
	if involvedObjectNamespace != nil {
		field["involvedObject.namespace"] = *involvedObjectNamespace
	}
	if involvedObjectKind != nil {
		field["involvedObject.kind"] = *involvedObjectKind
	}
	if involvedObjectUID != nil {
		field["involvedObject.uid"] = *involvedObjectUID
	}
	return field.AsSelector()
}

// Returns the appropriate field label to use for name of the involved object as per the given API version.
func getInvolvedObjectNameFieldLabel(version string) string {
	return "involvedObject.name"
}<|fim▁end|>
import (
	"fmt"

<|file_name|>dedent-tests.ts<|end_file_name|><|fim▁begin|>import * as dedent from 'dedent';

const lines: string = dedent`
    first
    second<|fim▁hole|>
    third
`;

const text: string = dedent(`
    A test argument.
`);<|fim▁end|>

<|file_name|>合并字典方式二.py<|end_file_name|><|fim▁begin|>#coding=utf-8
#Intro: merging dictionaries, method two
import pickle
<|fim▁hole|>hashdataS = {}
hashdataP = {}

def updata(self,hashdic):
    dic = open(self, 'rb')
    newdata = cPickle.load(dic)
    hashdic.update(newdata)

def main(dict1,dict2,hashname,new):
    try:
        updata(dict1,hashname)  #load
        print 'befor: %s' % len(hashname)
        updata(dict2,hashname)  #overwrite
        print len(hashname)
        with open(new, 'wb') as output:
            cPickle.dump(hashname, output)
            output.close()
    except IOError:
        pass

os.rename('DictS.K','DictS.old')
os.rename('DictP.K','DictP.old')
main('DictS.old','oneDictS.kmy',hashdataS,'DictS.K')
main('DictP.old','oneDictP.kmy',hashdataP,'DictP.K')<|fim▁end|>
import os

<|file_name|>q2.py<|end_file_name|><|fim▁begin|>test = {
  'name': 'Question 2',
  'points': 2,
  'suites': [
    {
      'type': 'sqlite',
      'setup': r"""
      sqlite> .open hw1.db
      """,
      'cases': [
        {
          'code': r"""
          sqlite> select * from colors;
          red|primary
          blue|primary
          green|secondary
          yellow|primary
          """,
        },<|fim▁hole|>
          sqlite> select color from colors;
          red
          blue
          green
          yellow
          """,
        },
      ],
    }
  ]
}<|fim▁end|>
        {
          'code': r"""

<|file_name|>version.py<|end_file_name|><|fim▁begin|>###################################################################
#  Numexpr - Fast numerical array expression evaluator for NumPy.
#
#      License: MIT
#      Author:  See AUTHORS.txt
#
#  See LICENSE.txt and LICENSES/*.txt for details about copyright and
#  rights to use.
####################################################################

version = '2.4.1'
release = False

if not release:
    version += '.dev'

    import os
    svn_version_file = os.path.join(os.path.dirname(__file__),
                                    '__svn_version__.py')
    if os.path.isfile(svn_version_file):<|fim▁hole|>
        import imp
        svn = imp.load_module('numexpr.__svn_version__',
                              open(svn_version_file),
                              svn_version_file,
                              ('.py', 'U', 1))
        version += svn.version<|fim▁end|>

); if handle == handleapi::INVALID_HANDLE_VALUE { return Err(io::Error::last_os_error());
<|file_name|>extra.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. /*! Rust extras. The `extra` crate is a set of useful modules for a variety of purposes, including collections, numerics, I/O, serialization, and concurrency. Rust extras are part of the standard Rust distribution. */ #[link(name = "extra", vers = "0.7", uuid = "122bed0b-c19b-4b82-b0b7-7ae8aead7297", url = "https://github.com/mozilla/rust/tree/master/src/libextra")]; #[comment = "Rust extras"]; #[license = "MIT/ASL2"]; #[crate_type = "lib"]; #[deny(non_camel_case_types)]; #[deny(missing_doc)]; use std::str::{StrSlice, OwnedStr}; pub use std::os; pub mod uv_ll; // General io and system-services modules #[path = "net/mod.rs"] pub mod net; // libuv modules pub mod uv; pub mod uv_iotask; pub mod uv_global_loop; // Utility modules pub mod c_vec; pub mod timer; pub mod io_util; pub mod rc; // Concurrency pub mod sync; pub mod arc; pub mod comm; pub mod future; pub mod task_pool; pub mod flatpipes; // Collections pub mod bitv; pub mod deque; pub mod fun_treemap; pub mod list; pub mod priority_queue; pub mod smallintmap; pub mod sort; pub mod dlist; pub mod treemap; // Crypto #[path="crypto/digest.rs"] pub mod digest; #[path="crypto/sha1.rs"] pub mod sha1; #[path="crypto/sha2.rs"] pub mod sha2; // And ... other stuff pub mod ebml; pub mod dbg; pub mod getopts; pub mod json; pub mod md4; pub mod tempfile; pub mod term; pub mod time; pub mod arena; pub mod par; pub mod base64; pub mod rl; pub mod workcache; #[path="num/bigint.rs"] pub mod bigint; #[path="num/rational.rs"] pub mod rational; #[path="num/complex.rs"] pub mod complex; pub mod stats; pub mod semver; pub mod fileinput; pub mod flate; #[cfg(unicode)] mod unicode; #[path="terminfo/terminfo.rs"] pub mod terminfo; // Compiler support modules pub mod test; pub mod serialize; // A curious inner-module that's not exported that contains the binding // 'extra' so that macro-expanded references to extra::serialize and such // can be resolved within libextra. #[doc(hidden)] pub mod extra { pub use serialize; pub use test; <|fim▁hole|> // For bootstrapping. pub use std::clone; pub use std::condition; pub use std::cmp; pub use std::sys; pub use std::unstable; pub use std::str; pub use std::os; }<|fim▁end|>
<|file_name|>test_froms.py<|end_file_name|><|fim▁begin|>from sqlalchemy.testing import eq_, assert_raises, assert_raises_message import operator from sqlalchemy import * from sqlalchemy import exc as sa_exc, util from sqlalchemy.sql import compiler, table, column from sqlalchemy.engine import default from sqlalchemy.orm import * from sqlalchemy.orm import attributes from sqlalchemy.testing import eq_ import sqlalchemy as sa from sqlalchemy import testing from sqlalchemy.testing import AssertsCompiledSQL, engines from sqlalchemy.testing.schema import Column from test.orm import _fixtures from sqlalchemy.testing import fixtures from sqlalchemy.orm.util import join, outerjoin, with_parent class QueryTest(_fixtures.FixtureTest): run_setup_mappers = 'once' run_inserts = 'once' run_deletes = None @classmethod def setup_mappers(cls): Node, composite_pk_table, users, Keyword, items, Dingaling, \ order_items, item_keywords, Item, User, dingalings, \ Address, keywords, CompositePk, nodes, Order, orders, \ addresses = cls.classes.Node, \ cls.tables.composite_pk_table, cls.tables.users, \ cls.classes.Keyword, cls.tables.items, \ cls.classes.Dingaling, cls.tables.order_items, \ cls.tables.item_keywords, cls.classes.Item, \ cls.classes.User, cls.tables.dingalings, \ cls.classes.Address, cls.tables.keywords, \ cls.classes.CompositePk, cls.tables.nodes, \ cls.classes.Order, cls.tables.orders, cls.tables.addresses mapper(User, users, properties={ 'addresses':relationship(Address, backref='user', order_by=addresses.c.id), 'orders':relationship(Order, backref='user', order_by=orders.c.id), # o2m, m2o }) mapper(Address, addresses, properties={ 'dingaling':relationship(Dingaling, uselist=False, backref="address") #o2o }) mapper(Dingaling, dingalings) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, order_by=items.c.id), #m2m 'address':relationship(Address), # m2o }) mapper(Item, items, properties={ 'keywords':relationship(Keyword, secondary=item_keywords) #m2m }) mapper(Keyword, keywords) mapper(Node, nodes, properties={ 'children':relationship(Node, backref=backref('parent', remote_side=[nodes.c.id]) ) }) mapper(CompositePk, composite_pk_table) configure_mappers() class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL): query_correlated = "SELECT users.name AS users_name, " \ "(SELECT count(addresses.id) AS count_1 FROM addresses " \ "WHERE addresses.user_id = users.id) AS anon_1 FROM users" query_not_correlated = "SELECT users.name AS users_name, " \ "(SELECT count(addresses.id) AS count_1 FROM addresses, users " \ "WHERE addresses.user_id = users.id) AS anon_1 FROM users" def test_as_scalar_select_auto_correlate(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id==users.c.id ).as_scalar() query = select([users.c.name.label('users_name'), query]) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_select_explicit_correlate(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id==users.c.id ).correlate(users).as_scalar() query = select([users.c.name.label('users_name'), query]) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_select_correlate_off(self): addresses, users = self.tables.addresses, self.tables.users query = select( [func.count(addresses.c.id)], addresses.c.user_id==users.c.id 
).correlate(None).as_scalar() query = select([ users.c.name.label('users_name'), query]) self.assert_compile(query, self.query_not_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_query_auto_correlate(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id))\ .filter(Address.user_id==User.id)\ .as_scalar() query = sess.query(User.name, query) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_query_explicit_correlate(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id))\ .filter(Address.user_id==User.id)\ .correlate(self.tables.users)\ .as_scalar() query = sess.query(User.name, query) self.assert_compile(query, self.query_correlated, dialect=default.DefaultDialect() ) def test_as_scalar_query_correlate_off(self): sess = create_session() Address, User = self.classes.Address, self.classes.User query = sess.query(func.count(Address.id))\ .filter(Address.user_id==User.id)\ .correlate(None)\ .as_scalar() query = sess.query(User.name, query) self.assert_compile(query, self.query_not_correlated, dialect=default.DefaultDialect() ) class RawSelectTest(QueryTest, AssertsCompiledSQL): """compare a bunch of select() tests with the equivalent Query using straight table/columns. Results should be the same as Query should act as a select() pass- thru for ClauseElement entities. """ __dialect__ = 'default' def test_select(self): addresses, users = self.tables.addresses, self.tables.users sess = create_session() self.assert_compile(sess.query(users).select_entity_from( users.select()).with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name FROM users, " "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1", ) self.assert_compile(sess.query(users, exists([1], from_obj=addresses) ).with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name, EXISTS " "(SELECT 1 FROM addresses) AS anon_1 FROM users", ) # a little tedious here, adding labels to work around Query's # auto-labelling. s = sess.query(addresses.c.id.label('id'), addresses.c.email_address.label('email')).\ filter(addresses.c.user_id == users.c.id).correlate(users).\ statement.alias() self.assert_compile(sess.query(users, s.c.email).select_entity_from( users.join(s, s.c.id == users.c.id) ).with_labels().statement, "SELECT users.id AS users_id, users.name AS users_name, " "anon_1.email AS anon_1_email " "FROM users JOIN (SELECT addresses.id AS id, " "addresses.email_address AS email FROM addresses, users " "WHERE addresses.user_id = users.id) AS anon_1 " "ON anon_1.id = users.id", ) x = func.lala(users.c.id).label('foo') self.assert_compile(sess.query(x).filter(x == 5).statement, "SELECT lala(users.id) AS foo FROM users WHERE " "lala(users.id) = :param_1") self.assert_compile(sess.query(func.sum(x).label('bar')).statement, "SELECT sum(lala(users.id)) AS bar FROM users") class FromSelfTest(QueryTest, AssertsCompiledSQL): __dialect__ = 'default' def test_filter(self): User = self.classes.User eq_( [User(id=8), User(id=9)], create_session(). query(User). filter(User.id.in_([8,9])). from_self().all() ) eq_( [User(id=8), User(id=9)], create_session().query(User). order_by(User.id).slice(1,3). from_self().all() ) eq_( [User(id=8)], list( create_session(). query(User). filter(User.id.in_([8,9])). 
from_self().order_by(User.id)[0:1] ) ) def test_join(self): User, Address = self.classes.User, self.classes.Address eq_( [ (User(id=8), Address(id=2)), (User(id=8), Address(id=3)), (User(id=8), Address(id=4)), (User(id=9), Address(id=5)) ], create_session(). query(User). filter(User.id.in_([8,9])). from_self(). join('addresses'). add_entity(Address). order_by(User.id, Address.id).all() ) def test_group_by(self): Address = self.classes.Address eq_( create_session().query(Address.user_id, func.count(Address.id).label('count')).\ group_by(Address.user_id). order_by(Address.user_id).all(), [(7, 1), (8, 3), (9, 1)] ) eq_( create_session().query(Address.user_id, Address.id).\ from_self(Address.user_id, func.count(Address.id)).\ group_by(Address.user_id). order_by(Address.user_id).all(), [(7, 1), (8, 3), (9, 1)] ) def test_having(self): User = self.classes.User s = create_session() self.assert_compile( s.query(User.id).group_by(User.id).having(User.id>5). from_self(), "SELECT anon_1.users_id AS anon_1_users_id FROM " "(SELECT users.id AS users_id FROM users GROUP " "BY users.id HAVING users.id > :id_1) AS anon_1" ) def test_no_joinedload(self): """test that joinedloads are pushed outwards and not rendered in subqueries.""" User = self.classes.User s = create_session() self.assert_compile( s.query(User).options(joinedload(User.addresses)). from_self().statement, "SELECT anon_1.users_id, anon_1.users_name, addresses_1.id, " "addresses_1.user_id, addresses_1.email_address FROM " "(SELECT users.id AS users_id, users.name AS " "users_name FROM users) AS anon_1 LEFT OUTER JOIN " "addresses AS addresses_1 ON anon_1.users_id = " "addresses_1.user_id ORDER BY addresses_1.id" ) def test_aliases(self): """test that aliased objects are accessible externally to a from_self() call.""" User, Address = self.classes.User, self.classes.Address s = create_session() ualias = aliased(User) eq_( s.query(User, ualias).filter(User.id > ualias.id). from_self(User.name, ualias.name). order_by(User.name, ualias.name).all(), [ ('chuck', 'ed'), ('chuck', 'fred'), ('chuck', 'jack'), ('ed', 'jack'), ('fred', 'ed'), ('fred', 'jack') ] ) eq_( s.query(User, ualias). filter(User.id > ualias.id). from_self(User.name, ualias.name). filter(ualias.name=='ed')\ .order_by(User.name, ualias.name).all(), [('chuck', 'ed'), ('fred', 'ed')] ) eq_( s.query(User, ualias). filter(User.id > ualias.id). from_self(ualias.name, Address.email_address). join(ualias.addresses). 
order_by(ualias.name, Address.email_address).all(), [ ('ed', '[email protected]'), ('jack', '[email protected]'), ('jack', '[email protected]'), ('jack', '[email protected]'), ('jack', '[email protected]')] ) def test_multiple_entities(self): User, Address = self.classes.User, self.classes.Address sess = create_session() eq_( sess.query(User, Address).\ filter(User.id==Address.user_id).\ filter(Address.id.in_([2, 5])).from_self().all(), [ (User(id=8), Address(id=2)), (User(id=9), Address(id=5)) ] ) eq_( sess.query(User, Address).\ filter(User.id==Address.user_id).\ filter(Address.id.in_([2, 5])).\ from_self().\ options(joinedload('addresses')).first(), (User(id=8, addresses=[Address(), Address(), Address()]), Address(id=2)), ) def test_multiple_with_column_entities(self): User = self.classes.User sess = create_session() eq_( sess.query(User.id).from_self().\ add_column(func.count().label('foo')).\ group_by(User.id).\ order_by(User.id).\ from_self().all(), [ (7,1), (8, 1), (9, 1), (10, 1) ] ) class ColumnAccessTest(QueryTest, AssertsCompiledSQL): """test access of columns after _from_selectable has been applied""" __dialect__ = 'default' def test_from_self(self): User = self.classes.User sess = create_session() q = sess.query(User).from_self() self.assert_compile( q.filter(User.name=='ed'), "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS " "anon_1_users_name FROM (SELECT users.id AS users_id, users.name " "AS users_name FROM users) AS anon_1 WHERE anon_1.users_name = " ":name_1" ) def test_from_self_twice(self): User = self.classes.User sess = create_session() q = sess.query(User).from_self(User.id, User.name).from_self() self.assert_compile( q.filter(User.name=='ed'), "SELECT anon_1.anon_2_users_id AS anon_1_anon_2_users_id, " "anon_1.anon_2_users_name AS anon_1_anon_2_users_name FROM " "(SELECT anon_2.users_id AS anon_2_users_id, anon_2.users_name " "AS anon_2_users_name FROM (SELECT users.id AS users_id, " "users.name AS users_name FROM users) AS anon_2) AS anon_1 " "WHERE anon_1.anon_2_users_name = :name_1" ) def test_select_entity_from(self): User = self.classes.User sess = create_session() q = sess.query(User) q = sess.query(User).select_entity_from(q.statement) self.assert_compile( q.filter(User.name=='ed'), "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name " "FROM (SELECT users.id AS id, users.name AS name FROM " "users) AS anon_1 WHERE anon_1.name = :name_1" ) def test_select_entity_from_no_entities(self): User = self.classes.User sess = create_session() q = sess.query(User) assert_raises_message( sa.exc.ArgumentError, r"A selectable \(FromClause\) instance is " "expected when the base alias is being set", sess.query(User).select_entity_from, User ) def test_select_from_no_aliasing(self): User = self.classes.User sess = create_session() q = sess.query(User) q = sess.query(User).select_from(q.statement) self.assert_compile( q.filter(User.name=='ed'), "SELECT users.id AS users_id, users.name AS users_name " "FROM users, (SELECT users.id AS id, users.name AS name FROM " "users) AS anon_1 WHERE users.name = :name_1" ) def test_anonymous_expression(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q2 = sess.query(c1, c2).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 " "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE " "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 " "WHERE c1 = :c1_2) AS anon_1 
ORDER BY anon_1.c1" ) def test_anonymous_expression_from_self_twice(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q1 = q1.from_self().from_self() self.assert_compile( q1.order_by(c1), "SELECT anon_1.anon_2_c1 AS anon_1_anon_2_c1, anon_1.anon_2_c2 AS " "anon_1_anon_2_c2 FROM (SELECT anon_2.c1 AS anon_2_c1, anon_2.c2 " "AS anon_2_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE c1 = :c1_1) AS " "anon_2) AS anon_1 ORDER BY anon_1.anon_2_c1" ) def test_anonymous_expression_union(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1, c2).filter(c1 == 'dog') q2 = sess.query(c1, c2).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 " "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE " "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 " "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1" ) def test_table_anonymous_expression_from_self_twice(self): from sqlalchemy.sql import column, table sess = create_session() t1 = table('t1', column('c1'), column('c2')) q1 = sess.query(t1.c.c1, t1.c.c2).filter(t1.c.c1 == 'dog') q1 = q1.from_self().from_self() self.assert_compile( q1.order_by(t1.c.c1), "SELECT anon_1.anon_2_t1_c1 AS anon_1_anon_2_t1_c1, anon_1.anon_2_t1_c2 " "AS anon_1_anon_2_t1_c2 FROM (SELECT anon_2.t1_c1 AS anon_2_t1_c1, " "anon_2.t1_c2 AS anon_2_t1_c2 FROM (SELECT t1.c1 AS t1_c1, t1.c2 " "AS t1_c2 FROM t1 WHERE t1.c1 = :c1_1) AS anon_2) AS anon_1 ORDER BY " "anon_1.anon_2_t1_c1" ) def test_anonymous_labeled_expression(self): from sqlalchemy.sql import column sess = create_session() c1, c2 = column('c1'), column('c2') q1 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'dog') q2 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'cat') q3 = q1.union(q2) self.assert_compile( q3.order_by(c1), "SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM " "(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT " "c1 AS foo, c2 AS bar WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.foo" ) def test_anonymous_expression_plus_aliased_join(self): """test that the 'dont alias non-ORM' rule remains for other kinds of aliasing when _from_selectable() is used.""" User = self.classes.User Address = self.classes.Address addresses = self.tables.addresses sess = create_session() q1 = sess.query(User.id).filter(User.id > 5) q1 = q1.from_self() q1 = q1.join(User.addresses, aliased=True).\ order_by(User.id, Address.id, addresses.c.id) self.assert_compile( q1, "SELECT anon_1.users_id AS anon_1_users_id " "FROM (SELECT users.id AS users_id FROM users " "WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 " "ON anon_1.users_id = addresses_1.user_id " "ORDER BY anon_1.users_id, addresses_1.id, addresses.id" ) class AddEntityEquivalenceTest(fixtures.MappedTest, AssertsCompiledSQL): run_setup_mappers = 'once' @classmethod def define_tables(cls, metadata): Table('a', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(20)), Column('bid', Integer, ForeignKey('b.id')) ) Table('b', metadata, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(50)), Column('type', String(20)) ) Table('c', metadata, Column('id', Integer, ForeignKey('b.id'), primary_key=True), Column('age', Integer) ) Table('d', metadata, Column('id', Integer, ForeignKey('a.id'), 
                            primary_key=True),
            Column('dede', Integer)
        )

    @classmethod
    def setup_classes(cls):
        a, c, b, d = (cls.tables.a,
                        cls.tables.c,
                        cls.tables.b,
                        cls.tables.d)

        class A(cls.Comparable):
            pass

        class B(cls.Comparable):
            pass

        class C(B):
            pass

        class D(A):
            pass

        mapper(A, a,
                polymorphic_identity='a',
                polymorphic_on=a.c.type,
                with_polymorphic=('*', None),
                properties={
                    'link': relation(B, uselist=False, backref='back')
                })
        mapper(B, b,
                polymorphic_identity='b',
                polymorphic_on=b.c.type,
                with_polymorphic=('*', None)
            )
        mapper(C, c, inherits=B, polymorphic_identity='c')
        mapper(D, d, inherits=A, polymorphic_identity='d')

    @classmethod
    def insert_data(cls):
        A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B)

        sess = create_session()
        sess.add_all([
            B(name='b1'),
            A(name='a1', link=C(name='c1', age=3)),
            C(name='c2', age=6),
            A(name='a2')
        ])
        sess.flush()

    def test_add_entity_equivalence(self):
        A, C, B = (self.classes.A, self.classes.C, self.classes.B)

        sess = create_session()

        for q in [
            sess.query(A, B).join(A.link),
            sess.query(A).join(A.link).add_entity(B),
        ]:
            eq_(
                q.all(),
                [(
                    A(bid=2, id=1, name='a1', type='a'),
                    C(age=3, id=2, name='c1', type='c')
                )]
            )

        for q in [
            sess.query(B, A).join(B.back),
            sess.query(B).join(B.back).add_entity(A),
            sess.query(B).add_entity(A).join(B.back)
        ]:
            eq_(
                q.all(),
                [(
                    C(age=3, id=2, name='c1', type='c'),
                    A(bid=2, id=1, name='a1', type='a')
                )]
            )


class InstancesTest(QueryTest, AssertsCompiledSQL):

    def test_from_alias(self):
        User, addresses, users = (self.classes.User,
                                self.tables.addresses,
                                self.tables.users)

        query = users.select(users.c.id==7).\
            union(users.select(users.c.id>7)).\
            alias('ulist').\
            outerjoin(addresses).\
            select(use_labels=True,
                    order_by=['ulist.id', addresses.c.id])
        sess = create_session()
        q = sess.query(User)

        def go():
            l = list(q.options(contains_alias('ulist'),
                        contains_eager('addresses')).\
                        instances(query.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()

        def go():
            l = q.options(contains_alias('ulist'),
                        contains_eager('addresses')).\
                    from_statement(query).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

        # better way.
        # use select_entity_from()
        def go():
            l = sess.query(User).select_entity_from(query).\
                options(contains_eager('addresses')).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

        # same thing, but alias addresses, so that the adapter
        # generated by select_entity_from() is wrapped within
        # the adapter created by contains_eager()
        adalias = addresses.alias()
        query = users.select(users.c.id==7).\
            union(users.select(users.c.id>7)).\
            alias('ulist').\
            outerjoin(adalias).\
            select(use_labels=True,
                    order_by=['ulist.id', adalias.c.id])
        def go():
            l = sess.query(User).select_entity_from(query).\
                options(contains_eager('addresses', alias=adalias)).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager(self):
        users, addresses, User = (self.tables.users,
                                self.tables.addresses,
                                self.classes.User)

        sess = create_session()

        # test that contains_eager suppresses the normal outer join rendering
        q = sess.query(User).outerjoin(User.addresses).\
            options(contains_eager(User.addresses)).\
            order_by(User.id, addresses.c.id)
        self.assert_compile(q.with_labels().statement,
                            'SELECT addresses.id AS addresses_id, '
                            'addresses.user_id AS addresses_user_id, '
                            'addresses.email_address AS '
                            'addresses_email_address, users.id AS '
                            'users_id, users.name AS users_name FROM '
                            'users LEFT OUTER JOIN addresses ON '
                            'users.id = addresses.user_id ORDER BY '
                            'users.id, addresses.id',
                            dialect=default.DefaultDialect())

        def go():
            assert self.static.user_address_result == q.all()
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        adalias = addresses.alias()
        q = sess.query(User).\
            select_entity_from(users.outerjoin(adalias)).\
            options(contains_eager(User.addresses, alias=adalias)).\
            order_by(User.id, adalias.c.id)
        def go():
            eq_(self.static.user_address_result, q.order_by(User.id).all())
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        selectquery = users.\
            outerjoin(addresses).\
            select(users.c.id<10,
                    use_labels=True,
                    order_by=[users.c.id, addresses.c.id])
        q = sess.query(User)

        def go():
            l = list(q.options(
                contains_eager('addresses')
            ).instances(selectquery.execute()))
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            l = list(q.options(
                contains_eager(User.addresses)
            ).instances(selectquery.execute()))
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            l = q.options(
                contains_eager('addresses')
            ).from_statement(selectquery).all()
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_string_alias(self):
        addresses, users, User = (self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        adalias = addresses.alias('adalias')
        selectquery = users.outerjoin(adalias).\
            select(use_labels=True,
                    order_by=[users.c.id, adalias.c.id])

        # string alias name
        def go():
            l = list(q.options(
                contains_eager('addresses', alias="adalias")
            ).instances(selectquery.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_aliased_instances(self):
        addresses, users, User = (self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        adalias = addresses.alias('adalias')
        selectquery = users.outerjoin(adalias).\
            select(use_labels=True,
                    order_by=[users.c.id, adalias.c.id])

        # expression.Alias object
        def go():
            l = list(q.options(
                contains_eager('addresses', alias=adalias)
            ).instances(selectquery.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_aliased(self):
        User, Address = self.classes.User, self.classes.Address
        sess = create_session()
        q = sess.query(User)

        # Aliased object
        adalias = aliased(Address)
        def go():
            l = q.options(
                contains_eager('addresses', alias=adalias)
            ).\
                outerjoin(adalias, User.addresses).\
                order_by(User.id, adalias.id)
            assert self.static.user_address_result == l.all()
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_string_alias(self):
        orders, items, users, order_items, User = (self.tables.orders,
                                self.tables.items,
                                self.tables.users,
                                self.tables.order_items,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        oalias = orders.alias('o1')
        ialias = items.alias('i1')
        query = users.outerjoin(oalias).\
            outerjoin(order_items).\
            outerjoin(ialias).\
            select(use_labels=True).\
            order_by(users.c.id, oalias.c.id, ialias.c.id)

        # test using string alias with more than one level deep
        def go():
            l = list(q.options(
                contains_eager('orders', alias='o1'),
                contains_eager('orders.items', alias='i1')
            ).instances(query.execute()))
            assert self.static.user_order_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_alias(self):
        orders, items, users, order_items, User = (self.tables.orders,
                                self.tables.items,
                                self.tables.users,
                                self.tables.order_items,
                                self.classes.User)

        sess = create_session()
        q = sess.query(User)

        oalias = orders.alias('o1')
        ialias = items.alias('i1')
        query = users.outerjoin(oalias).\
            outerjoin(order_items).\
            outerjoin(ialias).\
            select(use_labels=True).\
            order_by(users.c.id, oalias.c.id, ialias.c.id)

        # test using Alias with more than one level deep
        def go():
            l = list(q.options(
                contains_eager('orders', alias=oalias),
                contains_eager('orders.items', alias=ialias)
            ).instances(query.execute()))
            assert self.static.user_order_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_aliased(self):
        Item, User, Order = (self.classes.Item,
                                self.classes.User,
                                self.classes.Order)

        sess = create_session()
        q = sess.query(User)

        # test using Aliased with more than one level deep
        oalias = aliased(Order)
        ialias = aliased(Item)
        def go():
            l = q.options(
                contains_eager(User.orders, alias=oalias),
                contains_eager(User.orders, Order.items, alias=ialias)
            ).\
                outerjoin(oalias, User.orders).\
                outerjoin(ialias, oalias.items).\
                order_by(User.id, oalias.id, ialias.id)
            assert self.static.user_order_result == l.all()
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_chaining(self):
        """test that contains_eager() 'chains' by default."""

        Dingaling, User, Address = (self.classes.Dingaling,
                                self.classes.User,
                                self.classes.Address)

        sess = create_session()
        q = sess.query(User).\
            join(User.addresses).\
            join(Address.dingaling).\
            options(
                contains_eager(User.addresses, Address.dingaling),
            )

        def go():
            eq_(
                q.all(),
                # note we only load the Address records that
                # have a Dingaling here due to using the inner
                # join for the eager load
                [
                    User(name='ed', addresses=[
                        Address(email_address='[email protected]',
                                dingaling=Dingaling(data='ding 1/2')),
                    ]),
                    User(name='fred', addresses=[
                        Address(email_address='[email protected]',
                                dingaling=Dingaling(data='ding 2/5'))
                    ])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_chaining_aliased_endpoint(self):
        """test that contains_eager() 'chains' by default and supports
        an alias at the end."""

        Dingaling, User, Address = (self.classes.Dingaling,
                                self.classes.User,
                                self.classes.Address)

        sess = create_session()
        da = aliased(Dingaling, name="foob")
        q = sess.query(User).\
            join(User.addresses).\
            join(da, Address.dingaling).\
            options(
                contains_eager(User.addresses, Address.dingaling, alias=da),
            )

        def go():
            eq_(
                q.all(),
                # note we only load the Address records that
                # have a Dingaling here due to using the inner
                # join for the eager load
                [
                    User(name='ed', addresses=[
                        Address(email_address='[email protected]',
                                dingaling=Dingaling(data='ding 1/2')),
                    ]),
                    User(name='fred', addresses=[
                        Address(email_address='[email protected]',
                                dingaling=Dingaling(data='ding 2/5'))
                    ])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_mixed_eager_contains_with_limit(self):
        Order, User, Address = (self.classes.Order,
                                self.classes.User,
                                self.classes.Address)

        sess = create_session()

        q = sess.query(User)
        def go():
            # outerjoin to User.orders, offset 1/limit 2 so we get user
            # 7 + second two orders. then joinedload the addresses.
            # User + Order columns go into the subquery, address left
            # outer joins to the subquery, joinedloader for User.orders
            # applies context.adapter to result rows.  This was
            # [ticket:1180].
            l = \
                q.outerjoin(User.orders).options(joinedload(User.addresses),
                    contains_eager(User.orders)).order_by(User.id,
                    Order.id).offset(1).limit(2).all()
            eq_(l, [User(id=7,
                addresses=[Address(email_address='[email protected]',
                user_id=7, id=1)], name='jack',
                orders=[Order(address_id=1, user_id=7,
                description='order 3', isopen=1, id=3),
                Order(address_id=None, user_id=7, description='order 5'
                , isopen=0, id=5)])])
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            # same as above, except Order is aliased, so two adapters
            # are applied by the eager loader
            oalias = aliased(Order)
            l = q.outerjoin(oalias, User.orders).\
                options(joinedload(User.addresses),
                        contains_eager(User.orders, alias=oalias)).\
                order_by(User.id, oalias.id).\
                offset(1).limit(2).all()
            eq_(l, [User(id=7,
                addresses=[Address(email_address='[email protected]',
                user_id=7, id=1)], name='jack',
                orders=[Order(address_id=1, user_id=7,
                description='order 3', isopen=1, id=3),
                Order(address_id=None, user_id=7, description='order 5'
                , isopen=0, id=5)])])
        self.assert_sql_count(testing.db, go, 1)


class MixedEntitiesTest(QueryTest, AssertsCompiledSQL):
    __dialect__ = 'default'

    def test_values(self):
        Address, users, User = (self.classes.Address,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()

        assert list(sess.query(User).values()) == list()

        sel = users.select(User.id.in_([7, 8])).alias()
        q = sess.query(User)
        q2 = q.select_entity_from(sel).values(User.name)
        eq_(list(q2), [('jack',), ('ed',)])

        q = sess.query(User)
        q2 = q.order_by(User.id).\
            values(User.name, User.name + " " + cast(User.id, String(50)))
        eq_(
            list(q2),
            [('jack', 'jack 7'), ('ed', 'ed 8'),
            ('fred', 'fred 9'), ('chuck', 'chuck 10')]
        )

        q2 = q.join('addresses').\
            filter(User.name.like('%e%')).\
            order_by(User.id, Address.id).\
            values(User.name, Address.email_address)
        eq_(list(q2),
            [('ed', '[email protected]'), ('ed', '[email protected]'),
            ('ed', '[email protected]'), ('fred', '[email protected]')])

        q2 = q.join('addresses').\
            filter(User.name.like('%e%')).\
            order_by(desc(Address.email_address)).\
            slice(1, 3).values(User.name, Address.email_address)
        eq_(list(q2), [('ed', '[email protected]'), ('ed', '[email protected]')])

        adalias = aliased(Address)
        q2 = q.join(adalias, 'addresses').\
            filter(User.name.like('%e%')).order_by(adalias.email_address).\
            values(User.name, adalias.email_address)
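        # Editor's note - an illustrative aside, not part of the original test:
        # values() returns a generator of result tuples rather than a list,
        # which is why the assertions in this test wrap it in list(); a
        # hypothetical consumer could equally iterate it directly:
        #
        #     for name, email in q2:
        #         ...  # one (User.name, adalias.email_address) pair per row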
        eq_(list(q2),
            [('ed', '[email protected]'), ('ed', '[email protected]'),
            ('ed', '[email protected]'), ('fred', '[email protected]')])

        q2 = q.values(func.count(User.name))
        assert next(q2) == (4,)

        q2 = q.select_entity_from(sel).filter(User.id==8).values(User.name, sel.c.name, User.name)
        eq_(list(q2), [('ed', 'ed', 'ed')])

        # using User.xxx is aliased against "sel", so this query returns nothing
        q2 = q.select_entity_from(sel).\
            filter(User.id==8).\
            filter(User.id>sel.c.id).values(User.name, sel.c.name, User.name)
        eq_(list(q2), [])

        # whereas this uses users.c.xxx, which is not aliased and creates a new join
        q2 = q.select_entity_from(sel).\
            filter(users.c.id==8).\
            filter(users.c.id>sel.c.id).values(users.c.name, sel.c.name, User.name)
        eq_(list(q2), [('ed', 'jack', 'jack')])

    def test_alias_naming(self):
        User = self.classes.User

        sess = create_session()

        ua = aliased(User, name="foobar")
        q = sess.query(ua)
        self.assert_compile(
            q,
            "SELECT foobar.id AS foobar_id, "
            "foobar.name AS foobar_name FROM users AS foobar"
        )

    @testing.fails_on('mssql', 'FIXME: unknown')
    def test_values_specific_order_by(self):
        users, User = self.tables.users, self.classes.User

        sess = create_session()

        assert list(sess.query(User).values()) == list()

        sel = users.select(User.id.in_([7, 8])).alias()
        q = sess.query(User)
        u2 = aliased(User)
        q2 = q.select_entity_from(sel).\
            filter(u2.id>1).\
            order_by(User.id, sel.c.id, u2.id).\
            values(User.name, sel.c.name, u2.name)
        eq_(list(q2),
            [('jack', 'jack', 'jack'), ('jack', 'jack', 'ed'),
            ('jack', 'jack', 'fred'), ('jack', 'jack', 'chuck'),
            ('ed', 'ed', 'jack'), ('ed', 'ed', 'ed'),
            ('ed', 'ed', 'fred'), ('ed', 'ed', 'chuck')])

    @testing.fails_on('mssql', 'FIXME: unknown')
    @testing.fails_on('oracle',
        "Oracle doesn't support boolean expressions as "
        "columns")
    @testing.fails_on('postgresql+pg8000',
        "pg8000 parses the SQL itself before passing on "
        "to PG, doesn't parse this")
    @testing.fails_on('postgresql+zxjdbc',
        "zxjdbc parses the SQL itself before passing on "
        "to PG, doesn't parse this")
    @testing.fails_on("firebird", "unknown")
    def test_values_with_boolean_selects(self):
        """Tests a values clause that works with select boolean
        evaluations"""

        User = self.classes.User

        sess = create_session()

        q = sess.query(User)
        q2 = q.group_by(User.name.like('%j%')).\
            order_by(desc(User.name.like('%j%'))).\
            values(User.name.like('%j%'), func.count(User.name.like('%j%')))
        eq_(list(q2), [(True, 1), (False, 3)])

        q2 = q.order_by(desc(User.name.like('%j%'))).values(User.name.like('%j%'))
        eq_(list(q2), [(True,), (False,), (False,), (False,)])

    def test_correlated_subquery(self):
        """test that a subquery constructed from ORM attributes doesn't leak
        out those entities to the outermost query.
""" Address, users, User = (self.classes.Address, self.tables.users, self.classes.User) sess = create_session() subq = select([func.count()]).\ where(User.id==Address.user_id).\ correlate(users).\ label('count') # we don't want Address to be outside of the subquery here eq_( list(sess.query(User, subq)[0:3]), [(User(id=7,name='jack'), 1), (User(id=8,name='ed'), 3), (User(id=9,name='fred'), 1)] ) # same thing without the correlate, as it should # not be needed subq = select([func.count()]).\ where(User.id==Address.user_id).\ label('count') # we don't want Address to be outside of the subquery here eq_( list(sess.query(User, subq)[0:3]), [(User(id=7,name='jack'), 1), (User(id=8,name='ed'), 3), (User(id=9,name='fred'), 1)] ) def test_column_queries(self): Address, users, User = (self.classes.Address, self.tables.users, self.classes.User) sess = create_session() eq_(sess.query(User.name).all(), [('jack',), ('ed',), ('fred',), ('chuck',)]) sel = users.select(User.id.in_([7, 8])).alias() q = sess.query(User.name) q2 = q.select_entity_from(sel).all() eq_(list(q2), [('jack',), ('ed',)]) eq_(sess.query(User.name, Address.email_address).filter(User.id==Address.user_id).all(), [ ('jack', '[email protected]'), ('ed', '[email protected]'), ('ed', '[email protected]'), ('ed', '[email protected]'), ('fred', '[email protected]') ]) eq_(sess.query(User.name, func.count(Address.email_address)).\ outerjoin(User.addresses).group_by(User.id, User.name).\ order_by(User.id).all(), [('jack', 1), ('ed', 3), ('fred', 1), ('chuck', 0)] ) eq_(sess.query(User, func.count(Address.email_address)).\ outerjoin(User.addresses).group_by(User).\ order_by(User.id).all(), [(User(name='jack',id=7), 1), (User(name='ed',id=8), 3), (User(name='fred',id=9), 1), (User(name='chuck',id=10), 0)] ) eq_(sess.query(func.count(Address.email_address), User).\ outerjoin(User.addresses).group_by(User).\ order_by(User.id).all(), [(1, User(name='jack',id=7)), (3, User(name='ed',id=8)), (1, User(name='fred',id=9)), (0, User(name='chuck',id=10))] ) adalias = aliased(Address) eq_(sess.query(User, func.count(adalias.email_address)).\ outerjoin(adalias, 'addresses').group_by(User).\ order_by(User.id).all(), [(User(name='jack',id=7), 1), (User(name='ed',id=8), 3), (User(name='fred',id=9), 1), (User(name='chuck',id=10), 0)] ) eq_(sess.query(func.count(adalias.email_address), User).\ outerjoin(adalias, User.addresses).group_by(User).\ order_by(User.id).all(), [(1, User(name='jack',id=7)), (3, User(name='ed',id=8)), (1, User(name='fred',id=9)), (0, User(name='chuck',id=10))] ) # select from aliasing + explicit aliasing eq_( sess.query(User, adalias.email_address, adalias.id).\ outerjoin(adalias, User.addresses).\ from_self(User, adalias.email_address).\ order_by(User.id, adalias.id).all(), [ (User(name='jack',id=7), '[email protected]'), (User(name='ed',id=8), '[email protected]'), (User(name='ed',id=8), '[email protected]'), (User(name='ed',id=8), '[email protected]'), (User(name='fred',id=9), '[email protected]'), (User(name='chuck',id=10), None) ] ) # anon + select from aliasing eq_( sess.query(User).join(User.addresses, aliased=True).\ filter(Address.email_address.like('%ed%')).\ from_self().all(), [ User(name='ed',id=8), User(name='fred',id=9), ] ) # test eager aliasing, with/without select_entity_from aliasing for q in [ sess.query(User, adalias.email_address).\ outerjoin(adalias, User.addresses).\ options(joinedload(User.addresses)).\ order_by(User.id, adalias.id).limit(10), sess.query(User, adalias.email_address, adalias.id).\ outerjoin(adalias, 
                            User.addresses).\
                from_self(User, adalias.email_address).\
                options(joinedload(User.addresses)).\
                order_by(User.id, adalias.id).limit(10),
        ]:
            eq_(
                q.all(),
                [(User(addresses=[
                        Address(user_id=7,email_address='[email protected]',id=1)],
                        name='jack',id=7), '[email protected]'),
                (User(addresses=[
                        Address(user_id=8,email_address='[email protected]',id=2),
                        Address(user_id=8,email_address='[email protected]',id=3),
                        Address(user_id=8,email_address='[email protected]',id=4)],
                        name='ed',id=8), '[email protected]'),
                (User(addresses=[
                        Address(user_id=8,email_address='[email protected]',id=2),
                        Address(user_id=8,email_address='[email protected]',id=3),
                        Address(user_id=8,email_address='[email protected]',id=4)],name='ed',id=8),
                        '[email protected]'),
                (User(addresses=[
                        Address(user_id=8,email_address='[email protected]',id=2),
                        Address(user_id=8,email_address='[email protected]',id=3),
                        Address(user_id=8,email_address='[email protected]',id=4)],name='ed',id=8),
                        '[email protected]'),
                (User(addresses=[Address(user_id=9,email_address='[email protected]',id=5)],name='fred',id=9),
                        '[email protected]'),
                (User(addresses=[],name='chuck',id=10), None)]
            )

    def test_column_from_limited_joinedload(self):
        User = self.classes.User

        sess = create_session()

        def go():
            results = sess.query(User).limit(1).\
                options(joinedload('addresses')).\
                add_column(User.name).all()
            eq_(results, [(User(name='jack'), 'jack')])
        self.assert_sql_count(testing.db, go, 1)

    @testing.fails_on("firebird", "unknown")
    @testing.fails_on('postgresql+pg8000',
            "'type oid 705 not mapped to py type' (due to literal)")
    def test_self_referential(self):
        Order = self.classes.Order

        sess = create_session()
        oalias = aliased(Order)

        for q in [
            sess.query(Order, oalias).\
                filter(Order.user_id==oalias.user_id).filter(Order.user_id==7).\
                filter(Order.id>oalias.id).order_by(Order.id, oalias.id),
            sess.query(Order, oalias).from_self().filter(Order.user_id==oalias.user_id).\
                filter(Order.user_id==7).filter(Order.id>oalias.id).\
                order_by(Order.id, oalias.id),
            # same thing, but reversed.
            sess.query(oalias, Order).from_self().filter(oalias.user_id==Order.user_id).\
                filter(oalias.user_id==7).filter(Order.id<oalias.id).\
                order_by(oalias.id, Order.id),
            # here we go....two layers of aliasing
            sess.query(Order, oalias).filter(Order.user_id==oalias.user_id).\
                filter(Order.user_id==7).filter(Order.id>oalias.id).\
                from_self().order_by(Order.id, oalias.id).\
                limit(10).options(joinedload(Order.items)),
            # gratuitous four layers
            sess.query(Order, oalias).filter(Order.user_id==oalias.user_id).\
                filter(Order.user_id==7).filter(Order.id>oalias.id).from_self().\
                from_self().from_self().order_by(Order.id, oalias.id).\
                limit(10).options(joinedload(Order.items)),
        ]:
            eq_(
                q.all(),
                [
                    (Order(address_id=1,description='order 3',isopen=1,user_id=7,id=3),
                        Order(address_id=1,description='order 1',isopen=0,user_id=7,id=1)),
                    (Order(address_id=None,description='order 5',isopen=0,user_id=7,id=5),
                        Order(address_id=1,description='order 1',isopen=0,user_id=7,id=1)),
                    (Order(address_id=None,description='order 5',isopen=0,user_id=7,id=5),
                        Order(address_id=1,description='order 3',isopen=1,user_id=7,id=3))
                ]
            )

        # ensure column expressions are taken from inside the subquery,
        # not restated at the top
        q = sess.query(Order.id, Order.description,
                literal_column("'q'").label('foo')).\
            filter(Order.description == 'order 3').from_self()
        self.assert_compile(q,
                            "SELECT anon_1.orders_id AS "
                            "anon_1_orders_id, anon_1.orders_descriptio"
                            "n AS anon_1_orders_description, "
                            "anon_1.foo AS anon_1_foo FROM (SELECT "
                            "orders.id AS orders_id, "
                            "orders.description AS orders_description, "
                            "'q' AS foo FROM orders WHERE "
                            "orders.description = :description_1) AS "
                            "anon_1")
        eq_(
            q.all(),
            [(3, 'order 3', 'q')]
        )

    def test_multi_mappers(self):
        Address, addresses, users, User = (self.classes.Address,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        test_session = create_session()<|fim▁hole|>
        (user7, user8, user9, user10) = test_session.query(User).all()
        (address1, address2, address3, address4, address5) = \
            test_session.query(Address).all()

        expected = [(user7, address1),
                    (user8, address2),
                    (user8, address3),
                    (user8, address4),
                    (user9, address5),
                    (user10, None)]

        sess = create_session()

        selectquery = users.outerjoin(addresses).select(use_labels=True,
                        order_by=[users.c.id, addresses.c.id])
        eq_(list(sess.query(User, Address).instances(selectquery.execute())),
            expected)
        sess.expunge_all()

        for address_entity in (Address, aliased(Address)):
            q = sess.query(User).add_entity(address_entity).\
                outerjoin(address_entity, 'addresses').\
                order_by(User.id, address_entity.id)
            eq_(q.all(), expected)
            sess.expunge_all()

            q = sess.query(User).add_entity(address_entity)
            q = q.join(address_entity, 'addresses')
            q = q.filter_by(email_address='[email protected]')
            eq_(q.all(), [(user8, address3)])
            sess.expunge_all()

            q = sess.query(User, address_entity).join(address_entity, 'addresses').\
                filter_by(email_address='[email protected]')
            eq_(q.all(), [(user8, address3)])
            sess.expunge_all()

            q = sess.query(User, address_entity).join(address_entity, 'addresses').\
                options(joinedload('addresses')).\
                filter_by(email_address='[email protected]')
            eq_(list(util.OrderedSet(q.all())), [(user8, address3)])
            sess.expunge_all()

    def test_aliased_multi_mappers(self):
        User, addresses, users, Address = (self.classes.User,
                                self.tables.addresses,
                                self.tables.users,
                                self.classes.Address)

        sess = create_session()

        (user7, user8, user9, user10) = sess.query(User).all()
        (address1, address2, address3, address4, address5) = sess.query(Address).all()

        expected = [(user7, address1), (user8,
                        address2), (user8, address3),
                    (user8, address4), (user9, address5), (user10, None)]

        q = sess.query(User)
        adalias = addresses.alias('adalias')
        q = q.add_entity(Address, alias=adalias).select_entity_from(users.outerjoin(adalias))
        l = q.order_by(User.id, adalias.c.id).all()
        assert l == expected
        sess.expunge_all()

        q = sess.query(User).add_entity(Address, alias=adalias)
        l = q.select_entity_from(users.outerjoin(adalias)).filter(adalias.c.email_address=='[email protected]').all()
        assert l == [(user8, address3)]

    def test_with_entities(self):
        User, Address = self.classes.User, self.classes.Address

        sess = create_session()

        q = sess.query(User).filter(User.id==7).order_by(User.name)
        self.assert_compile(
            q.with_entities(User.id,Address).\
                filter(Address.user_id == User.id),
            'SELECT users.id AS users_id, addresses.id '
            'AS addresses_id, addresses.user_id AS '
            'addresses_user_id, addresses.email_address'
            ' AS addresses_email_address FROM users, '
            'addresses WHERE users.id = :id_1 AND '
            'addresses.user_id = users.id ORDER BY '
            'users.name')

    def test_multi_columns(self):
        users, User = self.tables.users, self.classes.User

        sess = create_session()

        expected = [(u, u.name) for u in sess.query(User).all()]

        for add_col in (User.name, users.c.name):
            assert sess.query(User).add_column(add_col).all() == expected
            sess.expunge_all()

        assert_raises(sa_exc.InvalidRequestError,
                    sess.query(User).add_column, object())

    def test_add_multi_columns(self):
        """test that add_column accepts a FROM clause."""

        users, User = self.tables.users, self.classes.User

        sess = create_session()

        eq_(
            sess.query(User.id).add_column(users).all(),
            [(7, 7, 'jack'), (8, 8, 'ed'), (9, 9, 'fred'), (10, 10, 'chuck')]
        )

    def test_multi_columns_2(self):
        """test aliased/nonaliased joins with the usage of add_column()"""

        User, Address, addresses, users = (self.classes.User,
                                self.classes.Address,
                                self.tables.addresses,
                                self.tables.users)

        sess = create_session()

        (user7, user8, user9, user10) = sess.query(User).all()
        expected = [(user7, 1),
            (user8, 3),
            (user9, 1),
            (user10, 0)
            ]

        q = sess.query(User)
        q = q.group_by(users).order_by(User.id).outerjoin('addresses').\
            add_column(func.count(Address.id).label('count'))
        eq_(q.all(), expected)
        sess.expunge_all()

        adalias = aliased(Address)
        q = sess.query(User)
        q = q.group_by(users).order_by(User.id).outerjoin(adalias, 'addresses').\
            add_column(func.count(adalias.id).label('count'))
        eq_(q.all(), expected)
        sess.expunge_all()

        # TODO: figure out why group_by(users) doesn't work here
        s = select([users, func.count(addresses.c.id).label('count')]).\
            select_from(users.outerjoin(addresses)).\
            group_by(*[c for c in users.c]).order_by(User.id)
        q = sess.query(User)
        l = q.add_column("count").from_statement(s).all()
        assert l == expected

    def test_raw_columns(self):
        addresses, users, User = (self.tables.addresses,
                                self.tables.users,
                                self.classes.User)

        sess = create_session()

        (user7, user8, user9, user10) = sess.query(User).all()
        expected = [
            (user7, 1, "Name:jack"),
            (user8, 3, "Name:ed"),
            (user9, 1, "Name:fred"),
            (user10, 0, "Name:chuck")]

        adalias = addresses.alias()
        q = create_session().query(User).add_column(func.count(adalias.c.id))\
            .add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\
            .group_by(users).order_by(users.c.id)

        assert q.all() == expected

        # test with a straight statement
        s = select([users, func.count(addresses.c.id).label('count'),
                ("Name:" + users.c.name).label('concat')],
                from_obj=[users.outerjoin(addresses)],
                group_by=[c for c in users.c],
                order_by=[users.c.id])
        q = create_session().query(User)
        l = q.add_column("count").add_column("concat").from_statement(s).all()
        assert l == expected

        sess.expunge_all()

        # test with select_entity_from()
        q = create_session().query(User).add_column(func.count(addresses.c.id))\
            .add_column(("Name:" + users.c.name)).select_entity_from(users.outerjoin(addresses))\
            .group_by(users).order_by(users.c.id)

        assert q.all() == expected
        sess.expunge_all()

        q = create_session().query(User).add_column(func.count(addresses.c.id))\
            .add_column(("Name:" + users.c.name)).outerjoin('addresses')\
            .group_by(users).order_by(users.c.id)

        assert q.all() == expected
        sess.expunge_all()

        q = create_session().query(User).add_column(func.count(adalias.c.id))\
            .add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\
            .group_by(users).order_by(users.c.id)

        assert q.all() == expected
        sess.expunge_all()

    def test_expression_selectable_matches_mzero(self):
        User, Address = self.classes.User, self.classes.Address

        ua = aliased(User)
        aa = aliased(Address)
        s = create_session()
        for crit, j, exp in [
            (User.id + Address.id, User.addresses,
                            "SELECT users.id + addresses.id AS anon_1 "
                            "FROM users JOIN addresses ON users.id = "
                            "addresses.user_id"
            ),
            (User.id + Address.id, Address.user,
                            "SELECT users.id + addresses.id AS anon_1 "
                            "FROM addresses JOIN users ON users.id = "
                            "addresses.user_id"
            ),
            (Address.id + User.id, User.addresses,
                            "SELECT addresses.id + users.id AS anon_1 "
                            "FROM users JOIN addresses ON users.id = "
                            "addresses.user_id"
            ),
            (User.id + aa.id, (aa, User.addresses),
                            "SELECT users.id + addresses_1.id AS anon_1 "
                            "FROM users JOIN addresses AS addresses_1 "
                            "ON users.id = addresses_1.user_id"
            ),
        ]:
            q = s.query(crit)
            mzero = q._mapper_zero()
            assert mzero.mapped_table is q._entity_zero().selectable
            q = q.join(j)
            self.assert_compile(q, exp)

        for crit, j, exp in [
            (ua.id + Address.id, ua.addresses,
                            "SELECT users_1.id + addresses.id AS anon_1 "
                            "FROM users AS users_1 JOIN addresses "
                            "ON users_1.id = addresses.user_id"),
            (ua.id + aa.id, (aa, ua.addresses),
                            "SELECT users_1.id + addresses_1.id AS anon_1 "
                            "FROM users AS users_1 JOIN addresses AS "
                            "addresses_1 ON users_1.id = addresses_1.user_id"),
            (ua.id + aa.id, (ua, aa.user),
                            "SELECT users_1.id + addresses_1.id AS anon_1 "
                            "FROM addresses AS addresses_1 JOIN "
                            "users AS users_1 "
                            "ON users_1.id = addresses_1.user_id")
        ]:
            q = s.query(crit)
            mzero = q._mapper_zero()
            assert inspect(mzero).selectable is q._entity_zero().selectable
            q = q.join(j)
            self.assert_compile(q, exp)

    def test_aliased_adapt_on_names(self):
        User, Address = self.classes.User, self.classes.Address

        sess = Session()
        agg_address = sess.query(Address.id,
                        func.sum(func.length(Address.email_address)).label('email_address')
                        ).group_by(Address.user_id)
        ag1 = aliased(Address, agg_address.subquery())
        ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True)

        # first, without adapt on names, 'email_address' isn't matched up -
        # we get the raw "address" element in the SELECT
        self.assert_compile(
            sess.query(User, ag1.email_address).join(ag1, User.addresses).filter(ag1.email_address > 5),
            "SELECT users.id AS users_id, users.name AS users_name, addresses.email_address "
            "AS addresses_email_address FROM addresses, users JOIN "
            "(SELECT addresses.id AS id, sum(length(addresses.email_address)) "
            "AS email_address FROM addresses GROUP BY addresses.user_id) AS "
            "anon_1 ON users.id = addresses.user_id WHERE addresses.email_address > :email_address_1"
        )

        # second, 'email_address' matches up to the aggregate, and we get a smooth JOIN
        # from users->subquery and that's it
        self.assert_compile(
            sess.query(User, ag2.email_address).join(ag2, User.addresses).filter(ag2.email_address > 5),
            "SELECT users.id AS users_id, users.name AS users_name, "
            "anon_1.email_address AS anon_1_email_address FROM users "
            "JOIN (SELECT addresses.id AS id, sum(length(addresses.email_address)) "
            "AS email_address FROM addresses GROUP BY addresses.user_id) AS "
            "anon_1 ON users.id = addresses.user_id WHERE anon_1.email_address > :email_address_1",
        )


class SelectFromTest(QueryTest, AssertsCompiledSQL):
    run_setup_mappers = None
    __dialect__ = 'default'

    def test_replace_with_select(self):
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties = {
            'addresses':relationship(Address)
        })
        mapper(Address, addresses)

        sel = users.select(users.c.id.in_([7, 8])).alias()
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).all(),
            [User(id=7), User(id=8)])

        eq_(sess.query(User).select_entity_from(sel).filter(User.id==8).all(),
            [User(id=8)])

        eq_(sess.query(User).select_entity_from(sel).order_by(desc(User.name)).all(), [
            User(name='jack',id=7), User(name='ed',id=8)
        ])

        eq_(sess.query(User).select_entity_from(sel).order_by(asc(User.name)).all(), [
            User(name='ed',id=8), User(name='jack',id=7)
        ])

        eq_(sess.query(User).select_entity_from(sel).options(joinedload('addresses')).first(),
            User(name='jack', addresses=[Address(id=1)])
        )

    def test_join_mapper_order_by(self):
        """test that mapper-level order_by is adapted to a selectable."""

        User, users = self.classes.User, self.tables.users

        mapper(User, users, order_by=users.c.id)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).all(),
            [
                User(name='jack',id=7), User(name='ed',id=8)
            ]
        )

    def test_differentiate_self_external(self):
        """test some different combinations of joining a table
        to a subquery of itself."""

        users, User = self.tables.users, self.classes.User

        mapper(User, users)

        sess = create_session()

        sel = sess.query(User).filter(User.id.in_([7, 8])).subquery()
        ualias = aliased(User)

        self.assert_compile(
            sess.query(User).join(sel, User.id>sel.c.id),
            "SELECT users.id AS users_id, users.name AS users_name FROM "
            "users JOIN (SELECT users.id AS id, users.name AS name FROM "
            "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 ON users.id > anon_1.id",
        )

        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).filter(ualias.id>sel.c.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM "
            "users AS users_1, (SELECT users.id AS id, users.name AS name FROM "
            "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 WHERE users_1.id > anon_1.id",
        )

        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).join(ualias, ualias.id>sel.c.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id"
        )

        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).join(ualias, ualias.id>User.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name FROM "
            "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON anon_1.id < users_1.id"
        )

        salias = aliased(User, sel)
        self.assert_compile(
            sess.query(salias).join(ualias, ualias.id>salias.id),
            "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM "
            "(SELECT users.id AS id, users.name AS "
            "name FROM users WHERE users.id "
            "IN (:id_1, :id_2)) AS anon_1 JOIN users AS users_1 ON users_1.id > anon_1.id",
        )

        # this one uses an explicit join(left, right, onclause) so works
        self.assert_compile(
            sess.query(ualias).select_entity_from(join(sel, ualias, ualias.id>sel.c.id)),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name FROM "
            "(SELECT users.id AS id, users.name AS name FROM users WHERE users.id "
            "IN (:id_1, :id_2)) AS anon_1 JOIN users AS users_1 ON users_1.id > anon_1.id",
            use_default_dialect=True
        )

    def test_aliased_class_vs_nonaliased(self):
        User, users = self.classes.User, self.tables.users
        mapper(User, users)

        ua = aliased(User)

        sess = create_session()
        self.assert_compile(
            sess.query(User).select_from(ua).join(User, ua.name > User.name),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users AS users_1 JOIN users ON users.name < users_1.name"
        )

        self.assert_compile(
            sess.query(User.name).select_from(ua).join(User, ua.name > User.name),
            "SELECT users.name AS users_name FROM users AS users_1 "
            "JOIN users ON users.name < users_1.name"
        )

        self.assert_compile(
            sess.query(ua.name).select_from(ua).join(User, ua.name > User.name),
            "SELECT users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN users ON users.name < users_1.name"
        )

        self.assert_compile(
            sess.query(ua).select_from(User).join(ua, ua.name > User.name),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users JOIN users AS users_1 ON users.name < users_1.name"
        )

        # this is tested in many other places here, just adding it
        # here for comparison
        self.assert_compile(
            sess.query(User.name).\
                select_entity_from(users.select().where(users.c.id > 5)),
            "SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
            "users.name AS name FROM users WHERE users.id > :id_1) AS anon_1"
        )

    def test_join_no_order_by(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).all(),
            [
                User(name='jack',id=7), User(name='ed',id=8)
            ]
        )

    def test_join(self):
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties = {
            'addresses':relationship(Address)
        })
        mapper(Address, addresses)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        eq_(sess.query(User).select_entity_from(sel).join('addresses').
                add_entity(Address).order_by(User.id).order_by(Address.id).all(),
            [
                (User(name='jack',id=7), Address(user_id=7,email_address='[email protected]',id=1)),
                (User(name='ed',id=8), Address(user_id=8,email_address='[email protected]',id=2)),
                (User(name='ed',id=8), Address(user_id=8,email_address='[email protected]',id=3)),
                (User(name='ed',id=8), Address(user_id=8,email_address='[email protected]',id=4))
            ]
        )

        adalias = aliased(Address)
        eq_(sess.query(User).select_entity_from(sel).join(adalias, 'addresses').
                add_entity(adalias).order_by(User.id).order_by(adalias.id).all(),
            [
                (User(name='jack',id=7), Address(user_id=7,email_address='[email protected]',id=1)),
                (User(name='ed',id=8), Address(user_id=8,email_address='[email protected]',id=2)),
                (User(name='ed',id=8), Address(user_id=8,email_address='[email protected]',id=3)),
                (User(name='ed',id=8), Address(user_id=8,email_address='[email protected]',id=4))
            ]
        )

    def test_more_joins(self):
        users, Keyword, orders, items, order_items, Order, Item, \
            User, keywords, item_keywords = (self.tables.users,
                                self.classes.Keyword,
                                self.tables.orders,
                                self.tables.items,
                                self.tables.order_items,
                                self.classes.Order,
                                self.classes.Item,
                                self.classes.User,
                                self.tables.keywords,
                                self.tables.item_keywords)

        mapper(User, users, properties={
            'orders':relationship(Order, backref='user'), # o2m, m2o
        })
        mapper(Order, orders, properties={
            'items':relationship(Item,
                        secondary=order_items,
                        order_by=items.c.id),  #m2m
        })
        mapper(Item, items, properties={
            'keywords':relationship(Keyword,
                        secondary=item_keywords,
                        order_by=keywords.c.id) #m2m
        })
        mapper(Keyword, keywords)

        sess = create_session()
        sel = users.select(users.c.id.in_([7, 8]))

        eq_(sess.query(User).select_entity_from(sel).\
                join('orders', 'items', 'keywords').\
                filter(Keyword.name.in_(['red', 'big', 'round'])).\
                all(),
            [
                User(name='jack',id=7)
            ])

        eq_(sess.query(User).select_entity_from(sel).\
                join('orders', 'items', 'keywords', aliased=True).\
                filter(Keyword.name.in_(['red', 'big', 'round'])).\
                all(),
            [
                User(name='jack',id=7)
            ])

    def test_very_nested_joins_with_joinedload(self):
        users, Keyword, orders, items, order_items, Order, Item, \
            User, keywords, item_keywords = (self.tables.users,
                                self.classes.Keyword,
                                self.tables.orders,
                                self.tables.items,
                                self.tables.order_items,
                                self.classes.Order,
                                self.classes.Item,
                                self.classes.User,
                                self.tables.keywords,
                                self.tables.item_keywords)

        mapper(User, users, properties={
            'orders':relationship(Order, backref='user'), # o2m, m2o
        })
        mapper(Order, orders, properties={
            'items':relationship(Item,
                        secondary=order_items,
                        order_by=items.c.id),  #m2m
        })
        mapper(Item, items, properties={
            'keywords':relationship(Keyword,
                        secondary=item_keywords,
                        order_by=keywords.c.id) #m2m
        })
        mapper(Keyword, keywords)

        sess = create_session()

        sel = users.select(users.c.id.in_([7, 8]))

        def go():
            eq_(
                sess.query(User).select_entity_from(sel).
                    options(joinedload_all('orders.items.keywords')).
                    join('orders', 'items', 'keywords', aliased=True).
                    filter(Keyword.name.in_(['red', 'big', 'round'])).\
                    all(),
                [
                    User(name='jack',orders=[
                        Order(description='order 1',items=[
                            Item(description='item 1',
                                keywords=[
                                    Keyword(name='red'),
                                    Keyword(name='big'),
                                    Keyword(name='round')
                                ]),
                            Item(description='item 2',
                                keywords=[
                                    Keyword(name='red',id=2),
                                    Keyword(name='small',id=5),
                                    Keyword(name='square')
                                ]),
                            Item(description='item 3',
                                keywords=[
                                    Keyword(name='green',id=3),
                                    Keyword(name='big',id=4),
                                    Keyword(name='round',id=6)])
                        ]),
                        Order(description='order 3',items=[
                            Item(description='item 3',
                                keywords=[
                                    Keyword(name='green',id=3),
                                    Keyword(name='big',id=4),
                                    Keyword(name='round',id=6)
                                ]),
                            Item(description='item 4',keywords=[],id=4),
                            Item(description='item 5',keywords=[],id=5)
                        ]),
                        Order(description='order 5',
                            items=[
                                Item(description='item 5',keywords=[])])
                    ])
                ])
        self.assert_sql_count(testing.db, go, 1)

        sess.expunge_all()
        sel2 = orders.select(orders.c.id.in_([1,2,3]))
        eq_(sess.query(Order).select_entity_from(sel2).\
                join('items', 'keywords').\
                filter(Keyword.name == 'red').\
                order_by(Order.id).all(), [
            Order(description='order 1',id=1),
            Order(description='order 2',id=2),
        ])
        eq_(sess.query(Order).select_entity_from(sel2).\
                join('items', 'keywords', aliased=True).\
                filter(Keyword.name == 'red').\
                order_by(Order.id).all(), [
            Order(description='order 1',id=1),
            Order(description='order 2',id=2),
        ])

    def test_replace_with_eager(self):
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties = {
            'addresses':relationship(Address, order_by=addresses.c.id)
        })
        mapper(Address, addresses)

        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        def go():
            eq_(sess.query(User).options(
                    joinedload('addresses')
                ).select_entity_from(sel).order_by(User.id).all(),
                [
                    User(id=7, addresses=[Address(id=1)]),
                    User(id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            eq_(sess.query(User).options(
                    joinedload('addresses')
                ).select_entity_from(sel).filter(User.id==8).order_by(User.id).all(),
                [User(id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)])]
            )
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            eq_(sess.query(User).options(
                    joinedload('addresses')
                ).select_entity_from(sel).order_by(User.id)[1],
                User(id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)]))
        self.assert_sql_count(testing.db, go, 1)


class CustomJoinTest(QueryTest):
    run_setup_mappers = None

    def test_double_same_mappers(self):
        """test aliasing of joins with a custom join condition"""

        addresses, items, order_items, orders, Item, User, Address, Order, users = (self.tables.addresses,
                                self.tables.items,
                                self.tables.order_items,
                                self.tables.orders,
                                self.classes.Item,
                                self.classes.User,
                                self.classes.Address,
                                self.classes.Order,
                                self.tables.users)

        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'items':relationship(Item, secondary=order_items, lazy='select',
                order_by=items.c.id),
        })
        mapper(Item, items)
        mapper(User, users, properties = dict(
            addresses = relationship(Address, lazy='select'),
            open_orders = relationship(Order,
                primaryjoin = and_(orders.c.isopen == 1,
                            users.c.id==orders.c.user_id),
                lazy='select'),
            closed_orders = relationship(Order,
                primaryjoin = and_(orders.c.isopen == 0,
                            users.c.id==orders.c.user_id),
                lazy='select')
        ))
        q = create_session().query(User)

        eq_(
            q.join('open_orders', 'items', aliased=True).filter(Item.id==4).\
                join('closed_orders', 'items',
                    aliased=True).filter(Item.id==3).all(),
            [User(id=7)]
        )


class ExternalColumnsTest(QueryTest):
    """test mappers with SQL-expressions added as column properties."""

    run_setup_mappers = None

    def test_external_columns_bad(self):
        users, User = self.tables.users, self.classes.User

        assert_raises_message(sa_exc.ArgumentError,
                "not represented in the mapper's table",
                mapper, User, users, properties={
                    'concat': (users.c.id * 2),
                })
        clear_mappers()

    def test_external_columns(self):
        """test querying mappings that reference external columns
        or selectables."""

        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'concat': column_property((users.c.id * 2)),
            'count': column_property(
                select([func.count(addresses.c.id)],
                        users.c.id==addresses.c.user_id).\
                correlate(users).\
                as_scalar())
        })

        mapper(Address, addresses, properties={
            'user':relationship(User)
        })

        sess = create_session()

        sess.query(Address).options(joinedload('user')).all()

        eq_(sess.query(User).all(),
            [
                User(id=7, concat=14, count=1),
                User(id=8, concat=16, count=3),
                User(id=9, concat=18, count=1),
                User(id=10, concat=20, count=0),
            ]
        )

        address_result = [
            Address(id=1, user=User(id=7, concat=14, count=1)),
            Address(id=2, user=User(id=8, concat=16, count=3)),
            Address(id=3, user=User(id=8, concat=16, count=3)),
            Address(id=4, user=User(id=8, concat=16, count=3)),
            Address(id=5, user=User(id=9, concat=18, count=1))
        ]
        eq_(sess.query(Address).all(), address_result)

        # run the eager version twice to test caching of aliased clauses
        for x in range(2):
            sess.expunge_all()
            def go():
                eq_(sess.query(Address).\
                        options(joinedload('user')).\
                        order_by(Address.id).all(),
                    address_result)
            self.assert_sql_count(testing.db, go, 1)

        ualias = aliased(User)
        eq_(
            sess.query(Address, ualias).join(ualias, 'user').all(),
            [(address, address.user) for address in address_result]
        )

        eq_(
            sess.query(Address, ualias.count).\
                join(ualias, 'user').\
                join('user', aliased=True).\
                order_by(Address.id).all(),
            [
                (Address(id=1), 1),
                (Address(id=2), 3),
                (Address(id=3), 3),
                (Address(id=4), 3),
                (Address(id=5), 1)
            ]
        )

        eq_(sess.query(Address, ualias.concat, ualias.count).
                join(ualias, 'user').
                join('user', aliased=True).order_by(Address.id).all(),
            [
                (Address(id=1), 14, 1),
                (Address(id=2), 16, 3),
                (Address(id=3), 16, 3),
                (Address(id=4), 16, 3),
                (Address(id=5), 18, 1)
            ]
        )

        ua = aliased(User)
        eq_(sess.query(Address, ua.concat, ua.count).
                select_entity_from(join(Address, ua, 'user')).
                options(joinedload(Address.user)).order_by(Address.id).all(),
            [
                (Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1),
                (Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1)
            ]
        )

        eq_(list(sess.query(Address).join('user').values(Address.id, User.id, User.concat, User.count)),
            [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)]
        )

        eq_(list(sess.query(Address, ua).select_entity_from(join(Address,ua, 'user')).values(Address.id, ua.id, ua.concat, ua.count)),
            [(1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3), (5, 9, 18, 1)]
        )

    def test_external_columns_joinedload(self):
        users, orders, User, Address, Order, addresses = (self.tables.users,
                                self.tables.orders,
                                self.classes.User,
                                self.classes.Address,
                                self.classes.Order,
                                self.tables.addresses)

        # in this test, we have a subquery on User that accesses "addresses", underneath
        # a joinedload for "addresses".  So the "addresses" alias adapter needs to *not* hit
        # the "addresses" table within the "user" subquery, but "user" still needs to be adapted.
        # therefore the long standing practice of eager adapters being "chained" has been removed
        # since it's unnecessary and breaks this exact condition.
        mapper(User, users, properties={
            'addresses':relationship(Address, backref='user',
                        order_by=addresses.c.id),
            'concat': column_property((users.c.id * 2)),
            'count': column_property(select([func.count(addresses.c.id)],
                            users.c.id==addresses.c.user_id).correlate(users))
        })
        mapper(Address, addresses)
        mapper(Order, orders, properties={
            'address':relationship(Address),  # m2o
        })

        sess = create_session()
        def go():
            o1 = sess.query(Order).options(joinedload_all('address.user')).get(1)
            eq_(o1.address.user.count, 1)
        self.assert_sql_count(testing.db, go, 1)

        sess = create_session()
        def go():
            o1 = sess.query(Order).options(joinedload_all('address.user')).first()
            eq_(o1.address.user.count, 1)
        self.assert_sql_count(testing.db, go, 1)

    def test_external_columns_compound(self):
        # see [ticket:2167] for background
        users, Address, addresses, User = (self.tables.users,
                                self.classes.Address,
                                self.tables.addresses,
                                self.classes.User)

        mapper(User, users, properties={
            'fullname':column_property(users.c.name.label('x'))
        })

        mapper(Address, addresses, properties={
            'username':column_property(
                        select([User.fullname]).\
                            where(User.id==addresses.c.user_id).label('y'))
        })
        sess = create_session()
        a1 = sess.query(Address).first()
        eq_(a1.username, "jack")

        sess = create_session()
        a1 = sess.query(Address).from_self().first()
        eq_(a1.username, "jack")


class TestOverlyEagerEquivalentCols(fixtures.MappedTest):
    @classmethod
    def define_tables(cls, metadata):
        base = Table('base', metadata,
            Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
            Column('data', String(50))
        )

        sub1 = Table('sub1', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
            Column('data', String(50))
        )

        sub2 = Table('sub2', metadata,
            Column('id', Integer, ForeignKey('base.id'),
                            ForeignKey('sub1.id'), primary_key=True),
            Column('data', String(50))
        )

    def test_equivs(self):
        base, sub2, sub1 = (self.tables.base,
                                self.tables.sub2,
                                self.tables.sub1)

        class Base(fixtures.ComparableEntity):
            pass
        class Sub1(fixtures.ComparableEntity):
            pass
        class Sub2(fixtures.ComparableEntity):
            pass

        mapper(Base, base, properties={
            'sub1':relationship(Sub1),
            'sub2':relationship(Sub2)
        })

        mapper(Sub1, sub1)
        mapper(Sub2, sub2)

        sess = create_session()
        s11 = Sub1(data='s11')
        s12 = Sub1(data='s12')
        s2 = Sub2(data='s2')
        b1 = Base(data='b1', sub1=[s11], sub2=[])
        b2 = Base(data='b1', sub1=[s12], sub2=[])
        sess.add(b1)
        sess.add(b2)
        sess.flush()

        # there's an overlapping ForeignKey here, so not much option except
        # to artificially control the flush order
        b2.sub2 = [s2]
        sess.flush()

        q = sess.query(Base).outerjoin('sub2', aliased=True)
        assert sub1.c.id not in q._filter_aliases.equivalents

        eq_(
            sess.query(Base).join('sub1').outerjoin('sub2', aliased=True).\
                filter(Sub1.id==1).one(),
            b1
        )


class LabelCollideTest(fixtures.MappedTest):
    """Test handling for a label collision.  This collision
    is handled by core, see ticket:2702 as well as
    test/sql/test_selectable->WithLabelsTest.  Here we want
    to make sure the end result is as we expect.

    """

    @classmethod
    def define_tables(cls, metadata):
        Table('foo', metadata,
            Column('id', Integer, primary_key=True),
            Column('bar_id', Integer)
        )
        Table('foo_bar', metadata,
            Column('id', Integer, primary_key=True),
        )

    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass
        class Bar(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        mapper(cls.classes.Foo, cls.tables.foo)
        mapper(cls.classes.Bar, cls.tables.foo_bar)

    @classmethod
    def insert_data(cls):
        s = Session()
        s.add_all([
            cls.classes.Foo(id=1, bar_id=2),
            cls.classes.Bar(id=3)
        ])
        s.commit()

    def test_overlap_plain(self):
        s = Session()
        row = s.query(self.classes.Foo, self.classes.Bar).all()[0]
        def go():
            eq_(row.Foo.id, 1)
            eq_(row.Foo.bar_id, 2)
            eq_(row.Bar.id, 3)
        # all three columns are loaded independently without
        # overlap, no additional SQL to load all attributes
        self.assert_sql_count(testing.db, go, 0)

    def test_overlap_subquery(self):
        s = Session()
        row = s.query(self.classes.Foo, self.classes.Bar).from_self().all()[0]
        def go():
            eq_(row.Foo.id, 1)
            eq_(row.Foo.bar_id, 2)
            eq_(row.Bar.id, 3)
        # all three columns are loaded independently without
        # overlap, no additional SQL to load all attributes
        self.assert_sql_count(testing.db, go, 0)<|fim▁end|>
<|file_name|>VFB2PhisXML.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python

import sys
sys.path.append("../build/")
import phisSchema
import pyxb
import warnings

# Strategy:
# Perhaps cleanest would be to build a separate interface for data that may vary from VFB.
# This also allows separation of Jython code.
# OTOH - this gives another layer of mappings to maintain.

# Sketch of interface:
# minimal vars to set (for now):
# image_id, image URL, source links; expressed feature (+ its type - gene or transgene);
# classification of struc & overlapped region

# Generator functions live outside the classes.  They generate objects that must then be bound.


def gen_OntologyTerm(id_name, ID):
    """Takes an id_name lookup dict for ontology terms and an ID.
    Returns a phisSchema.OntologyTerm object"""
    ot = phisSchema.OntologyTerm()
    ot.termId = ID
    ot.termLabel = id_name[ID]
    return ot

def gen_Link(display_name, url):
    """Takes display_name and url as args and returns a phisSchema.Link object"""
    gen_Link = phisSchema.Link()
    gen_Link.display_name = display_name
    gen_Link.url = url
    return gen_Link

def gen_Annotation(ot, text, mode):
    """Generate a phisSchema.Annotation object based on specified:
    ot: ontology term
    text: free text
    mode: Manual/Automated"""
    annotation = phisSchema.Annotation()
    annotation.annotation_freetext = text
    annotation.ontology_term = ot
    annotation.annotationMode = mode
    return annotation

def gen_roi_Coordinates(x, y, z):
    """Generate a phisSchema.Coordinates object for an roi.
    Each arg specifies a range in the form of a list or tuple with 2 elements.
    """
    try:
        assert len(x) == 2
        assert len(y) == 2
        assert len(z) == 2
    except:
        warnings.warn("Percent arrays should have only 2 members - specifying a range.")
    coord = phisSchema.Coordinates()
    coord.x_coordinates = _gen_PercentArray(*x)
    coord.y_coordinates = _gen_PercentArray(*y)
    coord.z_coordinates = _gen_PercentArray(*z)
    return coord

def _gen_PercentArray(a, b):
    AB = (a, b)
    pa = phisSchema.PercentArray()
    pa.extend(AB)
    return pa

def gen_GenotypeComponent(gf_symbol=False, gf_id=False, gene_symbol=False,
                          gene_id=False, gf_ensembl_id=False):
    ## How to specify channel. Use defaults? ###
    """Generate a phisSchema.GenotypeComponent object.
    All args are strings. Please specify each arg with a keyword.
    """
    gc = phisSchema.GenotypeComponent()
    if gene_id:
        gc.gene_id = gene_id
    if gene_symbol:
        gc.gene_symbol = gene_symbol
    if gf_symbol:
        gc.genetic_feature_symbol = gf_symbol
    if gf_id:
        gc.genetic_feature_id = gf_id
    if gf_ensembl_id:
        gc.genetic_feature_ensembl_id = gf_ensembl_id
    return gc


class imageDataSet():
    """Class to use for generating sets of images from a common source.
    Assumes all datasets have a common source name and URL, and that they
    share a background channel marker and visualization methods for the
    background and signal channels.  All of these are set by methods
    rather than KWARGS.
    """
    # May not be worth bothering with a class here

    def __init__(self, ont_dict):
        ### Do we have a way to distinguish general source from specific source links?
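        # Illustrative usage sketch (editor's addition - the IDs, names and
        # URLs below are hypothetical, not taken from the source):
        #
        #     ds = imageDataSet(ont_dict)
        #     ds.set_source("FlyCircuit", "http://www.flycircuit.tw")
        #     ds.add_signal_channel_visualisation_method("FBbi_00000064")
        #     im = VfbWtAdultBrainImage(ont_dict, ds, "00001",
        #                               "http://example.org/00001.png")
        #     im.set_is_expression_pattern(True)
        #     xml = ds.doc.toxml()  # pyxb bindings can serialise the whole Doc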
        self.doc = phisSchema.Doc()
        self.source = ''
        self.background_channel_marker = ''
        self.signal_channel_visualisation_methods = []
        self.background_channel_visualisation_methods = []
        self.ont_dict = ont_dict

    def set_source(self, source_name, source_url):
        """source_name and source_url are strings"""
        self.source = gen_Link(source_name, source_url)

    def set_background_channel_marker(self, genotype_component):
        """Takes a phisSchema.GenotypeComponent object as an arg"""
        self.background_channel_marker = genotype_component

    def add_signal_channel_visualisation_method(self, sfid):
        """sfid is the shortFormId of an FBbi visualisation method"""
        self.signal_channel_visualisation_methods.append(gen_OntologyTerm(self.ont_dict, sfid))

    def add_background_channel_visualisation_method(self, sfid):
        """sfid is the shortFormId of an FBbi visualisation method"""
        self.background_channel_visualisation_methods.append(gen_OntologyTerm(self.ont_dict, sfid))


class VfbImage():
    """Interface class for loading VFB data.  Assumes a 3D confocal image
    with 2 channels - a background stain channel and a signal channel
    depicting some interesting expression/anatomy."""

    # Define constants here: Or should this just jump straight to populating model?
    host = gen_Link("Virtual Fly Brain", "http://www.virtualflybrain.org")  # for image_description.host

    def __init__(self, ont, image_dataset):
        """ont: an ID:name dict of ontology terms used in XML to be produced
        image_dataset: an imageDataSet object
        """
        self.ont = ont
        self._initialise_image()
        self._unpack_image_dataset(image_dataset)
        self.image.image_description.host = self.host

    def _unpack_image_dataset(self, image_dataset):
        self.set_source(image_dataset.source)
        # self.set_signal_channel_visualisation_method(image_dataset.)  # Needs extend rather than append?
        # self.set_background_channel_visualisation_method(image_dataset.)  # Needs extend rather than append?
        self.set_expressed_feature_for_background_channel(image_dataset.background_channel_marker)

    def set_organism(self, stage, sex):
        """stage must be a phisSchema.OntologyTerm object;
        sex must be the string 'Male' or 'Female'"""
        organism = phisSchema.Organism()
        organism.taxon = "Drosophila melanogaster"
        organism.sex = sex
        organism.ncbi_taxon_id = "NCBItaxon_7227"
        organism.stage = stage
        self.image.organism = organism

    def _initialise_image(self):
        """Assume 2 channels each with an associated ROI at 100%.
        All objects generated by multiple iterations appended to common doc.
        Generate IDs for two channels and corresponding ROIs according to the scheme:
        image_id-a/b roi_id-a/b; channel_id-a/b - where id = self.vfb_image_id.
        channel1/roi1 = background.  channel2/roi2 = signal."""

        # Generate Root objects
        self.image = phisSchema.Image()
        self.channel1 = phisSchema.Channel()
        self.channel2 = phisSchema.Channel()
        self.roi1 = phisSchema.Roi()
        self.roi2 = phisSchema.Roi()

        # bind root objects to doc
        # Which pattern??
        # This doesn't work for multiple images/rois: self.doc.append(image)  # Need to work on checking the more obvious
        self.doc.image.append(self.image)
        self.doc.channel.append(self.channel1)
        self.doc.channel.append(self.channel2)
        self.doc.roi.append(self.roi1)
        self.doc.roi.append(self.roi2)
        # Populate IDs
        self.image.id = "image_" + self.vfb_image_id
        self.channel1.id = "channel_" + self.vfb_image_id + "-a"
        self.channel2.id = "channel_" + self.vfb_image_id + "-b"
        self.roi1.id = "roi_" + self.vfb_image_id + "-a"
        self.roi2.id = "roi_" + self.vfb_image_id + "-b"
        self.image.associated_roi = pyxb.BIND()  # Special magic
        self.image.associated_roi.el.append(self.roi1.id)  # Is this correct, or should I be populating a string array and appending that?
        self.image.associated_roi.el.append(self.roi2.id)
        self.image.associated_channel = pyxb.BIND()
        self.image.associated_channel.el.append(self.channel1.id)
        self.image.associated_channel.el.append(self.channel2.id)
        self.channel1.associated_image = self.image.id
        self.channel2.associated_image = self.image.id
        self.roi1.associated_image = self.image.id
        self.roi2.associated_image = self.image.id
        self.roi1.associated_channel = pyxb.BIND()
        self.roi1.associated_channel.el.append(self.channel1.id)
        self.roi2.associated_channel = pyxb.BIND()
        self.roi2.associated_channel.el.append(self.channel2.id)
        self.channel1.associated_roi = pyxb.BIND()
        self.channel1.associated_roi.el.append(self.roi1.id)
        self.channel2.associated_roi = pyxb.BIND()
        self.channel2.associated_roi.el.append(self.roi2.id)
        # both ROIs cover whole image:
        self.roi1.coordinates = gen_roi_Coordinates((0, 100), (0, 100), (0, 100))
        self.roi2.coordinates = gen_roi_Coordinates((0, 100), (0, 100), (0, 100))
        self.depicted_anatomy_background = phisSchema.AnnotationArray()
        self.roi1.depicted_anatomical_structure = self.depicted_anatomy_background
        self.depicted_anatomy_exp_channel = phisSchema.AnnotationArray()
        self.roi2.depicted_anatomical_structure = self.depicted_anatomy_exp_channel
        # Expansions. Add more here as needed.
        self.image_description = phisSchema.ImageDescription()
        self.image.image_description = self.image_description
        self.image.image_description.sample_preparation = pyxb.BIND()
        self.image.image_description.imaging_method = pyxb.BIND()
        # Method 1 - intermediate node and directly bind
        imaging_methods = phisSchema.OntologyTermArray()
        self.image.image_description.imaging_method = imaging_methods  # But remember - this is only possible because of an earlier pyxb expansion
        imaging_methods.append(gen_OntologyTerm(self.ont, "FBbi_00000251"))
        # Method 2 - pyxb.BIND() expansion
        self.image.image_description.sample_preparation = pyxb.BIND()
        self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000024"))  # whole mount tissue
        self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000002"))  # chemically fixed

    # Set methods generate the relevant object and bind it.

    def set_dimensions(self, x, y, z=0):
        """x, y and z are dimensions in pixels. z is optional (default 0)"""
        dimensions = phisSchema.Dimensions()
        dimensions.image_width = x
        dimensions.image_height = y
        dimensions.image_depth = z
        self.image_description.image_dimensions = dimensions

    def set_image_and_sample_type(self, wt_or_mut, exp_anat_phen):
        # NOTE: both args are currently unused - sample and image type are hard-wired below.
        self.image.image_description.sample_type = "wild type"
        ita = phisSchema.ImageTypeArray()
        ita.append("expression")  # Use Expression if depicts expression pattern - otherwise use anatomy/phenotype. Don't think there is any case for using both.
        self.image.image_description.image_type = ita

    def set_source(self, source):
        """source must be a phisSchema.Link object. Assumes source of image and organism are the same."""
        self.image.image_description.image_generated_by = source
        self.image.image_description.organism_generated_by = source

    def set_background_channel_visualisation_method(self, sfid):
        # By convention channel1 is the background channel.
        self.channel1.visualisation_method = pyxb.BIND()
        self.channel1.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))

    def set_signal_channel_visualisation_method(self, sfid):
        self.channel2.visualisation_method = pyxb.BIND()
        self.channel2.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))

    def add_background_depicted_entity(self, sfid, text, mode):
        # By convention, background channel anatomy is always roi1
        annotation = gen_Annotation(gen_OntologyTerm(self.ont, sfid), text, mode)
        self.depicted_anatomy_background.append(annotation)

    def add_depicted_anatomy_for_expressed_feature(self, sfid, text, mode):
        # By convention, signal channel anatomy is always roi2
        annotation = gen_Annotation(gen_OntologyTerm(self.ont, sfid), text, mode)
        self.depicted_anatomy_exp_channel.append(annotation)

    def set_is_expression_pattern(self, s=True):
        """By convention channel2 is signal channel."""  # Should really just be a boolean.
        if s:
            self.channel2.is_expression_pattern = "Yes"
        else:
            self.channel2.is_expression_pattern = "No"

    def set_expressed_feature_for_signal_channel(self, genotype_component):
        """genotype_component: a phisSchema.GenotypeComponent object."""
        self.channel2.depicts_expression_of = genotype_component

    def set_expressed_feature_for_background_channel(self, genotype_component):
        """genotype_component: a phisSchema.GenotypeComponent object."""
        self.channel1.depicts_expression_of = genotype_component

    def set_image_context_url(self, url):
        self.image.image_description.image_context_url = url

class VfbWtAdultBrainImage(VfbImage):
    """Args:
    - ont is a name_id dict lookup for ontology terms.
    - image_dataset is an imageDataSet object
    - vfb_image_id is an id string for the image
    - image_url is also a string
    Compulsory fields to set in order to generate XML:
    - set_sex("Male/Female")
    - set_is_expression_pattern(True/False)
    - add_depicted_anatomy_for_expressed_feature(ont_term)
    Other necessary fields to set for usable XML:
    - set_expressed_feature
    - set_visualisation_method
    Set by default:
    - sample prep: chemically fixed; whole mount tissue
    - imaging methods: confocal microscopy
    - image has 2 channels - one background, and one signal.
    - organism: Dmel
    - stage: adult
    - Background channel anatomy: adult brain
    - Dimensions = 512,512,512
    """
    # Consider ditching this subclass if we don't find a bunch of more specific things to say. Might be better to have subclasses for neuron, clone and expression pattern
    # One doc for all images.

    def __init__(self, ont, image_dataset, vfb_image_id, image_url):
        self.ont = ont
        self.doc = image_dataset.doc
        self.vfb_image_id = vfb_image_id
        self._initialise_image()
        self.image.image_description.image_url = image_url
        self.set_source(image_dataset.source)
        self.stage = gen_OntologyTerm(ont, "FBdv_00005369")  # Hmmmm - global!
        self.image.image_description.host = self.host
        self.set_dimensions(512, 512, 512)
        self.add_background_depicted_entity("FBbt_00003624", "background channel", "Manual")
        ita = phisSchema.ImageTypeArray()
        ita.append("expression")  # Use Expression if depicts expression pattern - otherwise use anatomy/phenotype. Don't think there is any case for using both.
        self.image.image_description.image_type = ita
        self.image.image_description.sample_type = "wild type"

    def set_sex(self, sex):
        """sex = string "Male"/"Female". Automatically sets doc.image.organism"""
        self.set_organism(self.stage, sex)

# Test
# (For testing purposes. Will be autogenerated from ontology files in full run)
<|fim▁hole|>
# Notes
# Assignment is simple - once you get all the way out to a node.
#depicted.termId = "FBbi_1234567"
#depicted.termLabel = "fubar"
# Append an instance of depicted to the list (el)
#image.depicted_anatomical_structure = pyxb.BIND()
#image.depicted_anatomical_structure.append(depicted)
# Testing
#print image.depicted_anatomical_structure.toxml()
# '<?xml version="1.0" ?><depicted_anatomical_structure><el><anatomy_ontology_id>FBbi_1234567</anatomy_ontology_id><anatomy_ontology_term>fubar</anatomy_ontology_term></el></depicted_anatomical_structure>'
# But all this feels quite verbose - can I make use of the Factory methods on some nodes to make this easier?
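# Usage sketch (hedged): a plausible end-to-end run, assuming an ont dict that
# covers the FBbi/FBbt/FBdv ids used above; the image id and URL are hypothetical.
#ont = {"FBbt_00003624": "adult brain", "FBdv_00005369": "adult stage",
#       "FBbi_00000251": "confocal microscopy", "FBbi_00000024": "whole mount tissue",
#       "FBbi_00000002": "chemically fixed tissue"}
#ds = imageDataSet(ont)
#ds.set_source("Virtual Fly Brain", "http://www.virtualflybrain.org")
#im = VfbWtAdultBrainImage(ont, ds, "00000001", "http://example.org/00000001.png")
#im.set_sex("Female")
#im.set_is_expression_pattern(True)
#im.add_depicted_anatomy_for_expressed_feature("FBbt_00003624", "expression in adult brain", "Manual")
#print ds.doc.toxml()<|fim▁end|>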
<|file_name|>factory.py<|end_file_name|><|fim▁begin|># Copyright 2012 Kevin Ormbrek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.<|fim▁hole|>
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from suds.sudsobject import Object as SudsObject


class _FactoryKeywords(object):

    def set_wsdl_object_attribute(self, object, name, value):
        """Sets the attribute of a WSDL object.

        Example:
        | ${order search request}= | Create Wsdl Object | OrderSearchRequest | |
        | Set Wsdl Object Attribute | ${order search request} | id | 4065 |
        """
        self._assert_is_suds_object(object)
        getattr(object, name)  # raises AttributeError if the attribute does not already exist
        setattr(object, name, value)

    def get_wsdl_object_attribute(self, object, name):
        """Gets the attribute of a WSDL object.

        Extended variable syntax may be used to access attributes; however,
        some WSDL objects may have attribute names that are illegal in Python,
        necessitating this keyword.

        Example:
        | ${sale record}= | Call Soap Method | getLastSale | |
        | ${price}= | Get Wsdl Object Attribute | ${sale record} | Price |
        """
        self._assert_is_suds_object(object)
        return getattr(object, name)

    def create_wsdl_object(self, type, *name_value_pairs):
        """Creates a WSDL object of the specified `type`.

        Requested `type` must be defined in the WSDL, in an import specified
        by the WSDL, or with `Add Doctor Import`. `type` is case sensitive.

        Example:
        | ${contact}= | Create Wsdl Object | Contact | |
        | Set Wsdl Object Attribute | ${contact} | Name | Kelly Newman |
        Attribute values can be set by passing the attribute name and value in
        pairs. This is equivalent to the two lines above:
        | ${contact}= | Create Wsdl Object | Contact | Name | Kelly Newman |
        """
        if len(name_value_pairs) % 2 != 0:
            raise ValueError("Creating a WSDL object failed. There should be "
                             "an even number of name-value pairs.")
        obj = self._client().factory.create(type)
        for i in range(0, len(name_value_pairs), 2):
            self.set_wsdl_object_attribute(obj, name_value_pairs[i], name_value_pairs[i + 1])
        return obj

    # private

    def _assert_is_suds_object(self, object):
        if not isinstance(object, SudsObject):
            raise ValueError("Object must be a WSDL object (suds.sudsobject.Object).")
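# Usage sketch (hedged; `keywords` stands in for a library instance mixing in
# _FactoryKeywords with an active suds client - the names are illustrative only):
#   contact = keywords.create_wsdl_object("Contact", "Name", "Kelly Newman")
#   keywords.set_wsdl_object_attribute(contact, "Name", "K. Newman")
#   name = keywords.get_wsdl_object_attribute(contact, "Name")<|fim▁end|>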
# You may obtain a copy of the License at